diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..0562170d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+.testrepository
+*.pyc
+.tox
+*.egg-info
+AUTHORS
+ChangeLog
+etc/gnocchi/gnocchi.conf
+doc/build
+doc/source/rest.rst
+releasenotes/build
+cover
+.coverage
+dist
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 00000000..e4b8477d
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/gnocchi.git
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 00000000..c274843c
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t . ${OS_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
+group_regex=(gabbi\.suitemaker\.test_gabbi((_prefix_|_live_|_)([^_]+)))_
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..72b03e19
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,44 @@
+language: python
+sudo: required
+
+services:
+ - docker
+
+cache:
+ directories:
+ - ~/.cache/pip
+env:
+ - TARGET: bashate
+ - TARGET: pep8
+ - TARGET: docs
+ - TARGET: docs-gnocchi.xyz
+
+ - TARGET: py27-mysql-ceph-upgrade-from-3.1
+ - TARGET: py35-postgresql-file-upgrade-from-3.1
+
+ - TARGET: py27-mysql
+ - TARGET: py35-mysql
+ - TARGET: py27-postgresql
+ - TARGET: py35-postgresql
+
+before_script:
+# Travis: we need to fetch all tags/branches for the documentation targets
+ - case $TARGET in
+ docs*)
+ git fetch origin $(git ls-remote -q | sed -n '/refs\/heads/s,.*refs/heads\(.*\),:remotes/origin\1,gp') ;
+ git fetch --tags ;
+ git fetch --unshallow ;
+ ;;
+ esac
+
+ - docker build --tag gnocchi-ci --file=tools/travis-ci-setup.dockerfile .
+script:
+ - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchi-ci tox -e ${TARGET}
+
+notifications:
+ email: false
+ irc:
+ on_success: change
+ on_failure: always
+ channels:
+ - "irc.freenode.org#gnocchi"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..68c771a0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,176 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..8f248e6e
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+include etc/gnocchi/gnocchi.conf
diff --git a/README b/README
deleted file mode 100644
index 90ebc471..00000000
--- a/README
+++ /dev/null
@@ -1,10 +0,0 @@
-This project has been moved to https://github.com/gnocchixyz/gnocchi
-
-The contents of this repository are still available in the Git
-source code management system. To see the contents of this
-repository before it reached its end of life, please check out the
-previous commit with "git checkout HEAD^1".
-
-For any further questions, please email
-openstack-dev@lists.openstack.org or join #openstack-dev or #gnocchi on
-Freenode.
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..ca172f4d
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,14 @@
+===============================
+ Gnocchi - Metric as a Service
+===============================
+
+.. image:: doc/source/_static/gnocchi-logo.png
+
+Gnocchi is a multi-tenant timeseries, metrics and resources database. It
+provides an `HTTP REST`_ interface to create and manipulate the data. It is
+designed to store metrics at a very large scale while providing access to
+metrics and resources information and history.
+
+You can read the full documentation online at http://gnocchi.xyz.
+
+.. _`HTTP REST`: https://en.wikipedia.org/wiki/Representational_state_transfer
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 00000000..9d9b91a5
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,10 @@
+libpq-dev [platform:dpkg]
+postgresql [platform:dpkg]
+mysql-client [platform:dpkg]
+mysql-server [platform:dpkg]
+build-essential [platform:dpkg]
+libffi-dev [platform:dpkg]
+librados-dev [platform:dpkg]
+ceph [platform:dpkg]
+redis-server [platform:dpkg]
+liberasurecode-dev [platform:dpkg]
diff --git a/devstack/README.rst b/devstack/README.rst
new file mode 100644
index 00000000..1d6c9ed0
--- /dev/null
+++ b/devstack/README.rst
@@ -0,0 +1,15 @@
+============================
+Enabling Gnocchi in DevStack
+============================
+
+1. Download DevStack::
+
+ git clone https://git.openstack.org/openstack-dev/devstack.git
+ cd devstack
+
+2. Add this repo as an external repository in ``local.conf`` file::
+
+ [[local|localrc]]
+ enable_plugin gnocchi https://git.openstack.org/openstack/gnocchi
+
+3. Run ``stack.sh``.
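+
+The plugin's behaviour can be tuned through the variables defined in
+``devstack/settings``. For example, to select the file storage backend and
+the uwsgi deployment mode (an illustrative snippet; any settings variable can
+be overridden the same way)::
+
+   [[local|localrc]]
+   enable_plugin gnocchi https://git.openstack.org/openstack/gnocchi
+   GNOCCHI_STORAGE_BACKEND=file
+   GNOCCHI_DEPLOY=uwsgi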
diff --git a/devstack/apache-gnocchi.template b/devstack/apache-gnocchi.template
new file mode 100644
index 00000000..bc288755
--- /dev/null
+++ b/devstack/apache-gnocchi.template
@@ -0,0 +1,10 @@
+
+WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV%
+WSGIProcessGroup gnocchi
+WSGIScriptAlias %SCRIPT_NAME% %WSGI%
+<Location %SCRIPT_NAME%>
+    WSGIProcessGroup gnocchi
+    WSGIApplicationGroup %{GLOBAL}
+</Location>
+
+WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/devstack/apache-ported-gnocchi.template b/devstack/apache-ported-gnocchi.template
new file mode 100644
index 00000000..2a56fa8d
--- /dev/null
+++ b/devstack/apache-ported-gnocchi.template
@@ -0,0 +1,15 @@
+Listen %GNOCCHI_PORT%
+
+<VirtualHost *:%GNOCCHI_PORT%>
+    WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV%
+    WSGIProcessGroup gnocchi
+    WSGIScriptAlias / %WSGI%
+    WSGIApplicationGroup %{GLOBAL}
+    <IfVersion >= 2.4>
+        ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/gnocchi.log
+    CustomLog /var/log/%APACHE_NAME%/gnocchi-access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh
new file mode 100755
index 00000000..c01d37a0
--- /dev/null
+++ b/devstack/gate/gate_hook.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This script is executed inside gate_hook function in devstack gate.
+
+STORAGE_DRIVER="$1"
+SQL_DRIVER="$2"
+
+ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd,tempest,"
+
+# Use an efficient WSGI web server
+DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_DEPLOY=uwsgi'
+DEVSTACK_LOCAL_CONFIG+=$'\nexport KEYSTONE_DEPLOY=uwsgi'
+
+export DEVSTACK_GATE_INSTALL_TESTONLY=1
+export DEVSTACK_GATE_NO_SERVICES=1
+export DEVSTACK_GATE_TEMPEST=1
+export DEVSTACK_GATE_TEMPEST_NOTESTS=1
+export DEVSTACK_GATE_EXERCISES=0
+export KEEP_LOCALRC=1
+
+case $STORAGE_DRIVER in
+ file)
+ DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=file'
+ ;;
+ swift)
+ ENABLED_SERVICES+="s-proxy,s-account,s-container,s-object,"
+ DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=swift'
+ # FIXME(sileht): use mod_wsgi as workaround for LP#1508424
+ DEVSTACK_LOCAL_CONFIG+=$'\nexport SWIFT_USE_MOD_WSGI=True'
+ ;;
+ ceph)
+ DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=ceph'
+ ;;
+esac
+
+
+# default to mysql
+case $SQL_DRIVER in
+ postgresql)
+ export DEVSTACK_GATE_POSTGRES=1
+ ;;
+esac
+
+export ENABLED_SERVICES
+export DEVSTACK_LOCAL_CONFIG
+
+$BASE/new/devstack-gate/devstack-vm-gate.sh
diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh
new file mode 100755
index 00000000..f4a89086
--- /dev/null
+++ b/devstack/gate/post_test_hook.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This script is executed inside post_test_hook function in devstack gate.
+
+source $BASE/new/devstack/openrc admin admin
+
+set -e
+
+function generate_testr_results {
+ if [ -f .testrepository/0 ]; then
+ sudo /usr/os-testr-env/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
+ sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
+ sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
+ sudo gzip -9 $BASE/logs/testrepository.subunit
+ sudo gzip -9 $BASE/logs/testr_results.html
+ sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
+ sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
+ fi
+}
+
+set -x
+
+export GNOCCHI_DIR="$BASE/new/gnocchi"
+sudo chown -R stack:stack $GNOCCHI_DIR
+cd $GNOCCHI_DIR
+
+openstack catalog list
+
+export GNOCCHI_SERVICE_TOKEN=$(openstack token issue -c id -f value)
+export GNOCCHI_ENDPOINT=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}')
+export GNOCCHI_AUTHORIZATION="" # Temporarily set to ease the transition to the new functional testing
+
+curl -X GET ${GNOCCHI_ENDPOINT}/v1/archive_policy -H "Content-Type: application/json"
+
+sudo gnocchi-upgrade
+
+# Just ensure the tools still work
+sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2
+
+# NOTE(sileht): on the swift job, permissions are wrong, I don't know why
+sudo chown -R tempest:stack $BASE/new/tempest
+sudo chown -R tempest:stack $BASE/data/tempest
+
+# Run tests with tempest
+cd $BASE/new/tempest
+set +e
+sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- gnocchi --concurrency=$TEMPEST_CONCURRENCY
+TEMPEST_EXIT_CODE=$?
+set -e
+if [[ $TEMPEST_EXIT_CODE != 0 ]]; then
+ # Collect and parse result
+ generate_testr_results
+ exit $TEMPEST_EXIT_CODE
+fi
+
+# Run tests with tox
+cd $GNOCCHI_DIR
+echo "Running gnocchi functional test suite"
+set +e
+sudo -E -H -u stack tox -epy27-gate
+EXIT_CODE=$?
+set -e
+
+# Collect and parse result
+generate_testr_results
+exit $EXIT_CODE
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
new file mode 100644
index 00000000..e1ef90b4
--- /dev/null
+++ b/devstack/plugin.sh
@@ -0,0 +1,474 @@
+# Gnocchi devstack plugin
+# Install and start **Gnocchi** service
+
+# To enable Gnocchi service, add the following to localrc:
+#
+# enable_plugin gnocchi https://github.com/openstack/gnocchi master
+#
+# This will turn on both gnocchi-api and gnocchi-metricd services.
+# If you don't want one of those (you do) you can use the
+# disable_service command in local.conf.
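+#
+# For example, to run without the statsd daemon (an illustrative
+# local.conf snippet):
+#
+#   [[local|localrc]]
+#   enable_plugin gnocchi https://github.com/openstack/gnocchi master
+#   disable_service gnocchi-statsd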
+
+# Dependencies:
+#
+# - ``functions``
+# - ``DEST``, ``STACK_USER`` must be defined
+# - ``APACHE_NAME`` for wsgi
+# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# - ``SERVICE_HOST``
+# - ``OS_AUTH_URL``, ``KEYSTONE_SERVICE_URI`` for auth in api
+
+# stack.sh
+# ---------
+# - install_gnocchi
+# - configure_gnocchi
+# - init_gnocchi
+# - start_gnocchi
+# - stop_gnocchi
+# - cleanup_gnocchi
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set -o xtrace
+
+
+if [ -z "$GNOCCHI_DEPLOY" ]; then
+ # Default
+ GNOCCHI_DEPLOY=simple
+
+ # Fallback to common wsgi devstack configuration
+ if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then
+ GNOCCHI_DEPLOY=mod_wsgi
+
+ # Deprecated config
+ elif [ -n "$GNOCCHI_USE_MOD_WSGI" ] ; then
+ echo_summary "GNOCCHI_USE_MOD_WSGI is deprecated, use GNOCCHI_DEPLOY instead"
+ if [ "$GNOCCHI_USE_MOD_WSGI" == True ]; then
+ GNOCCHI_DEPLOY=mod_wsgi
+ fi
+ fi
+fi
+
+# Functions
+# ---------
+
+# Test if any Gnocchi services are enabled
+# is_gnocchi_enabled
+function is_gnocchi_enabled {
+ [[ ,${ENABLED_SERVICES} =~ ,"gnocchi-" ]] && return 0
+ return 1
+}
+
+# Test if Ceph services are enabled
+# _is_ceph_enabled
+function _is_ceph_enabled {
+ type is_ceph_enabled_for_service >/dev/null 2>&1 && return 0
+ return 1
+}
+
+# create_gnocchi_accounts() - Set up common required gnocchi accounts
+
+# Project User Roles
+# -------------------------------------------------------------------------
+# $SERVICE_TENANT_NAME gnocchi service
+# gnocchi_swift gnocchi_swift ResellerAdmin (if Swift is enabled)
+function create_gnocchi_accounts {
+ # Gnocchi
+ if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && is_service_enabled gnocchi-api ; then
+ # At this time, the /etc/openstack/clouds.yaml is available,
+ # we could leverage that by setting OS_CLOUD
+ OLD_OS_CLOUD=$OS_CLOUD
+ export OS_CLOUD='devstack-admin'
+
+ create_service_user "gnocchi"
+
+ local gnocchi_service=$(get_or_create_service "gnocchi" \
+ "metric" "OpenStack Metric Service")
+ get_or_create_endpoint $gnocchi_service \
+ "$REGION_NAME" \
+ "$(gnocchi_service_url)" \
+ "$(gnocchi_service_url)" \
+ "$(gnocchi_service_url)"
+
+ if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then
+ get_or_create_project "gnocchi_swift" default
+ local gnocchi_swift_user=$(get_or_create_user "gnocchi_swift" \
+ "$SERVICE_PASSWORD" default "gnocchi_swift@example.com")
+ get_or_add_user_project_role "ResellerAdmin" $gnocchi_swift_user "gnocchi_swift"
+ fi
+
+ export OS_CLOUD=$OLD_OS_CLOUD
+ fi
+}
+
+# return the service url for gnocchi
+function gnocchi_service_url {
+ if [[ -n $GNOCCHI_SERVICE_PORT ]]; then
+ echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT"
+ else
+ echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST$GNOCCHI_SERVICE_PREFIX"
+ fi
+}
+
+# install redis
+# NOTE(chdent): We shouldn't rely on ceilometer being present so cannot
+# use its install_redis. There are enough packages now using redis
+# that there should probably be something in devstack itself for
+# installing it.
+function _gnocchi_install_redis {
+ if is_ubuntu; then
+ install_package redis-server
+ restart_service redis-server
+ else
+ # This will fail (correctly) where a redis package is unavailable
+ install_package redis
+ restart_service redis
+ fi
+
+ pip_install_gr redis
+}
+
+function _gnocchi_install_grafana {
+ if is_ubuntu; then
+ local file=$(mktemp /tmp/grafanapkg-XXXXX)
+ wget -O "$file" "$GRAFANA_DEB_PKG"
+ sudo dpkg -i "$file"
+ rm $file
+ elif is_fedora; then
+ sudo yum install "$GRAFANA_RPM_PKG"
+ fi
+ if [ ! "$GRAFANA_PLUGIN_VERSION" ]; then
+ sudo grafana-cli plugins install sileht-gnocchi-datasource
+ elif [ "$GRAFANA_PLUGIN_VERSION" != "git" ]; then
+ tmpfile=/tmp/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz
+ wget https://github.com/sileht/grafana-gnocchi-datasource/releases/download/${GRAFANA_PLUGIN_VERSION}/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz -O $tmpfile
+ sudo -u grafana tar -xzf $tmpfile -C /var/lib/grafana/plugins
+ rm -f $tmpfile
+ else
+ git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR}
+ sudo ln -sf ${GRAFANA_PLUGINS_DIR}/dist /var/lib/grafana/plugins/grafana-gnocchi-datasource
+ # NOTE(sileht): This is long and has a chance to fail, thx nodejs/npm
+ (cd /var/lib/grafana/plugins/grafana-gnocchi-datasource && npm install && ./run-tests.sh) || true
+ fi
+ sudo service grafana-server restart
+}
+
+function _cleanup_gnocchi_apache_wsgi {
+ sudo rm -f $GNOCCHI_WSGI_DIR/*.wsgi
+ sudo rm -f $(apache_site_config_for gnocchi)
+}
+
+# _config_gnocchi_apache_wsgi() - Set WSGI config files of Gnocchi
+function _config_gnocchi_apache_wsgi {
+ sudo mkdir -p $GNOCCHI_WSGI_DIR
+
+ local gnocchi_apache_conf=$(apache_site_config_for gnocchi)
+ local venv_path=""
+ local script_name=$GNOCCHI_SERVICE_PREFIX
+
+ if [[ ${USE_VENV} = True ]]; then
+ venv_path="python-path=${PROJECT_VENV["gnocchi"]}/lib/$(python_version)/site-packages"
+ fi
+
+ # copy wsgi file
+ sudo cp $GNOCCHI_DIR/gnocchi/rest/app.wsgi $GNOCCHI_WSGI_DIR/
+
+ # Only run the API on a custom PORT if it has been specifically
+ # asked for.
+ if [[ -n $GNOCCHI_SERVICE_PORT ]]; then
+ sudo cp $GNOCCHI_DIR/devstack/apache-ported-gnocchi.template $gnocchi_apache_conf
+ sudo sed -e "
+ s|%GNOCCHI_PORT%|$GNOCCHI_SERVICE_PORT|g;
+ " -i $gnocchi_apache_conf
+ else
+ sudo cp $GNOCCHI_DIR/devstack/apache-gnocchi.template $gnocchi_apache_conf
+ sudo sed -e "
+ s|%SCRIPT_NAME%|$script_name|g;
+ " -i $gnocchi_apache_conf
+ fi
+ sudo sed -e "
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%WSGI%|$GNOCCHI_WSGI_DIR/app.wsgi|g;
+ s|%USER%|$STACK_USER|g
+ s|%APIWORKERS%|$API_WORKERS|g
+ s|%VIRTUALENV%|$venv_path|g
+ " -i $gnocchi_apache_conf
+}
+
+
+
+# cleanup_gnocchi() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_gnocchi {
+ if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
+ _cleanup_gnocchi_apache_wsgi
+ fi
+}
+
+# configure_gnocchi() - Set config files, create data dirs, etc
+function configure_gnocchi {
+ [ ! -d $GNOCCHI_DATA_DIR ] && sudo mkdir -m 755 -p $GNOCCHI_DATA_DIR
+ sudo chown $STACK_USER $GNOCCHI_DATA_DIR
+
+ # Configure logging
+ iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
+ iniset $GNOCCHI_CONF metricd metric_processing_delay "$GNOCCHI_METRICD_PROCESSING_DELAY"
+
+ # Set up logging
+ if [ "$SYSLOG" != "False" ]; then
+ iniset $GNOCCHI_CONF DEFAULT use_syslog "True"
+ fi
+
+ # Format logging
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$GNOCCHI_DEPLOY" != "mod_wsgi" ]; then
+ setup_colorized_logging $GNOCCHI_CONF DEFAULT
+ fi
+
+ if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then
+ iniset $GNOCCHI_CONF storage coordination_url "$GNOCCHI_COORDINATOR_URL"
+ fi
+
+ if is_service_enabled gnocchi-statsd ; then
+ iniset $GNOCCHI_CONF statsd resource_id $GNOCCHI_STATSD_RESOURCE_ID
+ iniset $GNOCCHI_CONF statsd project_id $GNOCCHI_STATSD_PROJECT_ID
+ iniset $GNOCCHI_CONF statsd user_id $GNOCCHI_STATSD_USER_ID
+ fi
+
+ # Configure the storage driver
+ if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
+ iniset $GNOCCHI_CONF storage driver ceph
+ iniset $GNOCCHI_CONF storage ceph_username ${GNOCCHI_CEPH_USER}
+ iniset $GNOCCHI_CONF storage ceph_secret $(awk '/key/{print $3}' ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring)
+ elif is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then
+ iniset $GNOCCHI_CONF storage driver swift
+ iniset $GNOCCHI_CONF storage swift_user gnocchi_swift
+ iniset $GNOCCHI_CONF storage swift_key $SERVICE_PASSWORD
+ iniset $GNOCCHI_CONF storage swift_project_name "gnocchi_swift"
+ iniset $GNOCCHI_CONF storage swift_auth_version 3
+ iniset $GNOCCHI_CONF storage swift_authurl $KEYSTONE_SERVICE_URI_V3
+ elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'file' ]] ; then
+ iniset $GNOCCHI_CONF storage driver file
+ iniset $GNOCCHI_CONF storage file_basepath $GNOCCHI_DATA_DIR/
+ elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] ; then
+ iniset $GNOCCHI_CONF storage driver redis
+ iniset $GNOCCHI_CONF storage redis_url $GNOCCHI_REDIS_URL
+ else
+ echo "ERROR: could not configure storage driver"
+ exit 1
+ fi
+
+ if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then
+ # Configure auth token middleware
+ configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR
+ iniset $GNOCCHI_CONF api auth_mode keystone
+ if is_service_enabled gnocchi-grafana; then
+ iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL}
+ fi
+ else
+ inidelete $GNOCCHI_CONF api auth_mode
+ fi
+
+ # Configure the indexer database
+ iniset $GNOCCHI_CONF indexer url `database_connection_url gnocchi`
+
+ if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
+ _config_gnocchi_apache_wsgi
+ elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
+ # iniset creates these files when it's called if they don't exist.
+ GNOCCHI_UWSGI_FILE=$GNOCCHI_CONF_DIR/uwsgi.ini
+
+ rm -f "$GNOCCHI_UWSGI_FILE"
+
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi http $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "/usr/local/bin/gnocchi-api"
+ # This is running standalone
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi master true
+ # Set die-on-term & exit-on-reload so that uwsgi shuts down
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi die-on-term true
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi exit-on-reload true
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi threads 32
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi processes $API_WORKERS
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi enable-threads true
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi plugins python
+ # uwsgi recommends this to prevent thundering herd on accept.
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi thunder-lock true
+ # Override the default size for headers from the 4k default.
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi buffer-size 65535
+ # Make sure the client doesn't try to re-use the connection.
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi add-header "Connection: close"
+ # Don't share rados resources and python-requests globals between processes
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi lazy-apps true
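+ # For reference, the generated uwsgi.ini then contains roughly the
+ # following (values depending on the variables above):
+ #
+ #   [uwsgi]
+ #   http = $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT
+ #   wsgi-file = /usr/local/bin/gnocchi-api
+ #   master = true
+ #   processes = $API_WORKERS
+ #   threads = 32
+ #   lazy-apps = true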
+ fi
+}
+
+# configure_keystone_for_gnocchi() - Configure Keystone needs for Gnocchi
+function configure_keystone_for_gnocchi {
+ if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then
+ if is_service_enabled gnocchi-grafana; then
+ # NOTE(sileht): the keystone configuration has to be set before uwsgi
+ # is started
+ iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL}
+ fi
+ fi
+}
+
+# configure_ceph_gnocchi() - gnocchi config needs to come after gnocchi is set up
+function configure_ceph_gnocchi {
+ # Configure gnocchi service options, ceph pool, ceph user and ceph key
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GNOCCHI_CEPH_POOL} ${GNOCCHI_CEPH_POOL_PG} ${GNOCCHI_CEPH_POOL_PGP}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} crush_ruleset ${RULE_ID}
+
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GNOCCHI_CEPH_USER} mon "allow r" osd "allow rwx pool=${GNOCCHI_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
+ sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
+}
+
+
+# init_gnocchi() - Initialize etc.
+function init_gnocchi {
+ # Create cache dir
+ sudo mkdir -p $GNOCCHI_AUTH_CACHE_DIR
+ sudo chown $STACK_USER $GNOCCHI_AUTH_CACHE_DIR
+ rm -f $GNOCCHI_AUTH_CACHE_DIR/*
+
+ if is_service_enabled mysql postgresql; then
+ recreate_database gnocchi
+ fi
+ $GNOCCHI_BIN_DIR/gnocchi-upgrade
+}
+
+function preinstall_gnocchi {
+ if is_ubuntu; then
+ # libpq-dev is needed to build psycopg2
+ # uuid-runtime is needed to use the uuidgen command
+ install_package libpq-dev uuid-runtime
+ else
+ install_package postgresql-devel
+ fi
+ if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
+ install_package cython
+ install_package librados-dev
+ fi
+}
+
+# install_gnocchi() - Collect source and prepare
+function install_gnocchi {
+ if [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] || [[ "${GNOCCHI_COORDINATOR_URL%%:*}" == "redis" ]]; then
+ _gnocchi_install_redis
+ fi
+
+ if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
+ pip_install cradox
+ fi
+
+ if is_service_enabled gnocchi-grafana
+ then
+ _gnocchi_install_grafana
+ fi
+
+ [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystone
+
+ # We don't use setup_package because we don't follow openstack/requirements
+ sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}]
+
+ if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
+ install_apache_wsgi
+ elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
+ pip_install uwsgi
+ fi
+
+ # Create configuration directory
+ [ ! -d $GNOCCHI_CONF_DIR ] && sudo mkdir -m 755 -p $GNOCCHI_CONF_DIR
+ sudo chown $STACK_USER $GNOCCHI_CONF_DIR
+}
+
+# start_gnocchi() - Start running processes, including screen
+function start_gnocchi {
+
+ if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
+ enable_apache_site gnocchi
+ restart_apache_server
+ if [[ -n $GNOCCHI_SERVICE_PORT ]]; then
+ tail_log gnocchi /var/log/$APACHE_NAME/gnocchi.log
+ tail_log gnocchi-api /var/log/$APACHE_NAME/gnocchi-access.log
+ else
+ # NOTE(chdent): At the moment this is very noisy as it
+ # will tail the entire apache logs, not just the gnocchi
+ # parts. If you don't like this, either set USE_SCREEN=False
+ # or GNOCCHI_SERVICE_PORT.
+ tail_log gnocchi /var/log/$APACHE_NAME/error[_\.]log
+ tail_log gnocchi-api /var/log/$APACHE_NAME/access[_\.]log
+ fi
+ elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
+ run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE"
+ else
+ run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api --port $GNOCCHI_SERVICE_PORT"
+ fi
+ # only die on API if it was actually intended to be turned on
+ if is_service_enabled gnocchi-api; then
+
+ echo "Waiting for gnocchi-api to start..."
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -v --max-time 5 --noproxy '*' -s $(gnocchi_service_url)/v1/resource/generic ; do sleep 1; done"; then
+ die $LINENO "gnocchi-api did not start"
+ fi
+ fi
+
+ # run metricd last so we are properly waiting for swift and friends
+ run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d --config-file $GNOCCHI_CONF"
+ run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d --config-file $GNOCCHI_CONF"
+}
+
+# stop_gnocchi() - Stop running processes
+function stop_gnocchi {
+ if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
+ disable_apache_site gnocchi
+ restart_apache_server
+ fi
+ # Kill the gnocchi screen windows
+ for serv in gnocchi-api gnocchi-metricd gnocchi-statsd; do
+ stop_process $serv
+ done
+}
+
+if is_service_enabled gnocchi-api; then
+ if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
+ echo_summary "Configuring system services for Gnocchi"
+ preinstall_gnocchi
+ elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+ echo_summary "Installing Gnocchi"
+ stack_install_service gnocchi
+ configure_keystone_for_gnocchi
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+ echo_summary "Configuring Gnocchi"
+ if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
+ echo_summary "Configuring Gnocchi for Ceph"
+ configure_ceph_gnocchi
+ fi
+ configure_gnocchi
+ create_gnocchi_accounts
+ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+ echo_summary "Initializing Gnocchi"
+ init_gnocchi
+ start_gnocchi
+ fi
+
+ if [[ "$1" == "unstack" ]]; then
+ echo_summary "Stopping Gnocchi"
+ stop_gnocchi
+ fi
+
+ if [[ "$1" == "clean" ]]; then
+ cleanup_gnocchi
+ fi
+fi
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/devstack/settings b/devstack/settings
new file mode 100644
index 00000000..2ac7d52a
--- /dev/null
+++ b/devstack/settings
@@ -0,0 +1,65 @@
+enable_service gnocchi-api
+enable_service gnocchi-metricd
+enable_service gnocchi-statsd
+
+# Set up default directories
+GNOCCHI_DIR=$DEST/gnocchi
+GNOCCHI_CONF_DIR=/etc/gnocchi
+GNOCCHI_CONF=$GNOCCHI_CONF_DIR/gnocchi.conf
+GNOCCHI_LOG_DIR=/var/log/gnocchi
+GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi}
+GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi}
+GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi}
+GNOCCHI_COORDINATOR_URL=${GNOCCHI_COORDINATOR_URL:-redis://localhost:6379}
+GNOCCHI_METRICD_PROCESSING_DELAY=${GNOCCHI_METRICD_PROCESSING_DELAY:-5}
+
+# GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values:
+# - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi
+# - simple : Run gnocchi-api
+# - uwsgi : Run Gnocchi under uwsgi
+# - <empty> : Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES
+GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY}
+
+# Toggle for deploying Gnocchi with/without Keystone
+GNOCCHI_USE_KEYSTONE=$(trueorfalse True GNOCCHI_USE_KEYSTONE)
+
+# Support potential entry-points console scripts and venvs
+if [[ ${USE_VENV} = True ]]; then
+ PROJECT_VENV["gnocchi"]=${GNOCCHI_DIR}.venv
+ GNOCCHI_BIN_DIR=${PROJECT_VENV["gnocchi"]}/bin
+else
+ GNOCCHI_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+
+# Gnocchi connection info.
+GNOCCHI_SERVICE_PROTOCOL=http
+# NOTE(chdent): If you are not using mod wsgi you need to set port!
+GNOCCHI_SERVICE_PORT=${GNOCCHI_SERVICE_PORT:-8041}
+GNOCCHI_SERVICE_PREFIX=${GNOCCHI_SERVICE_PREFIX:-'/metric'}
+GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}}
+
+# Gnocchi statsd info
+GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)}
+GNOCCHI_STATSD_USER_ID=${GNOCCHI_STATSD_USER_ID:-$(uuidgen)}
+GNOCCHI_STATSD_PROJECT_ID=${GNOCCHI_STATSD_PROJECT_ID:-$(uuidgen)}
+
+# Ceph gnocchi info
+GNOCCHI_CEPH_USER=${GNOCCHI_CEPH_USER:-gnocchi}
+GNOCCHI_CEPH_POOL=${GNOCCHI_CEPH_POOL:-gnocchi}
+GNOCCHI_CEPH_POOL_PG=${GNOCCHI_CEPH_POOL_PG:-8}
+GNOCCHI_CEPH_POOL_PGP=${GNOCCHI_CEPH_POOL_PGP:-8}
+
+# Redis gnocchi info
+GNOCCHI_REDIS_URL=${GNOCCHI_REDIS_URL:-redis://localhost:6379}
+
+# Gnocchi backend
+GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-redis}
+
+# Grafana settings
+GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm}
+GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb}
+GRAFANA_PLUGIN_VERSION=${GRAFANA_PLUGIN_VERSION}
+GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-gnocchi-datasource}
+GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/gnocchixyz/grafana-gnocchi-datasource.git}
+GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000}
diff --git a/doc/source/_static/gnocchi-icon-source.png b/doc/source/_static/gnocchi-icon-source.png
new file mode 100644
index 00000000..d6108c41
Binary files /dev/null and b/doc/source/_static/gnocchi-icon-source.png differ
diff --git a/doc/source/_static/gnocchi-icon.ico b/doc/source/_static/gnocchi-icon.ico
new file mode 100644
index 00000000..783bde93
Binary files /dev/null and b/doc/source/_static/gnocchi-icon.ico differ
diff --git a/doc/source/_static/gnocchi-logo.png b/doc/source/_static/gnocchi-logo.png
new file mode 100644
index 00000000..e3fc8903
Binary files /dev/null and b/doc/source/_static/gnocchi-logo.png differ
diff --git a/doc/source/architecture.png b/doc/source/architecture.png
new file mode 100644
index 00000000..a54f873f
Binary files /dev/null and b/doc/source/architecture.png differ
diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst
new file mode 100755
index 00000000..9b7b4f9c
--- /dev/null
+++ b/doc/source/architecture.rst
@@ -0,0 +1,82 @@
+======================
+ Project Architecture
+======================
+
+Gnocchi consists of several services: an HTTP REST API (see :doc:`rest`), an
+optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous
+processing daemon (named `gnocchi-metricd`). Data is received via the HTTP
+REST API or the statsd daemon. `gnocchi-metricd` performs operations
+(computing statistics, metric cleanup, etc.) on the received data in the
+background.
+
+Both the HTTP REST API and the asynchronous processing daemon are stateless
+and horizontally scalable; additional workers can be added depending on the
+load.
+
+.. image:: architecture.png
+ :align: center
+ :width: 80%
+ :alt: Gnocchi architecture
+
+
+Back-ends
+---------
+
+Gnocchi uses three different back-ends for storing data: one for storing new
+incoming measures (the incoming driver), one for storing the time series (the
+storage driver) and one for indexing the data (the index driver).
+
+The *incoming* storage is responsible for storing new measures sent to metrics.
+It is by default – and usually – the same driver as the *storage* one.
+
+The *storage* is responsible for storing measures of created metrics. It
+receives timestamps and values, and pre-computes aggregations according to the
+defined archive policies.
+
+The *indexer* is responsible for storing the index of all resources, archive
+policies and metrics, along with their definitions, types and properties. The
+indexer is also responsible for linking resources with metrics.
+
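+The back-ends are selected in ``gnocchi.conf``. As an illustration, a minimal
+sketch using the file driver for storage and PostgreSQL for the indexer
+(paths and credentials are placeholders) could be::
+
+    [storage]
+    driver = file
+    file_basepath = /var/lib/gnocchi
+
+    [indexer]
+    url = postgresql://gnocchi:password@localhost/gnocchi
+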
+Available storage back-ends
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Gnocchi currently offers different storage drivers:
+
+* File (default)
+* `Ceph`_ (preferred)
+* `OpenStack Swift`_
+* `S3`_
+* `Redis`_
+
+The drivers are based on an intermediate library, named *Carbonara*, which
+handles the time series manipulation, since none of these storage technologies
+handle time series natively.
+
+These *Carbonara*-based drivers work well and are as scalable as their
+back-end technology permits. Ceph and Swift are inherently more scalable than
+the file driver.
+
+Depending on the size of your architecture, using the file driver and storing
+your data on a disk might be enough. If you need to scale the number of
+servers with the file driver, you can export and share the data via NFS among
+all Gnocchi processes. In any case, the S3, Ceph and Swift drivers are far
+more scalable. Ceph also offers better consistency, and is hence the
+recommended driver.
+
+.. _OpenStack Swift: http://docs.openstack.org/developer/swift/
+.. _Ceph: https://ceph.com
+.. _`S3`: https://aws.amazon.com/s3/
+.. _`Redis`: https://redis.io
+
+Available index back-ends
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Gnocchi currently offers different index drivers:
+
+* `PostgreSQL`_ (preferred)
+* `MySQL`_ (at least version 5.6.4)
+
+Those drivers offer almost the same performance and features, though PostgreSQL
+tends to be more performant and has some additional features (e.g. resource
+duration computing).
+
+.. _PostgreSQL: http://postgresql.org
+.. _MySQL: http://mysql.org
diff --git a/doc/source/client.rst b/doc/source/client.rst
new file mode 100644
index 00000000..6aa428a1
--- /dev/null
+++ b/doc/source/client.rst
@@ -0,0 +1,13 @@
+========
+ Client
+========
+
+Gnocchi currently only provides a Python client and SDK which can be installed
+using *pip*::
+
+ pip install gnocchiclient
+
+This package provides the `gnocchi` command line tool that can be used to send
+requests to Gnocchi. You can read the `full documentation online`_.
+
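+For instance, once installed and pointed at a running Gnocchi endpoint, the
+tool can be used like this (the archive policy name is just an example)::
+
+   gnocchi archive-policy list
+   gnocchi metric create --archive-policy-name low
+   gnocchi resource list
+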
+.. _full documentation online: http://gnocchi.xyz/gnocchiclient
diff --git a/doc/source/collectd.rst b/doc/source/collectd.rst
new file mode 100644
index 00000000..0b91b448
--- /dev/null
+++ b/doc/source/collectd.rst
@@ -0,0 +1,14 @@
+==================
+ Collectd support
+==================
+
+`Collectd`_ can use Gnocchi to store its data through a plugin called
+`collectd-gnocchi`. It can be installed with *pip*::
+
+ pip install collectd-gnocchi
+
+`Sources and documentation`_ are also available.
+
+
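+A minimal collectd configuration loading the plugin might look like the
+following sketch; the module name and the ``Endpoint`` option are assumptions,
+so check the plugin documentation for the exact option names::
+
+   <LoadPlugin python>
+     Globals true
+   </LoadPlugin>
+
+   <Plugin python>
+     Import "collectd_gnocchi"
+     <Module collectd_gnocchi>
+       # Assumed option: the URL where the Gnocchi API listens
+       Endpoint "http://localhost:8041"
+     </Module>
+   </Plugin>
+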
+.. _`Collectd`: https://www.collectd.org/
+.. _`Sources and documentation`: https://github.com/gnocchixyz/collectd-gnocchi
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 00000000..8c9b810b
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+#
+# Gnocchi documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import datetime
+import os
+import subprocess
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'gnocchi.gendoc',
+ 'sphinxcontrib.httpdomain',
+ 'sphinx.ext.autodoc',
+ 'reno.sphinxext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Gnocchi'
+copyright = u'%s, OpenStack Foundation' % datetime.date.today().year
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = subprocess.Popen(['sh', '-c', 'cd ../..; python setup.py --version'],
+ stdout=subprocess.PIPE).stdout.read()
+version = version.strip()
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+import sphinx_rtd_theme
+html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_logo = '_static/gnocchi-logo.png'
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = '_static/gnocchi-icon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'gnocchidoc'
+
+html_theme_options = {
+ 'logo_only': True,
+}
+
+# Multiversion docs
+scv_sort = ('semver',)
+scv_show_banner = True
+scv_banner_greatest_tag = True
+scv_priority = 'branches'
+scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)')
+scv_whitelist_tags = ("^[2-9]\.",)
+
+here = os.path.dirname(os.path.realpath(__file__))
+html_static_path_abs = ",".join([os.path.join(here, p) for p in html_static_path])
+# NOTE(sileht): Override some conf options for old versions. Also, warnings as
+# errors have been enabled in versions > 3.1, so we can remove all of this
+# when we don't publish versions <= 3.1.X anymore.
+scv_overflow = ("-D", "html_theme=sphinx_rtd_theme",
+ "-D", "html_theme_options.logo_only=True",
+ "-D", "html_logo=gnocchi-logo.png",
+ "-D", "html_favicon=gnocchi-icon.ico",
+ "-D", "html_static_path=%s" % html_static_path_abs)
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
new file mode 100644
index 00000000..4dcb0b45
--- /dev/null
+++ b/doc/source/glossary.rst
@@ -0,0 +1,33 @@
+========
+Glossary
+========
+
+.. glossary::
+
+ Resource
+ An entity representing anything in your infrastructure that you will
+ associate metric(s) with. It is identified by a unique ID and can contain
+ attributes.
+
+ Metric
+ An entity storing measures, identified by a UUID. It can be attached to a
+ resource using a name. How a metric stores its measures is defined by the
+ archive policy it is associated with.
+
+ Measure
+ A datapoint tuple composed of a timestamp and a value.
+
+ Archive policy
+ A measure storage policy attached to a metric. It determines how long
+ measures will be kept in a metric and how they will be aggregated.
+
+ Granularity
+ The time between two measures in an aggregated timeseries of a metric.
+
+ Timeseries
+ A list of measures.
+
+ Aggregation method
+ Function used to aggregate multiple measures in one. For example, the
+ `min` aggregation method will aggregate the values of different measures
+ to the minimum value of all the measures in the time range.
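+
+For example, an archive policy keeping one day of measures aggregated at a
+five-minute granularity (288 points) could be defined through the REST API
+with an illustrative payload like this one (see :doc:`rest` for the exact
+format)::
+
+    {"name": "low", "definition": [{"granularity": "0:05:00", "points": 288}]}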
diff --git a/doc/source/grafana-screenshot.png b/doc/source/grafana-screenshot.png
new file mode 100644
index 00000000..eff16032
Binary files /dev/null and b/doc/source/grafana-screenshot.png differ
diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst
new file mode 100644
index 00000000..d731e613
--- /dev/null
+++ b/doc/source/grafana.rst
@@ -0,0 +1,52 @@
+=================
+Grafana support
+=================
+
+`Grafana`_ has support for Gnocchi through a plugin. It can be installed with
+grafana-cli::
+
+ sudo grafana-cli plugins install sileht-gnocchi-datasource
+
+`Source`_ and `Documentation`_ are also available.
+
+Grafana has two modes of operation: proxy or direct mode. In proxy mode, your
+browser only communicates with Grafana, and Grafana communicates with Gnocchi.
+In direct mode, your browser communicates with Grafana, Gnocchi, and possibly
+Keystone.
+
+Picking the right mode depends on whether your Gnocchi server is reachable by
+your browser and/or by your Grafana server.
+
+In order to use Gnocchi with Grafana in proxy mode, you just need to:
+
+1. Install Grafana and its Gnocchi plugin
+2. Configure a new datasource in Grafana with the Gnocchi URL.
+ If you are using the Keystone middleware for authentication, you can also
+ provide an authentication token.
+
+In order to use Gnocchi with Grafana in direct mode, you need to do a few more
+steps:
+
+1. Configure the CORS middleware in `gnocchi.conf` to allow requests from
+ Grafana::
+
+ [cors]
+ allowed_origin = http://example.com/grafana
+
+2. Configure the CORS middleware in Keystone to allow requests from Grafana too::
+
+ [cors]
+ allowed_origin = http://example.com/grafana
+
+3. Configure a new datasource in Grafana with the Keystone URL, a user, a
+ project and a password. Your browser will query Keystone for a token, and
+ then query Gnocchi based on what Grafana needs.
+
+.. image:: grafana-screenshot.png
+ :align: center
+ :alt: Grafana screenshot
+
+.. _`Grafana`: http://grafana.org
+.. _`Documentation`: https://grafana.net/plugins/sileht-gnocchi-datasource
+.. _`Source`: https://github.com/gnocchixyz/grafana-gnocchi-datasource
+.. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 00000000..6525abf7
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,70 @@
+==================================
+Gnocchi – Metric as a Service
+==================================
+
+.. include:: ../../README.rst
+ :start-line: 6
+
+Key Features
+------------
+
+- HTTP REST interface
+- Horizontal scalability
+- Metric aggregation
+- Measures batching support
+- Archiving policy
+- Metric value search
+- Structured resources
+- Resource history
+- Queryable resource indexer
+- Multi-tenant
+- Grafana support
+- Nagios/Icinga support
+- Statsd protocol support
+- Collectd plugin support
+
+Community
+---------
+You can join Gnocchi's community via the following channels:
+
+- Bug tracker: https://bugs.launchpad.net/gnocchi
+- IRC: #gnocchi on `Freenode <https://freenode.net>`_
+- Mailing list: `openstack-dev@lists.openstack.org
+ <mailto:openstack-dev@lists.openstack.org>`_ with
+ *[gnocchi]* in the `Subject` header.
+
+Why Gnocchi?
+------------
+
+Gnocchi has been created to fulfill the need for a time series database usable
+in the context of cloud computing: providing the ability to store large
+quantities of metrics. It has been designed to handle large amounts of
+measures while remaining performant, scalable and fault-tolerant, without
+building a hard dependency on any complex storage system.
+
+The Gnocchi project was started in 2014 as a spin-off of the `OpenStack
+Ceilometer`_ project to address the performance issues that Ceilometer
+encountered while using standard databases as storage backends for metrics.
+More information is available in `Julien's blog post on Gnocchi
+<https://julien.danjou.info/blog/2014/openstack-ceilometer-the-gnocchi-experiment>`_.
+
+.. _`OpenStack Ceilometer`: https://docs.openstack.org/developer/ceilometer/
+
+Documentation
+-------------
+
+.. toctree::
+ :maxdepth: 1
+
+ architecture
+ install
+ running
+ client
+ rest
+ statsd
+ grafana
+ nagios
+ collectd
+ glossary
+ releasenotes/index.rst
diff --git a/doc/source/install.rst b/doc/source/install.rst
new file mode 100644
index 00000000..897107a1
--- /dev/null
+++ b/doc/source/install.rst
@@ -0,0 +1,191 @@
+==============
+ Installation
+==============
+
+.. _installation:
+
+Installation
+============
+
+To install Gnocchi using `pip`, just type::
+
+ pip install gnocchi
+
+Depending on the drivers and features you want to use (see :doc:`architecture`
+for which driver to pick), you need to install extra variants using, for
+example::
+
+ pip install gnocchi[postgresql,ceph,keystone]
+
+This would install PostgreSQL support for the indexer, Ceph support for
+storage, and Keystone support for authentication and authorization.
+
+The list of variants available is:
+
+* keystone – provides Keystone authentication support
+* mysql – provides MySQL indexer support
+* postgresql – provides PostgreSQL indexer support
+* swift – provides OpenStack Swift storage support
+* s3 – provides Amazon S3 storage support
+* ceph – provides the common part of Ceph storage support
+* ceph_recommended_lib – provides Ceph (>=0.80) storage support
+* ceph_alternative_lib – provides Ceph (>=10.1.0) storage support
+* file – provides file driver support
+* redis – provides Redis storage support
+* doc – documentation building support
+* test – unit and functional tests support
+
+To install Gnocchi from source, run the standard Python installation
+procedure::
+
+ pip install -e .
+
+Again, depending on the drivers and features you want to use, you need to
+install extra variants using, for example::
+
+ pip install -e .[postgresql,ceph,ceph_recommended_lib]
+
+
+Ceph requirements
+-----------------
+
+The Ceph driver needs a Ceph user and a pool to be created beforehand. They
+can be created, for example, with:
+
+::
+
+ ceph osd pool create metrics 8 8
+ ceph auth get-or-create client.gnocchi mon "allow r" osd "allow rwx pool=metrics"
+
+
+Gnocchi leverages some librados features (omap, async, operation context) that
+are only available in the Python binding since python-rados >= 10.1.0. To
+handle this, Gnocchi uses the `cradox` Python library, which has exactly the
+same API but works with Ceph >= 0.80.0.
+
+If Ceph and python-rados are >= 10.1.0, the `cradox` library becomes optional
+but is still recommended.
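+
+For example, on an older Ceph 0.80 deployment, the driver would typically be
+installed together with the `cradox`-based variant (a sketch; pick the variant
+matching your Ceph version)::
+
+    pip install gnocchi[ceph,ceph_recommended_lib]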
+
+
+Configuration
+=============
+
+Configuration file
+-------------------
+
+By default, gnocchi looks for its configuration file in the following places,
+in order:
+
+* ``~/.gnocchi/gnocchi.conf``
+* ``~/gnocchi.conf``
+* ``/etc/gnocchi/gnocchi.conf``
+* ``/etc/gnocchi.conf``
+* ``~/gnocchi/gnocchi.conf.d``
+* ``~/gnocchi.conf.d``
+* ``/etc/gnocchi/gnocchi.conf.d``
+* ``/etc/gnocchi.conf.d``
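+
+If you keep the configuration elsewhere, the Gnocchi daemons accept the
+standard `oslo.config` command-line flag to point at it (a minimal sketch,
+assuming a custom path)::
+
+    gnocchi-metricd --config-file /path/to/gnocchi.conf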
+
+
+No config file is provided with the source code; it will be created during the
+installation. In case no configuration file was installed, one can easily be
+created by running:
+
+::
+
+ gnocchi-config-generator > /path/to/gnocchi.conf
+
+Configure Gnocchi by editing the appropriate file.
+
+The configuration file should be pretty explicit, but here are some of the base
+options you want to change and configure:
+
++---------------------+---------------------------------------------------+
+| Option name | Help |
++=====================+===================================================+
+| storage.driver | The storage driver for metrics. |
++---------------------+---------------------------------------------------+
+| indexer.url | URL to your indexer. |
++---------------------+---------------------------------------------------+
+| storage.file_* | Configuration options to store files |
+| | if you use the file storage driver. |
++---------------------+---------------------------------------------------+
+| storage.swift_* | Configuration options to access Swift |
+| | if you use the Swift storage driver. |
++---------------------+---------------------------------------------------+
+| storage.ceph_* | Configuration options to access Ceph |
+| | if you use the Ceph storage driver. |
++---------------------+---------------------------------------------------+
+| storage.s3_* | Configuration options to access S3 |
+| | if you use the S3 storage driver. |
++---------------------+---------------------------------------------------+
+| storage.redis_* | Configuration options to access Redis |
+| | if you use the Redis storage driver. |
++---------------------+---------------------------------------------------+
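+
+As a minimal sketch, a configuration using the file storage driver and a
+PostgreSQL indexer could look like this (the URL and path are placeholders to
+adapt to your deployment)::
+
+    [indexer]
+    url = postgresql://gnocchi:secret@localhost/gnocchi
+
+    [storage]
+    driver = file
+    file_basepath = /var/lib/gnocchi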
+
+Configuring authentication
+-----------------------------
+
+The API server supports different authentication methods: `basic` (the
+default), which uses the standard HTTP `Authorization` header, or `keystone`,
+which uses `OpenStack Keystone`_. If you successfully installed the `keystone`
+flavor using `pip` (see :ref:`installation`), you can set `api.auth_mode` to
+`keystone` to enable Keystone authentication.
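+
+For example, in `gnocchi.conf`::
+
+    [api]
+    auth_mode = keystone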
+
+.. _`OpenStack Keystone`: http://launchpad.net/keystone
+
+Initialization
+==============
+
+Once you have configured Gnocchi properly you need to initialize the indexer
+and storage:
+
+::
+
+ gnocchi-upgrade
+
+
+Upgrading
+=========
+
+In order to upgrade from a previous version of Gnocchi, you need to make sure
+that your indexer and storage are properly upgraded. Run the following:
+
+1. Stop the old version of Gnocchi API server and `gnocchi-statsd` daemon
+
+2. Stop the old version of `gnocchi-metricd` daemon
+
+.. note::
+
+ Data in backlog is never migrated between versions. Ensure the backlog is
+ empty before any upgrade to ensure data is not lost.
+
+3. Install the new version of Gnocchi
+
+4. Run `gnocchi-upgrade`.
+ This can take several hours depending on the size of your index and
+ storage.
+
+5. Start the new Gnocchi API server, `gnocchi-metricd`
+ and `gnocchi-statsd` daemons
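+
+As a sketch, on a deployment managed by systemd (the unit names are
+assumptions that vary per distribution), the sequence could look like::
+
+    systemctl stop gnocchi-api gnocchi-statsd
+    systemctl stop gnocchi-metricd
+    pip install --upgrade gnocchi
+    gnocchi-upgrade
+    systemctl start gnocchi-api gnocchi-metricd gnocchi-statsd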
+
+
+Installation Using Devstack
+===========================
+
+To enable Gnocchi in `devstack`_, add the following to local.conf:
+
+::
+
+ enable_plugin gnocchi https://github.com/openstack/gnocchi master
+
+To enable Grafana support in devstack, you can also enable `gnocchi-grafana`::
+
+ enable_service gnocchi-grafana
+
+Then, you can start devstack:
+
+::
+
+ ./stack.sh
+
+.. _devstack: http://devstack.org
diff --git a/doc/source/nagios.rst b/doc/source/nagios.rst
new file mode 100644
index 00000000..72d2556c
--- /dev/null
+++ b/doc/source/nagios.rst
@@ -0,0 +1,19 @@
+=====================
+Nagios/Icinga support
+=====================
+
+`Nagios`_ and `Icinga`_ have support for Gnocchi through the gnocchi-nagios
+service. It can be installed with pip::
+
+ pip install gnocchi-nagios
+
+`Source`_ and `Documentation`_ are also available.
+
+Gnocchi-nagios collects perfdata files generated by `Nagios`_ or `Icinga`_;
+transforms them into Gnocchi resources, metrics and measures format; and
+publishes them to the Gnocchi REST API.
+
+.. _`Nagios`: https://www.nagios.org/
+.. _`Icinga`: https://www.icinga.com/
+.. _`Documentation`: http://gnocchi-nagios.readthedocs.io/en/latest/
+.. _`Source`: https://github.com/sileht/gnocchi-nagios
diff --git a/doc/source/releasenotes/2.1.rst b/doc/source/releasenotes/2.1.rst
new file mode 100644
index 00000000..75b12881
--- /dev/null
+++ b/doc/source/releasenotes/2.1.rst
@@ -0,0 +1,6 @@
+===================================
+ 2.1 Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: origin/stable/2.1
diff --git a/doc/source/releasenotes/2.2.rst b/doc/source/releasenotes/2.2.rst
new file mode 100644
index 00000000..fea024d6
--- /dev/null
+++ b/doc/source/releasenotes/2.2.rst
@@ -0,0 +1,6 @@
+===================================
+ 2.2 Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: origin/stable/2.2
diff --git a/doc/source/releasenotes/3.0.rst b/doc/source/releasenotes/3.0.rst
new file mode 100644
index 00000000..4f664099
--- /dev/null
+++ b/doc/source/releasenotes/3.0.rst
@@ -0,0 +1,6 @@
+===================================
+ 3.0 Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: origin/stable/3.0
diff --git a/doc/source/releasenotes/3.1.rst b/doc/source/releasenotes/3.1.rst
new file mode 100644
index 00000000..9673b4a8
--- /dev/null
+++ b/doc/source/releasenotes/3.1.rst
@@ -0,0 +1,6 @@
+===================================
+ 3.1 Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: origin/stable/3.1
diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst
new file mode 100644
index 00000000..9b4032fa
--- /dev/null
+++ b/doc/source/releasenotes/index.rst
@@ -0,0 +1,11 @@
+Release Notes
+=============
+
+.. toctree::
+ :maxdepth: 2
+
+ unreleased
+ 3.1
+ 3.0
+ 2.2
+ 2.1
diff --git a/doc/source/releasenotes/unreleased.rst b/doc/source/releasenotes/unreleased.rst
new file mode 100644
index 00000000..875030f9
--- /dev/null
+++ b/doc/source/releasenotes/unreleased.rst
@@ -0,0 +1,5 @@
+============================
+Current Series Release Notes
+============================
+
+.. release-notes::
diff --git a/doc/source/rest.j2 b/doc/source/rest.j2
new file mode 100644
index 00000000..c06c845d
--- /dev/null
+++ b/doc/source/rest.j2
@@ -0,0 +1,586 @@
+================
+ REST API Usage
+================
+
+Authentication
+==============
+
+By default, the authentication is configured to the "basic" mode. You need to
+provide an `Authorization` header in your HTTP requests with a valid username
+(the password is not used). The "admin" username is granted all privileges,
+whereas any other username is recognized as having standard permissions.
+
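+As a minimal sketch, assuming the API listens on `localhost:8041`, an
+authenticated request could look like::
+
+    curl -u admin: http://localhost:8041/v1/metric
+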
+You can customize permissions by specifying a different `policy_file` than the
+default one.
+
+If you set the `api.auth_mode` value to `keystone`, the OpenStack Keystone
+middleware will be enabled for authentication. You then need to authenticate
+against Keystone and provide an `X-Auth-Token` header with a valid token for
+each request sent to Gnocchi's API.
+
+Metrics
+=======
+
+Gnocchi provides an object type that is called *metric*. A metric designates
+anything that can be measured: the CPU usage of a server, the temperature of a
+room or the number of bytes sent by a network interface.
+
+A metric only has a few properties: a UUID to identify it, a name, and the
+archive policy that will be used to store and aggregate the measures.
+
+To create a metric, the following API request should be used:
+
+{{ scenarios['create-metric']['doc'] }}
+
+Once created, you can retrieve the metric information:
+
+{{ scenarios['get-metric']['doc'] }}
+
+To retrieve the list of all the metrics created, use the following request:
+
+{{ scenarios['list-metric']['doc'] }}
+
+.. note::
+
+   Considering the large volume of metrics Gnocchi will store, query results
+   are limited to the `max_limit` value set in the configuration file.
+   Returned results are ordered by the metrics' id values. To retrieve the
+   next page of results, the id of a metric should be given as the `marker`
+   for the beginning of the next page of results.
+
+Default ordering and limits as well as page start can be modified
+using query parameters:
+
+{{ scenarios['list-metric-pagination']['doc'] }}
+
+It is possible to send measures to the metric:
+
+{{ scenarios['post-measures']['doc'] }}
+
+If there are no errors, Gnocchi does not return a response body, only a simple
+status code. It is possible to provide any number of measures.
+
+.. IMPORTANT::
+
+   While it is possible to send any number of (timestamp, value) pairs, they
+   still need to honor the constraints defined by the archive policy used by
+   the metric, such as the maximum timespan.
+
+
+Once measures are sent, it is possible to retrieve them using *GET* on the same
+endpoint:
+
+{{ scenarios['get-measures']['doc'] }}
+
+Depending on the driver, there may be some lag after POSTing measures before
+they are processed and queryable. To ensure your query returns all measures
+that have been POSTed, you can force any unprocessed measures to be handled:
+
+{{ scenarios['get-measures-refresh']['doc'] }}
+
+.. note::
+
+ Depending on the amount of data that is unprocessed, `refresh` may add
+ some overhead to your query.
+
+The list of points returned is composed of tuples with (timestamp, granularity,
+value) sorted by timestamp. The granularity is the timespan covered by
+aggregation for this point.
+
+It is possible to filter the measures over a time range by specifying the
+*start* and/or *stop* parameters to the query with a timestamp. The timestamp
+format can be either a floating point number (UNIX epoch) or an ISO 8601
+formatted timestamp:
+
+{{ scenarios['get-measures-from']['doc'] }}
+
+By default, the aggregated values that are returned use the *mean* aggregation
+method. It is possible to request any other method by specifying the
+*aggregation* query parameter:
+
+{{ scenarios['get-measures-max']['doc'] }}
+
+The list of available aggregation methods is: *mean*, *sum*, *last*, *max*,
+*min*, *std*, *median*, *first*, *count* and *Npct* (with 0 < N < 100).
+
+It's possible to provide the `granularity` argument to specify the granularity
+to retrieve, rather than all the granularities available:
+
+{{ scenarios['get-measures-granularity']['doc'] }}
+
+In addition to granularities defined by the archive policy, measures can be
+resampled to a new granularity.
+
+{{ scenarios['get-measures-resample']['doc'] }}
+
+.. note::
+
+ Depending on the aggregation method and frequency of measures, resampled
+ data may lack accuracy as it is working against previously aggregated data.
+
+Measures batching
+=================
+
+It is also possible to batch the sending of measures, i.e. send several
+measures for different metrics in a single call:
+
+{{ scenarios['post-measures-batch']['doc'] }}
+
+Or using named metrics of resources:
+
+{{ scenarios['post-measures-batch-named']['doc'] }}
+
+If some named metrics specified in the batch request do not exist, Gnocchi can
+try to create them as long as an archive policy rule matches:
+
+{{ scenarios['post-measures-batch-named-create']['doc'] }}
+
+
+Archive Policy
+==============
+
+When sending measures for a metric to Gnocchi, the values are dynamically
+aggregated. That means that Gnocchi does not store all sent measures, but
+aggregates them over a certain period of time. Gnocchi provides several
+built-in aggregation methods (mean, min, max, sum…).
+
+An archive policy is defined by a list of items in the `definition` field. Each
+item is composed of the timespan and the level of precision that must be kept
+when aggregating data, determined using at least 2 of the `points`,
+`granularity` and `timespan` fields. For example, an item might be defined
+as 12 points over 1 hour (one point every 5 minutes), or 1 point every 1 hour
+over 1 day (24 points).
+
+By default, new measures can only be processed if they have timestamps in the
+future or part of the last aggregation period. The last aggregation period size
+is based on the largest granularity defined in the archive policy definition.
+To allow processing measures that are older than the period, the `back_window`
+parameter can be used to set the number of coarsest periods to keep. That way
+it is possible to process measures that are older than the last timestamp
+period boundary.
+
+For example, if an archive policy is defined with coarsest aggregation of 1
+hour, and the last point processed has a timestamp of 14:34, it's possible to
+process measures back to 14:00 with a `back_window` of 0. If the `back_window`
+is set to 2, it will be possible to send measures with timestamp back to 12:00
+(14:00 minus 2 times 1 hour).
+
+The REST API allows you to create archive policies in this way:
+
+{{ scenarios['create-archive-policy']['doc'] }}
+
+By default, the aggregation methods computed and stored are the ones defined
+with `default_aggregation_methods` in the configuration file. It is possible to
+change the aggregation methods used in an archive policy by specifying the list
+of aggregation methods to use in the `aggregation_methods` attribute of an
+archive policy.
+
+{{ scenarios['create-archive-policy-without-max']['doc'] }}
+
+The list of aggregation methods can either be:
+
+- a list of aggregation methods to use, e.g. `["mean", "max"]`
+
+- a list of methods to remove (prefixed by `-`) and/or to add (prefixed by `+`)
+ to the default list (e.g. `["+mean", "-last"]`)
+
+If `*` is included in the list, it's substituted by the list of all supported
+aggregation methods.
+
+Once the archive policy is created, the complete set of properties is computed
+and returned, with the URL of the archive policy. This URL can be used to
+retrieve the details of the archive policy later:
+
+{{ scenarios['get-archive-policy']['doc'] }}
+
+It is also possible to list archive policies:
+
+{{ scenarios['list-archive-policy']['doc'] }}
+
+Existing archive policies can be modified to retain more or less data depending
+on requirements. If the policy coverage is expanded, measures are not
+retroactively calculated as backfill to accommodate the new timespan:
+
+{{ scenarios['update-archive-policy']['doc'] }}
+
+.. note::
+
+ Granularities cannot be changed to a different rate. Also, granularities
+ cannot be added or dropped from a policy.
+
+It is possible to delete an archive policy if it is not used by any metric:
+
+{{ scenarios['delete-archive-policy']['doc'] }}
+
+.. note::
+
+ An archive policy cannot be deleted until all metrics associated with it
+ are removed by a metricd daemon.
+
+
+Archive Policy Rule
+===================
+
+Gnocchi provides the ability to define a mapping called `archive_policy_rule`.
+An archive policy rule defines a mapping between a metric and an archive policy.
+This gives users the ability to pre-define rules so an archive policy is assigned to
+metrics based on a matched pattern.
+
+An archive policy rule has a few properties: a name to identify it, the name
+of the archive policy to apply, and a metric pattern to match metric names.
+
+For example, an archive policy rule could be a mapping that applies a medium
+archive policy by default to any volume metric matching the pattern
+`volume.*`. When a sample metric is posted with a name of `volume.size`, it
+matches the pattern, the rule applies, and the archive policy is set to
+medium. If multiple rules match, the longest matching rule is taken. For
+example, if two rules exist which match `*` and `disk.*`, a `disk.io.rate`
+metric would match the `disk.*` rule rather than the `*` rule.
+
+To create a rule, the following API request should be used:
+
+{{ scenarios['create-archive-policy-rule']['doc'] }}
+
+The `metric_pattern` attribute is used for pattern matching. Some examples:
+
+- `*` matches anything
+- `disk.*` matches disk.io
+- `disk.io.*` matches disk.io.rate
+
+Once created, you can retrieve the rule information:
+
+{{ scenarios['get-archive-policy-rule']['doc'] }}
+
+It is also possible to list archive policy rules. The result set is ordered by
+the `metric_pattern`, in reverse alphabetical order:
+
+{{ scenarios['list-archive-policy-rule']['doc'] }}
+
+It is possible to delete an archive policy rule:
+
+{{ scenarios['delete-archive-policy-rule']['doc'] }}
+
+Resources
+=========
+
+Gnocchi provides the ability to store and index resources. Each resource has a
+type. The basic type of resources is *generic*, but more specialized subtypes
+also exist, especially to describe OpenStack resources.
+
+The REST API allows to manipulate resources. To create a generic resource:
+
+{{ scenarios['create-resource-generic']['doc'] }}
+
+The *id*, *user_id* and *project_id* attributes must be UUIDs. The timestamps
+describing the lifespan of the resource are optional, and *started_at* is by
+default set to the current timestamp.
+
+It's possible to retrieve the resource by the URL provided in the `Location`
+header.
+
+More specialized resources can be created. For example, the *instance* is used
+to describe an OpenStack instance as managed by Nova_.
+
+{{ scenarios['create-resource-instance']['doc'] }}
+
+All specialized types have their own optional and mandatory attributes,
+but they all include attributes from the generic type as well.
+
+It is possible to create metrics at the same time you create a resource to save
+some requests:
+
+{{ scenarios['create-resource-with-new-metrics']['doc'] }}
+
+To retrieve a resource by its URL provided by the `Location` header at creation
+time:
+
+{{ scenarios['get-resource-generic']['doc'] }}
+
+It's possible to modify a resource by re-uploading it partially with the
+modified fields:
+
+{{ scenarios['patch-resource']['doc'] }}
+
+And to retrieve its modification history:
+
+{{ scenarios['get-patched-instance-history']['doc'] }}
+
+It is possible to delete a resource altogether:
+
+{{ scenarios['delete-resource-generic']['doc'] }}
+
+It is also possible to delete a batch of resources based on attribute values;
+the number of deleted resources is returned.
+
+To delete resources based on ids:
+
+{{ scenarios['delete-resources-by-ids']['doc'] }}
+
+or delete resources based on time:
+
+{{ scenarios['delete-resources-by-time']['doc']}}
+
+.. IMPORTANT::
+
+   When a resource is deleted, all its associated metrics are deleted at the
+   same time.
+
+   When a batch of resources is deleted, an attribute filter is required to
+   avoid deletion of the entire database.
+
+
+All resources can be listed, either by using the `generic` type that will list
+all types of resources, or by filtering on their resource type:
+
+{{ scenarios['list-resource-generic']['doc'] }}
+
+No attributes specific to the resource type are retrieved when using the
+`generic` endpoint. To retrieve the details, either list using the specific
+resource type endpoint:
+
+{{ scenarios['list-resource-instance']['doc'] }}
+
+or using `details=true` in the query parameter:
+
+{{ scenarios['list-resource-generic-details']['doc'] }}
+
+.. note::
+
+   Similar to the metric list, query results are limited to the `max_limit`
+   value set in the configuration file.
+
+Returned results represent a single page of data and are ordered by resources'
+revision_start time and started_at values:
+
+{{ scenarios['list-resource-generic-pagination']['doc'] }}
+
+Each resource can be linked to any number of metrics. The `metrics` attribute
+is a key/value field where the key is the name of the relationship and
+the value is a metric:
+
+{{ scenarios['create-resource-instance-with-metrics']['doc'] }}
+
+It's also possible to create metrics dynamically while creating a resource:
+
+{{ scenarios['create-resource-instance-with-dynamic-metrics']['doc'] }}
+
+The metric associated with a resource can be accessed and manipulated using the
+usual `/v1/metric` endpoint or using the named relationship with the resource:
+
+{{ scenarios['get-resource-named-metrics-measures']['doc'] }}
+
+The same endpoint can be used to append metrics to a resource:
+
+{{ scenarios['append-metrics-to-resource']['doc'] }}
+
+.. _Nova: http://launchpad.net/nova
+
+Resource Types
+==============
+
+Gnocchi is able to manage resource types with custom attributes.
+
+To create a new resource type:
+
+{{ scenarios['create-resource-type']['doc'] }}
+
+Then to retrieve its description:
+
+{{ scenarios['get-resource-type']['doc'] }}
+
+All resource types can be listed like this:
+
+{{ scenarios['list-resource-type']['doc'] }}
+
+It can also be deleted if no more resources are associated with it:
+
+{{ scenarios['delete-resource-type']['doc'] }}
+
+Attributes can be added or removed:
+
+{{ scenarios['patch-resource-type']['doc'] }}
+
+Creating a resource type means creating new tables in the indexer backend.
+This is a heavy operation that will lock some tables for a short amount of
+time. When the resource type is created, its initial `state` is `creating`.
+When the new tables have been created, the state switches to `active` and the
+new resource type is ready to be used. If something unexpected occurs during
+this step, the state switches to `creation_error`.
+
+The same behavior occurs when the resource type is deleted: the state first
+switches to `deleting` and the resource type is no longer usable. Then the
+tables are removed and the resource type is finally deleted from the database.
+If something unexpected occurs, the state switches to `deletion_error`.
+
+Searching for resources
+=======================
+
+It's possible to search for resources using a query mechanism, using the
+`POST` method and uploading a JSON formatted query.
+
+When listing resources, it is possible to filter resources based on attribute
+values:
+
+{{ scenarios['search-resource-for-user']['doc'] }}
+
+Or even:
+
+{{ scenarios['search-resource-for-host-like']['doc'] }}
+
+Complex operators such as `and` and `or` are also available:
+
+{{ scenarios['search-resource-for-user-after-timestamp']['doc'] }}
+
+Details about the resource can also be retrieved at the same time:
+
+{{ scenarios['search-resource-for-user-details']['doc'] }}
+
+It's possible to search for old revisions of resources in the same way:
+
+{{ scenarios['search-resource-history']['doc'] }}
+
+It is also possible to send the *history* parameter in the *Accept* header:
+
+{{ scenarios['search-resource-history-in-accept']['doc'] }}
+
+The time range of the history can be set, too:
+
+{{ scenarios['search-resource-history-partial']['doc'] }}
+
+The supported operators are: equal to (`=`, `==` or `eq`), less than (`<` or
+`lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`),
+greater than or equal to (`>=`, `ge` or `≥`), not equal to (`!=`, `ne` or `≠`),
+value is in (`in`), value is like (`like`), or (`or` or `∨`), and (`and` or
+`∧`) and negation (`not`).
+
+The special attribute `lifespan`, which is equivalent to `ended_at -
+started_at`, is also available in the filtering queries.
+
+{{ scenarios['search-resource-lifespan']['doc'] }}
+
+
+Searching for values in metrics
+===============================
+
+It is possible to search for values in metrics. For example, this will look for
+all values that are greater than or equal to 50 if we add 23 to them and that
+are not equal to 55. You have to specify the list of metrics to look into by
+using the `metric_id` query parameter several times.
+
+{{ scenarios['search-value-in-metric']['doc'] }}
+
+And it is possible to search for values in metrics by using one or multiple
+granularities:
+
+{{ scenarios['search-value-in-metrics-by-granularity']['doc'] }}
+
+You can specify a time range to look for by specifying the `start` and/or
+`stop` query parameter, and the aggregation method to use by specifying the
+`aggregation` query parameter.
+
+The supported operators are: equal to (`=`, `==` or `eq`), less than (`<` or
+`lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`),
+greater than or equal to (`>=`, `ge` or `≥`), not equal to (`!=`, `ne` or `≠`),
+addition (`+` or `add`), subtraction (`-` or `sub`), multiplication (`*`,
+`mul` or `×`) and division (`/`, `div` or `÷`). These operations take either
+one argument, in which case the value of the point is implicitly used as the
+second argument, or two arguments.
+
+The operators or (`or` or `∨`), and (`and` or `∧`) and `not` are also
+supported, and take a list of arguments as parameters.
+
+Aggregation across metrics
+==========================
+
+Gnocchi allows on-the-fly aggregation of the already aggregated data of
+metrics.
+
+It can be done by providing the list of metrics to aggregate:
+
+{{ scenarios['get-across-metrics-measures-by-metric-ids']['doc'] }}
+
+.. Note::
+
+   This aggregation is done against the aggregates built and updated for
+   a metric when new measures are posted in Gnocchi. Therefore, the aggregate
+   of this already aggregated data may not make sense for certain kinds of
+   aggregation methods (e.g. stdev).
+
+By default, the measures are aggregated using the aggregation method provided,
+e.g. you'll get a mean of means, or a max of maxes. You can specify what method
+to use over the retrieved aggregation by using the `reaggregation` parameter:
+
+{{ scenarios['get-across-metrics-measures-by-metric-ids-reaggregate']['doc'] }}
+
+It's also possible to do that aggregation on metrics linked to resources. In
+order to select these resources, the following endpoint accepts a query such as
+the one described in `Searching for resources`_.
+
+{{ scenarios['get-across-metrics-measures-by-attributes-lookup']['doc'] }}
+
+It is possible to group the resource search results by any attribute of the
+requested resource type, and then compute the aggregation:
+
+{{ scenarios['get-across-metrics-measures-by-attributes-lookup-groupby']['doc'] }}
+
+Similar to retrieving measures for a single metric, the `refresh` parameter
+can be provided to force all POSTed measures to be processed across all
+metrics before computing the result. The `resample` parameter may be used as
+well.
+
+.. note::
+
+ Resampling is done prior to any reaggregation if both parameters are
+ specified.
+
+Also, aggregation across metrics has a different behavior depending on whether
+boundary values (`start` and `stop`) are set and whether `needed_overlap` is
+set.
+
+If boundaries are not set, Gnocchi computes the aggregation only with points
+at timestamps present in all time series. When boundaries are set, Gnocchi
+expects a certain percentage of timestamps to be common between the time
+series; this percentage is controlled by `needed_overlap` (defaulting to
+100%). If this percentage is not reached, an error is returned.
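+
+As a sketch, to tolerate time series sharing only half of their timestamps,
+such a request could look like this (the metric ids are placeholders)::
+
+    GET /v1/aggregation/metric?metric=<id1>&metric=<id2>&needed_overlap=50 HTTP/1.1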
+
+The ability to fill in points missing from a subset of time series is
+supported by specifying a `fill` value. Valid fill values include any float or
+`null`, which computes the aggregation with only the points that exist. The
+`fill` parameter will not backfill timestamps which contain no points in any
+of the time series. Only timestamps which have data points in at least one of
+the time series are returned.
+
+.. note::
+
+ A granularity must be specified when using the `fill` parameter.
+
+{{ scenarios['get-across-metrics-measures-by-metric-ids-fill']['doc'] }}
+
+
+Capabilities
+============
+
+The list of aggregation methods that can be used in Gnocchi is extendable and
+can differ between deployments. It is possible to get the supported list of
+aggregation methods from the API server:
+
+{{ scenarios['get-capabilities']['doc'] }}
+
+Status
+======
+
+The overall status of the Gnocchi installation can be retrieved via an API call
+reporting values such as the number of new measures to process for each metric:
+
+{{ scenarios['get-status']['doc'] }}
+
+
+Timestamp format
+================
+
+Timestamps used in Gnocchi are always returned using the ISO 8601 format.
+Gnocchi is able to understand a few formats of timestamp when querying or
+creating resources, for example:
+
+- "2014-01-01 12:12:34" or "2014-05-20T10:00:45.856219", ISO 8601 timestamps.
+- "10 minutes", which means "10 minutes from now".
+- "-2 days", which means "2 days ago".
+- 1421767030, a Unix epoch based timestamp.
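+
+As a sketch, a relative timestamp can be passed directly as a query parameter
+(the value must be URL-encoded; the metric id is a placeholder)::
+
+    GET /v1/metric/<id>/measures?start=-2%20days HTTP/1.1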
diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml
new file mode 100644
index 00000000..396576ee
--- /dev/null
+++ b/doc/source/rest.yaml
@@ -0,0 +1,749 @@
+- name: create-archive-policy
+ request: |
+ POST /v1/archive_policy HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "name": "short",
+ "back_window": 0,
+ "definition": [
+ {
+ "granularity": "1s",
+ "timespan": "1 hour"
+ },
+ {
+ "points": 48,
+ "timespan": "1 day"
+ }
+ ]
+ }
+
+- name: create-archive-policy-without-max
+ request: |
+ POST /v1/archive_policy HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "name": "short-without-max",
+ "aggregation_methods": ["-max", "-min"],
+ "back_window": 0,
+ "definition": [
+ {
+ "granularity": "1s",
+ "timespan": "1 hour"
+ },
+ {
+ "points": 48,
+ "timespan": "1 day"
+ }
+ ]
+ }
+
+- name: get-archive-policy
+ request: GET /v1/archive_policy/{{ scenarios['create-archive-policy']['response'].json['name'] }} HTTP/1.1
+
+- name: list-archive-policy
+ request: GET /v1/archive_policy HTTP/1.1
+
+- name: update-archive-policy
+ request: |
+ PATCH /v1/archive_policy/{{ scenarios['create-archive-policy']['response'].json['name'] }} HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "definition": [
+ {
+ "granularity": "1s",
+ "timespan": "1 hour"
+ },
+ {
+ "points": 48,
+ "timespan": "1 day"
+ }
+ ]
+ }
+
+- name: create-archive-policy-to-delete
+ request: |
+ POST /v1/archive_policy HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "name": "some-archive-policy",
+ "back_window": 0,
+ "definition": [
+ {
+ "granularity": "1s",
+ "timespan": "1 hour"
+ },
+ {
+ "points": 48,
+ "timespan": "1 day"
+ }
+ ]
+ }
+
+- name: delete-archive-policy
+ request: DELETE /v1/archive_policy/{{ scenarios['create-archive-policy-to-delete']['response'].json['name'] }} HTTP/1.1
+
+- name: create-metric
+ request: |
+ POST /v1/metric HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "archive_policy_name": "high"
+ }
+
+- name: create-metric-2
+ request: |
+ POST /v1/metric HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "archive_policy_name": "low"
+ }
+
+- name: create-archive-policy-rule
+ request: |
+ POST /v1/archive_policy_rule HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "name": "test_rule",
+ "metric_pattern": "disk.io.*",
+ "archive_policy_name": "low"
+ }
+
+- name: get-archive-policy-rule
+ request: GET /v1/archive_policy_rule/{{ scenarios['create-archive-policy-rule']['response'].json['name'] }} HTTP/1.1
+
+- name: list-archive-policy-rule
+ request: GET /v1/archive_policy_rule HTTP/1.1
+
+- name: create-archive-policy-rule-to-delete
+ request: |
+ POST /v1/archive_policy_rule HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "name": "test_rule_delete",
+ "metric_pattern": "disk.io.*",
+ "archive_policy_name": "low"
+ }
+
+- name: delete-archive-policy-rule
+ request: DELETE /v1/archive_policy_rule/{{ scenarios['create-archive-policy-rule-to-delete']['response'].json['name'] }} HTTP/1.1
+
+
+- name: get-metric
+ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }} HTTP/1.1
+
+- name: list-metric
+ request: GET /v1/metric HTTP/1.1
+
+- name: list-metric-pagination
+ request: GET /v1/metric?limit=100&sort=name:asc HTTP/1.1
+
+- name: post-measures
+ request: |
+ POST /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures HTTP/1.1
+ Content-Type: application/json
+
+ [
+ {
+ "timestamp": "2014-10-06T14:33:57",
+ "value": 43.1
+ },
+ {
+ "timestamp": "2014-10-06T14:34:12",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:20",
+ "value": 2
+ }
+ ]
+
+- name: post-measures-batch
+ request: |
+ POST /v1/batch/metrics/measures HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "{{ scenarios['create-metric']['response'].json['id'] }}":
+ [
+ {
+ "timestamp": "2014-10-06T14:34:12",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:20",
+ "value": 2
+ }
+ ],
+ "{{ scenarios['create-metric-2']['response'].json['id'] }}":
+ [
+ {
+ "timestamp": "2014-10-06T16:12:12",
+ "value": 3
+ },
+ {
+ "timestamp": "2014-10-06T18:14:52",
+ "value": 4
+ }
+ ]
+ }
+
+- name: search-value-in-metric
+ request: |
+ POST /v1/search/metric?metric_id={{ scenarios['create-metric']['response'].json['id'] }} HTTP/1.1
+ Content-Type: application/json
+
+ {"and": [{">=": [{"+": 23}, 50]}, {"!=": 55}]}
+
+- name: create-metric-a
+ request: |
+ POST /v1/metric HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "archive_policy_name": "short"
+ }
+
+- name: post-measures-for-granularity-search
+ request: |
+ POST /v1/metric/{{ scenarios['create-metric-a']['response'].json['id'] }}/measures HTTP/1.1
+ Content-Type: application/json
+
+ [
+ {
+ "timestamp": "2014-10-06T14:34:12",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:14",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:16",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:18",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:20",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:22",
+ "value": 12
+ },
+ {
+ "timestamp": "2014-10-06T14:34:24",
+ "value": 12
+ }
+ ]
+
+- name: search-value-in-metrics-by-granularity
+ request: |
+ POST /v1/search/metric?metric_id={{ scenarios['create-metric-a']['response'].json['id'] }}&granularity=1second&granularity=1800s HTTP/1.1
+ Content-Type: application/json
+
+ {"=": 12}
+
+- name: get-measures
+ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures HTTP/1.1
+
+- name: get-measures-from
+ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?start=2014-10-06T14:34 HTTP/1.1
+
+- name: get-measures-max
+ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?aggregation=max HTTP/1.1
+
+- name: get-measures-granularity
+ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1 HTTP/1.1
+
+- name: get-measures-refresh
+ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?refresh=true HTTP/1.1
+
+- name: get-measures-resample
+ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?resample=5&granularity=1 HTTP/1.1
+
+- name: create-resource-generic
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "75C44741-CC60-4033-804E-2D3098C7D2E9",
+ "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D"
+ }
+
+- name: create-resource-with-new-metrics
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "AB68DA77-FA82-4E67-ABA9-270C5A98CBCB",
+ "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "metrics": {"temperature": {"archive_policy_name": "low"}}
+ }
+
+- name: create-resource-type-instance
+ request: |
+ POST /v1/resource_type HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "name": "instance",
+ "attributes": {
+ "display_name": {"type": "string", "required": true},
+ "flavor_id": {"type": "string", "required": true},
+ "image_ref": {"type": "string", "required": true},
+ "host": {"type": "string", "required": true},
+ "server_group": {"type": "string", "required": false}
+ }
+ }
+
+- name: create-resource-instance
+ request: |
+ POST /v1/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "6868DA77-FA82-4E67-ABA9-270C5AE8CBCA",
+ "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "started_at": "2014-01-02 23:23:34",
+ "ended_at": "2014-01-04 10:00:12",
+ "flavor_id": "2",
+ "image_ref": "http://image",
+ "host": "compute1",
+ "display_name": "myvm",
+ "metrics": {}
+ }
+
+- name: list-resource-generic
+ request: GET /v1/resource/generic HTTP/1.1
+
+- name: list-resource-instance
+ request: GET /v1/resource/instance HTTP/1.1
+
+- name: list-resource-generic-details
+ request: GET /v1/resource/generic?details=true HTTP/1.1
+
+- name: list-resource-generic-pagination
+ request: GET /v1/resource/generic?limit=2&sort=id:asc HTTP/1.1
+
+- name: search-resource-for-user
+ request: |
+ POST /v1/search/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}}
+
+- name: search-resource-for-host-like
+ request: |
+ POST /v1/search/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {"like": {"host": "compute%"}}
+
+- name: search-resource-for-user-details
+ request: |
+ POST /v1/search/resource/generic?details=true HTTP/1.1
+ Content-Type: application/json
+
+ {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}}
+
+- name: search-resource-for-user-after-timestamp
+ request: |
+ POST /v1/search/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {"and": [
+ {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}},
+ {">=": {"started_at": "2010-01-01"}}
+ ]}
+
+- name: search-resource-lifespan
+ request: |
+ POST /v1/search/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {">=": {"lifespan": "30 min"}}
+
+- name: get-resource-generic
+ request: GET /v1/resource/generic/{{ scenarios['create-resource-generic']['response'].json['id'] }} HTTP/1.1
+
+- name: get-instance
+ request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1
+
+- name: create-resource-instance-bis
+ request: |
+ POST /v1/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "AB0B5802-E79B-4C84-8998-9237F60D9CAE",
+ "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "flavor_id": "2",
+ "image_ref": "http://image",
+ "host": "compute1",
+ "display_name": "myvm",
+ "metrics": {}
+ }
+
+- name: patch-resource
+ request: |
+ PATCH /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1
+ Content-Type: application/json
+
+ {"host": "compute2"}
+
+- name: get-patched-instance-history
+ request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }}/history HTTP/1.1
+
+- name: get-patched-instance
+ request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1
+
+
+- name: create-resource-type
+ request: |
+ POST /v1/resource_type HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "name": "my_custom_type",
+ "attributes": {
+ "myid": {"type": "uuid"},
+ "display_name": {"type": "string", "required": true},
+ "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3},
+ "size": {"type": "number", "min": 5, "max": 32.8},
+ "enabled": {"type": "bool", "required": false}
+ }
+ }
+
+- name: create-resource-type-2
+ request: |
+ POST /v1/resource_type HTTP/1.1
+ Content-Type: application/json
+
+ {"name": "my_other_type"}
+
+- name: get-resource-type
+ request: GET /v1/resource_type/my_custom_type HTTP/1.1
+
+- name: list-resource-type
+ request: GET /v1/resource_type HTTP/1.1
+
+- name: patch-resource-type
+ request: |
+ PATCH /v1/resource_type/my_custom_type HTTP/1.1
+ Content-Type: application/json-patch+json
+
+ [
+ {
+ "op": "add",
+ "path": "/attributes/awesome-stuff",
+ "value": {"type": "bool", "required": false}
+ },
+ {
+ "op": "add",
+ "path": "/attributes/required-stuff",
+ "value": {"type": "bool", "required": true, "options": {"fill": true}}
+ },
+ {
+ "op": "remove",
+ "path": "/attributes/prefix"
+ }
+ ]
+
+
+- name: delete-resource-type
+ request: DELETE /v1/resource_type/my_custom_type HTTP/1.1
+
+- name: search-resource-history
+ request: |
+ POST /v1/search/resource/instance?history=true HTTP/1.1
+ Content-Type: application/json
+
+ {"=": {"id": "{{ scenarios['create-resource-instance']['response'].json['id'] }}"}}
+
+- name: search-resource-history-in-accept
+ request: |
+ POST /v1/search/resource/instance HTTP/1.1
+ Content-Type: application/json
+ Accept: application/json; history=true
+
+ {"=": {"id": "{{ scenarios['create-resource-instance']['response'].json['id'] }}"}}
+
+- name: search-resource-history-partial
+ request: |
+ POST /v1/search/resource/instance HTTP/1.1
+ Content-Type: application/json
+ Accept: application/json; history=true
+
+ {"and": [
+ {"=": {"host": "compute1"}},
+ {">=": {"revision_start": "{{ scenarios['get-instance']['response'].json['revision_start'] }}"}},
+ {"or": [{"<=": {"revision_end": "{{ scenarios['get-patched-instance']['response'].json['revision_start'] }}"}},
+ {"=": {"revision_end": null}}]}
+ ]}
+
+- name: create-resource-instance-with-metrics
+ request: |
+ POST /v1/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "6F24EDD9-5A2F-4592-B708-FFBED821C5D2",
+ "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "flavor_id": "2",
+ "image_ref": "http://image",
+ "host": "compute1",
+ "display_name": "myvm2",
+ "server_group": "my_autoscaling_group",
+ "metrics": {"cpu.util": "{{ scenarios['create-metric']['response'].json['id'] }}"}
+ }
+
+- name: create-resource-instance-with-dynamic-metrics
+ request: |
+ POST /v1/resource/instance HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "15e9c872-7ca9-11e4-a2da-2fb4032dfc09",
+ "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D",
+ "flavor_id": "2",
+ "image_ref": "http://image",
+ "host": "compute2",
+ "display_name": "myvm3",
+ "server_group": "my_autoscaling_group",
+ "metrics": {"cpu.util": {"archive_policy_name": "{{ scenarios['create-archive-policy']['response'].json['name'] }}"}}
+ }
+
+- name: post-measures-batch-named
+ request: |
+ POST /v1/batch/resources/metrics/measures HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "{{ scenarios['create-resource-with-new-metrics']['response'].json['id'] }}": {
+ "temperature": [
+ { "timestamp": "2014-10-06T14:34:12", "value": 17 },
+ { "timestamp": "2014-10-06T14:34:20", "value": 18 }
+ ]
+ },
+ "{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['id'] }}": {
+ "cpu.util": [
+ { "timestamp": "2014-10-06T14:34:12", "value": 12 },
+ { "timestamp": "2014-10-06T14:34:20", "value": 2 }
+ ]
+ },
+ "{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}": {
+ "cpu.util": [
+ { "timestamp": "2014-10-06T14:34:12", "value": 6 },
+ { "timestamp": "2014-10-06T14:34:20", "value": 25 }
+ ]
+ }
+ }
+
+- name: post-measures-batch-named-create
+ request: |
+ POST /v1/batch/resources/metrics/measures?create_metrics=true HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "{{ scenarios['create-resource-with-new-metrics']['response'].json['id'] }}": {
+ "disk.io.test": [
+ { "timestamp": "2014-10-06T14:34:12", "value": 71 },
+ { "timestamp": "2014-10-06T14:34:20", "value": 81 }
+ ]
+ }
+ }
+
+- name: delete-resource-generic
+ request: DELETE /v1/resource/generic/{{ scenarios['create-resource-generic']['response'].json['id'] }} HTTP/1.1
+
+- name: create-resources-a
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "340102AA-AA19-BBE0-E1E2-2D3JDC7D289R",
+ "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+ "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+ }
+
+- name: create-resources-b
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "340102AA-AAEF-AA90-E1E2-2D3JDC7D289R",
+ "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+ "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+ }
+
+- name: create-resources-c
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "340102AA-AAEF-BCEF-E112-2D3JDC7D289R",
+ "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+ "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+ }
+
+- name: create-resources-d
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "340102AA-AAEF-BCEF-E112-2D15DC7D289R",
+ "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+ "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+ }
+
+- name: create-resources-e
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "340102AA-AAEF-BCEF-E112-2D3JDC30289R",
+ "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+ "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+ }
+
+- name: create-resources-f
+ request: |
+ POST /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "id": "340102AA-AAEF-BCEF-E112-2D15349D109R",
+ "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+ "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+ }
+
+- name: delete-resources-by-ids
+ request: |
+ DELETE /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "in": {
+ "id": [
+ "{{ scenarios['create-resources-a']['response'].json['id'] }}",
+ "{{ scenarios['create-resources-b']['response'].json['id'] }}",
+ "{{ scenarios['create-resources-c']['response'].json['id'] }}"
+ ]
+ }
+ }
+
+- name: delete-resources-by-time
+ request: |
+ DELETE /v1/resource/generic HTTP/1.1
+ Content-Type: application/json
+
+ {
+ ">=": {"started_at": "{{ scenarios['create-resources-f']['response'].json['started_at'] }}"}
+ }
+
+
+- name: get-resource-named-metrics-measures
+ request: GET /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric/cpu.util/measures?start=2014-10-06T14:34 HTTP/1.1
+
+- name: post-resource-named-metrics-measures1
+ request: |
+ POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric/cpu.util/measures HTTP/1.1
+ Content-Type: application/json
+
+ [
+ {
+ "timestamp": "2014-10-06T14:33:57",
+ "value": 3.5
+ },
+ {
+ "timestamp": "2014-10-06T14:34:12",
+ "value": 20
+ },
+ {
+ "timestamp": "2014-10-06T14:34:20",
+ "value": 9
+ }
+ ]
+
+- name: post-resource-named-metrics-measures2
+ request: |
+ POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['id'] }}/metric/cpu.util/measures HTTP/1.1
+ Content-Type: application/json
+
+ [
+ {
+ "timestamp": "2014-10-06T14:33:57",
+ "value": 25.1
+ },
+ {
+ "timestamp": "2014-10-06T14:34:12",
+ "value": 4.5
+ },
+ {
+ "timestamp": "2014-10-06T14:34:20",
+ "value": 14.2
+ }
+ ]
+
+- name: get-across-metrics-measures-by-attributes-lookup
+ request: |
+ POST /v1/aggregation/resource/instance/metric/cpu.util?start=2014-10-06T14:34&aggregation=mean HTTP/1.1
+ Content-Type: application/json
+
+ {"=": {"server_group": "my_autoscaling_group"}}
+
+- name: get-across-metrics-measures-by-attributes-lookup-groupby
+ request: |
+ POST /v1/aggregation/resource/instance/metric/cpu.util?groupby=host&groupby=flavor_id HTTP/1.1
+ Content-Type: application/json
+
+ {"=": {"server_group": "my_autoscaling_group"}}
+
+- name: get-across-metrics-measures-by-metric-ids
+ request: |
+ GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&start=2014-10-06T14:34&aggregation=mean HTTP/1.1
+
+- name: get-across-metrics-measures-by-metric-ids-reaggregate
+ request: |
+ GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&aggregation=mean&reaggregation=min HTTP/1.1
+
+- name: get-across-metrics-measures-by-metric-ids-fill
+ request: |
+ GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&fill=0&granularity=1 HTTP/1.1
+
+- name: append-metrics-to-resource
+ request: |
+ POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric HTTP/1.1
+ Content-Type: application/json
+
+ {"memory": {"archive_policy_name": "low"}}
+
+- name: get-capabilities
+ request: GET /v1/capabilities HTTP/1.1
+
+- name: get-status
+ request: GET /v1/status HTTP/1.1
diff --git a/doc/source/running.rst b/doc/source/running.rst
new file mode 100644
index 00000000..48c437ca
--- /dev/null
+++ b/doc/source/running.rst
@@ -0,0 +1,246 @@
+===============
+Running Gnocchi
+===============
+
+To run Gnocchi, simply run the HTTP server and metric daemon:
+
+::
+
+ gnocchi-api
+ gnocchi-metricd
+
+
+Running API As A WSGI Application
+=================================
+
+The Gnocchi API tier runs using WSGI. This means it can be run using `Apache
+httpd`_ and `mod_wsgi`_, or another HTTP daemon such as `uwsgi`_. You should
+configure the number of processes and threads according to the number of CPUs
+you have, usually around 1.5 × the number of CPUs. If one server is not
+enough, you can spawn any number of new API servers to scale Gnocchi out, even
+on different machines.
+
+The following uwsgi configuration file can be used::
+
+    [uwsgi]
+    http = localhost:8041
+    # Set the correct path depending on your installation
+    wsgi-file = /usr/local/bin/gnocchi-api
+    master = true
+    die-on-term = true
+    threads = 32
+    # Adjust based on the number of CPU
+    processes = 32
+    enable-threads = true
+    thunder-lock = true
+    plugins = python
+    buffer-size = 65535
+    lazy-apps = true
+
+Once written to `/etc/gnocchi/uwsgi.ini`, it can be launched this way::
+
+ uwsgi /etc/gnocchi/uwsgi.ini
+
+.. _Apache httpd: http://httpd.apache.org/
+.. _mod_wsgi: https://modwsgi.readthedocs.org/
+.. _uwsgi: https://uwsgi-docs.readthedocs.org/
+
+How to define archive policies
+==============================
+
+In Gnocchi, the archive policy definitions are expressed in number of points.
+If your archive policy defines a policy of 10 points with a granularity of 1
+second, the time series archive will keep up to 10 points, each representing
+an aggregation over 1 second. This means the time series will at maximum
+retain 10 seconds of data (sometimes a bit more) between the most recent point
+and the oldest point. That does not mean it will be 10 consecutive seconds:
+there might be a gap if data is fed irregularly.
+
+There is no expiry of data relative to the current timestamp.
+
+Therefore, both the archive policy and the granularity entirely depend on your
+use case. Depending on the usage of your data, you can define several archiving
+policies. A typical low-grained use case could be::
+
+ 3600 points with a granularity of 1 second = 1 hour
+ 1440 points with a granularity of 1 minute = 24 hours
+ 720 points with a granularity of 1 hour = 30 days
+ 365 points with a granularity of 1 day = 1 year
+
+This would represent 6125 points × 8 bytes = 48 KiB per aggregation method. If
+you use the 8 standard aggregation methods, your metric will take up to 8 × 48
+KiB = 384 KiB of disk space.
+
+Be aware that the more definitions you set in an archive policy, the more CPU
+it will consume. Therefore, creating an archive policy with 2 definitions (e.g.
+1 second granularity for 1 day and 1 minute granularity for 1 month) may
+consume twice as much CPU as just one definition (e.g. just 1 second
+granularity for 1 day).
+
+Default archive policies
+========================
+
+By default, 3 archive policies are created when calling `gnocchi-upgrade`:
+*low*, *medium* and *high*. The names describe both the storage space and CPU
+usage needs. They use `default_aggregation_methods` which is by default set to
+*mean*, *min*, *max*, *sum*, *std*, *count*.
+
+A fourth archive policy named `bool` is also provided by default and is
+designed to store only boolean values (i.e. 0 and 1). It only stores one data
+point for each second (using the `last` aggregation method), with a one year
+retention period. The maximum optimistic storage size is estimated based on the
+assumption that no other value than 0 and 1 are sent as measures. If other
+values are sent, the maximum pessimistic storage size is taken into account.
+
+- low
+
+ * 5 minutes granularity over 30 days
+ * aggregation methods used: `default_aggregation_methods`
+ * maximum estimated size per metric: 406 KiB
+
+- medium
+
+ * 1 minute granularity over 7 days
+ * 1 hour granularity over 365 days
+ * aggregation methods used: `default_aggregation_methods`
+ * maximum estimated size per metric: 887 KiB
+
+- high
+
+ * 1 second granularity over 1 hour
+ * 1 minute granularity over 1 week
+ * 1 hour granularity over 1 year
+ * aggregation methods used: `default_aggregation_methods`
+ * maximum estimated size per metric: 1 057 KiB
+
+- bool
+
+  * 1 second granularity over 1 year
+ * aggregation methods used: *last*
+ * maximum optimistic size per metric: 1 539 KiB
+ * maximum pessimistic size per metric: 277 172 KiB
+
+How to plan for Gnocchi’s storage
+=================================
+
+Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi,
+a time series is a collection of points, where a point is a given measure, or
+sample, in the lifespan of a time series. The storage format is compressed
+using various techniques, therefore the size of a time series can be estimated
+in the **worst** case scenario with the following formula::
+
+ number of points × 8 bytes = size in bytes
+
+The number of points you want to keep is usually determined by the following
+formula::
+
+ number of points = timespan ÷ granularity
+
+For example, if you want to keep a year of data with a one minute resolution::
+
+ number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute
+ number of points = 525 600
+
+Then::
+
+ size in bytes = 525 600 points × 8 bytes = 4 204 800 bytes = 4 106 KiB
+
+This is just for a single aggregated time series. If your archive policy uses
+the 6 default aggregation methods (mean, min, max, sum, std, count) with the
+same "one year, one minute aggregations" resolution, the space used will go up
+to a maximum of 6 × 4.1 MiB = 24.6 MiB.
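+
+The same estimate, expressed as a short Python sketch::
+
+ points = 365 * 24 * 60             # one-minute resolution over a year
+ size_bytes = points * 8            # worst case: 8 bytes per point
+ size_kib = size_bytes / 1024.0     # ~4 106 KiB for a single aggregate
+ total_mib = 6 * size_kib / 1024.0  # ~24 MiB for the 6 default methods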
+
+How many metricd workers do we need to run
+==========================================
+
+By default, the `gnocchi-metricd` daemon spawns a worker per CPU in order to
+maximize CPU utilisation when computing metric aggregation. You can use the
+`gnocchi status` command to query the HTTP API and get the cluster status for
+metric processing. It shows the number of metrics to process, known as the
+processing backlog for `gnocchi-metricd`. As long as this backlog is not
+continuously increasing, it means that `gnocchi-metricd` is able to cope with
+the amount of metrics being sent. If the number of measures to process keeps
+increasing, you will need to (maybe temporarily) increase the number of
+`gnocchi-metricd` daemons. You can run any number of metricd daemons on any
+number of servers.
+
+How to scale measure processing
+===============================
+
+Measurement data pushed to Gnocchi is divided into sacks for better
+distribution. Incoming metrics are pushed to specific sacks and each sack
+is assigned to one or more `gnocchi-metricd` daemons for processing. The
+number of sacks is controlled by the `sacks` option under the `[incoming]`
+section; guidance on choosing this value is given below.
+
+How many sacks do we need to create
+-----------------------------------
+
+The number of sacks should be set based on the number of active metrics the
+system will capture. Additionally, the number of sacks should be higher than
+the total number of active `gnocchi-metricd` workers.
+
+In general, use the following equation to determine the appropriate `sacks`
+value to set::
+
+ sacks value = number of **active** metrics / 300
+
+If the estimated number of metrics is the absolute maximum, divide the value
+by 500 instead. If the estimated number of active metrics is conservative and
+expected to grow, divide the value by 100 instead to accommodate growth.
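+
+As a Python sketch of this rule of thumb::
+
+ def sack_count(active_metrics, estimate="normal"):
+     # Divisors from the guidance above: 500 when the estimate is an
+     # absolute maximum, 100 when it is conservative and likely to grow.
+     divisor = {"max": 500, "normal": 300, "conservative": 100}[estimate]
+     return max(1, active_metrics // divisor)
+
+ sack_count(60000)  # 200 sacks for 60 000 active metrics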
+
+How do we change sack size
+--------------------------
+
+In the event your system grows to capture significantly more metrics than
+originally anticipated, the number of sacks can be changed to maintain good
+distribution. To avoid any loss of data when modifying the `sacks` option, it
+should be changed in the following order::
+
+ 1. Stop all input services (api, statsd)
+
+ 2. Stop all metricd services once backlog is cleared
+
+ 3. Run gnocchi-change-sack-size to set the new sack size. Note
+    that the sack value can only be changed if the backlog is empty.
+
+ 4. Restart all gnocchi services (api, statsd, metricd) with new configuration
+
+Alternatively, to minimise API downtime::
+
+ 1. Run gnocchi-upgrade but use a new incoming storage target such as a new
+    ceph pool, file path, etc. Additionally, set aggregate storage to a
+    new target as well.
+
+ 2. Run gnocchi-change-sack-size against new target
+
+ 3. Stop all input services (api, statsd)
+
+ 4. Restart all input services but target newly created incoming storage
+
+ 5. Once the backlog from the original incoming storage is cleared, switch
+    all metricd daemons to target the new incoming storage but maintain the
+    original aggregate storage.
+
+How to monitor Gnocchi
+======================
+
+The `/v1/status` endpoint of the HTTP API returns various information, such as
+the number of measures to process (measures backlog), which you can easily
+monitor (see `How many metricd workers do we need to run`_). Making sure that
+the HTTP server and `gnocchi-metricd` daemon are running and are not writing
+anything alarming in their logs is a sign of good health of the overall system.
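+
+For example, a minimal Python sketch polling this endpoint (the URL and
+credentials are placeholders, and the response key layout is an assumption to
+verify against your deployment)::
+
+ import requests
+
+ r = requests.get("http://localhost:8041/v1/status",
+                  auth=("admin", "secret"))   # hypothetical credentials
+ summary = r.json()["storage"]["summary"]     # assumed key layout
+ print("backlog: %d measures across %d metrics"
+       % (summary["measures"], summary["metrics"]))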
+
+Total measures for backlog status may not accurately reflect the number of
+points to be processed when measures are submitted via batch.
+
+How to backup and restore Gnocchi
+=================================
+
+In order to be able to recover from an unfortunate event, you need to back up
+both the index and the storage. That means creating a database dump (PostgreSQL
+or MySQL) and taking snapshots or copies of your data storage (Ceph, S3, Swift
+or your file system). The procedure to restore is no more complicated than the
+initial deployment: restore your index and storage backups, reinstall Gnocchi
+if necessary, and restart it.
diff --git a/doc/source/statsd.rst b/doc/source/statsd.rst
new file mode 100644
index 00000000..88405b8a
--- /dev/null
+++ b/doc/source/statsd.rst
@@ -0,0 +1,43 @@
+===================
+Statsd Daemon Usage
+===================
+
+What Is It?
+===========
+`Statsd`_ is a network daemon that listens for statistics sent over the network
+using TCP or UDP, and then sends aggregates to another backend.
+
+Gnocchi provides a daemon that is compatible with the statsd protocol and can
+listen to metrics sent over the network, named `gnocchi-statsd`.
+
+.. _`Statsd`: https://github.com/etsy/statsd/
+
+How Does It Work?
+=================
+In order to enable statsd support in Gnocchi, you need to configure the
+`[statsd]` option group in the configuration file. You need to provide a
+resource ID that will be used as the main generic resource where all the
+metrics will be attached, a user and project id that will be associated with
+the resource and metrics, and an archive policy name that will be used to
+create the metrics.
+
+All the metrics will be created dynamically as the metrics are sent to
+`gnocchi-statsd`, and attached with the provided name to the resource ID you
+configured.
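+
+For example, a minimal `[statsd]` section could look like the following
+sketch. All values are placeholders and `flush_delay` is an assumption, so
+check the generated `gnocchi.conf` for the exact option names in your
+version::
+
+ [statsd]
+ resource_id = 00000000-0000-0000-0000-000000000001
+ user_id = 00000000-0000-0000-0000-000000000002
+ project_id = 00000000-0000-0000-0000-000000000003
+ archive_policy_name = low
+ flush_delay = 10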
+
+The `gnocchi-statsd` daemon may be scaled, but trade-offs have to be made due
+to the nature of the statsd protocol. That means that if you use metrics of
+type `counter`_ or sampling (`c` in the protocol), you should always send
+those metrics to the same daemon – or not use them at all. The other supported
+types (`timing`_ and `gauges`_) do not suffer from this limitation, but be
+aware that you might have more measures than expected if you send the same
+metric to different `gnocchi-statsd` servers, as neither their caches nor
+their flush delays are synchronized.
+
+.. _`counter`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#counting
+.. _`timing`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing
+.. _`gauges`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges
+
+.. note ::
+ The statsd protocol support is incomplete: relative gauge values with +/-
+ and sets are not supported yet.
diff --git a/gnocchi/__init__.py b/gnocchi/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/aggregates/__init__.py b/gnocchi/aggregates/__init__.py
new file mode 100644
index 00000000..4d54f470
--- /dev/null
+++ b/gnocchi/aggregates/__init__.py
@@ -0,0 +1,50 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import abc
+
+import six
+
+from gnocchi import exceptions
+
+
+class CustomAggFailure(Exception):
+ """Error raised when custom aggregation functions fail for any reason."""
+
+ def __init__(self, msg):
+ self.msg = msg
+ super(CustomAggFailure, self).__init__(msg)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class CustomAggregator(object):
+
+ @abc.abstractmethod
+ def compute(self, storage_obj, metric, start, stop, **param):
+ """Returns list of (timestamp, window, aggregate value) tuples.
+
+ :param storage_obj: storage object for retrieving the data
+ :param metric: metric
+ :param start: start timestamp
+ :param stop: stop timestamp
+ :param **param: parameters are window and optionally center.
+ 'window' is the granularity over which to compute the moving
+ aggregate.
+ 'center=True' returns the aggregated data indexed by the central
+ time in the sampling window, 'False' (default) indexes aggregates
+ by the oldest time in the window. center is not supported for EWMA.
+
+ """
+ raise exceptions.NotImplementedError
diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py
new file mode 100644
index 00000000..b0ce3b40
--- /dev/null
+++ b/gnocchi/aggregates/moving_stats.py
@@ -0,0 +1,145 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2014-2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+
+import numpy
+import pandas
+import six
+
+from gnocchi import aggregates
+from gnocchi import utils
+
+
+class MovingAverage(aggregates.CustomAggregator):
+
+ @staticmethod
+ def check_window_valid(window):
+ """Takes in the window parameter string, reformats as a float."""
+ if window is None:
+ msg = 'Moving aggregate must have window specified.'
+ raise aggregates.CustomAggFailure(msg)
+ try:
+ return utils.to_timespan(six.text_type(window)).total_seconds()
+ except Exception:
+ raise aggregates.CustomAggFailure('Invalid value for window')
+
+ @staticmethod
+ def retrieve_data(storage_obj, metric, start, stop, window):
+ """Retrieves finest-res data available from storage."""
+ all_data = storage_obj.get_measures(metric, start, stop)
+
+ try:
+ min_grain = min(set([row[1] for row in all_data if row[1] == 0
+ or window % row[1] == 0]))
+ except Exception:
+ msg = ("No data available that is either full-res or "
+ "of a granularity that factors into the window size "
+ "you specified.")
+ raise aggregates.CustomAggFailure(msg)
+
+ return min_grain, pandas.Series([r[2] for r in all_data
+ if r[1] == min_grain],
+ [r[0] for r in all_data
+ if r[1] == min_grain])
+
+ @staticmethod
+ def aggregate_data(data, func, window, min_grain, center=False,
+ min_size=1):
+ """Calculates moving func of data with sampling width of window.
+
+ :param data: Series of timestamp, value pairs
+ :param func: the function to use when aggregating
+ :param window: (float) range of data to use in each aggregation.
+ :param min_grain: granularity of the data being passed in.
+ :param center: whether to index the aggregated values by the first
+ timestamp of the values picked up by the window or by the central
+ timestamp.
+ :param min_size: if the number of points in the window is less than
+ min_size, the aggregate is not computed and nan is returned for
+ that iteration.
+ """
+
+ if center:
+ center = utils.strtobool(center)
+
+ def moving_window(x):
+ msec = datetime.timedelta(milliseconds=1)
+ zero = datetime.timedelta(seconds=0)
+ half_span = datetime.timedelta(seconds=window / 2)
+ start = utils.normalize_time(data.index[0])
+ stop = utils.normalize_time(
+ data.index[-1] + datetime.timedelta(seconds=min_grain))
+ # min_grain addition necessary since each bin of rolled-up data
+ # is indexed by leftmost timestamp of bin.
+
+ left = half_span if center else zero
+ right = 2 * half_span - left - msec
+ # msec subtraction is so we don't include right endpoint in slice.
+
+ x = utils.normalize_time(x)
+
+ if x - left >= start and x + right <= stop:
+ dslice = data[x - left: x + right]
+
+ if center and dslice.size % 2 == 0:
+ return func([func(data[x - msec - left: x - msec + right]),
+ func(data[x + msec - left: x + msec + right])
+ ])
+
+ # (NOTE) atmalagon: the msec shift here is so that we have two
+ # consecutive windows; one centered at time x - msec,
+ # and one centered at time x + msec. We then average the
+ # aggregates from the two windows; this result is centered
+ # at time x. Doing this double average is a way to return a
+ # centered average indexed by a timestamp that existed in
+ # the input data (which wouldn't be the case for an even number
+ # of points if we did only one centered average).
+
+ else:
+ return numpy.nan
+ if dslice.size < min_size:
+ return numpy.nan
+ return func(dslice)
+ try:
+ result = pandas.Series(data.index).apply(moving_window)
+
+ # change from integer index to timestamp index
+ result.index = data.index
+
+ return [(t, window, r) for t, r
+ in six.iteritems(result[~result.isnull()])]
+ except Exception as e:
+ raise aggregates.CustomAggFailure(str(e))
+
+ def compute(self, storage_obj, metric, start, stop, window=None,
+ center=False):
+ """Returns list of (timestamp, window, aggregated value) tuples.
+
+ :param storage_obj: a call is placed to the storage object to retrieve
+ the stored data.
+ :param metric: the metric
+ :param start: start timestamp
+ :param stop: stop timestamp
+ :param window: format string specifying the size over which to
+ aggregate the retrieved data
+ :param center: how to index the aggregated data (central timestamp or
+ leftmost timestamp)
+ """
+ window = self.check_window_valid(window)
+ min_grain, data = self.retrieve_data(storage_obj, metric, start,
+ stop, window)
+ return self.aggregate_data(data, numpy.mean, window, min_grain, center,
+ min_size=1)
diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py
new file mode 100644
index 00000000..54c64cc2
--- /dev/null
+++ b/gnocchi/archive_policy.py
@@ -0,0 +1,247 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright (c) 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import collections
+import datetime
+import operator
+
+from oslo_config import cfg
+from oslo_config import types
+import six
+
+
+class ArchivePolicy(object):
+
+ DEFAULT_AGGREGATION_METHODS = ()
+
+ # TODO(eglynn): figure out how to accommodate multi-valued aggregation
+ # methods, where there is no longer just a single aggregate
+ # value to be stored per-period (e.g. ohlc)
+ VALID_AGGREGATION_METHODS = set(
+ ('mean', 'sum', 'last', 'max', 'min',
+ 'std', 'median', 'first', 'count')).union(
+ set((str(i) + 'pct' for i in six.moves.range(1, 100))))
+
+ # Set that contains all the above values + their minus equivalent (-mean)
+ # and the "*" entry.
+ VALID_AGGREGATION_METHODS_VALUES = VALID_AGGREGATION_METHODS.union(
+ set(('*',)),
+ set(map(lambda s: "-" + s,
+ VALID_AGGREGATION_METHODS)),
+ set(map(lambda s: "+" + s,
+ VALID_AGGREGATION_METHODS)))
+
+ def __init__(self, name, back_window, definition,
+ aggregation_methods=None):
+ self.name = name
+ self.back_window = back_window
+ self.definition = []
+ for d in definition:
+ if isinstance(d, ArchivePolicyItem):
+ self.definition.append(d)
+ elif isinstance(d, dict):
+ self.definition.append(ArchivePolicyItem(**d))
+ elif len(d) == 2:
+ self.definition.append(
+ ArchivePolicyItem(points=d[0], granularity=d[1]))
+ else:
+ raise ValueError(
+ "Unable to understand policy definition %s" % d)
+
+ duplicate_granularities = [
+ granularity
+ for granularity, count in collections.Counter(
+ d.granularity for d in self.definition).items()
+ if count > 1
+ ]
+ if duplicate_granularities:
+ raise ValueError(
+ "More than one archive policy "
+ "uses granularity `%s'"
+ % duplicate_granularities[0]
+ )
+
+ if aggregation_methods is None:
+ self.aggregation_methods = self.DEFAULT_AGGREGATION_METHODS
+ else:
+ self.aggregation_methods = aggregation_methods
+
+ @property
+ def aggregation_methods(self):
+ if '*' in self._aggregation_methods:
+ agg_methods = self.VALID_AGGREGATION_METHODS.copy()
+ elif all(map(lambda s: s.startswith('-') or s.startswith('+'),
+ self._aggregation_methods)):
+ agg_methods = set(self.DEFAULT_AGGREGATION_METHODS)
+ else:
+ agg_methods = set(self._aggregation_methods)
+
+ for entry in self._aggregation_methods:
+ if entry:
+ if entry[0] == '-':
+ agg_methods -= set((entry[1:],))
+ elif entry[0] == '+':
+ agg_methods.add(entry[1:])
+
+ return agg_methods
+
+ @aggregation_methods.setter
+ def aggregation_methods(self, value):
+ value = set(value)
+ rest = value - self.VALID_AGGREGATION_METHODS_VALUES
+ if rest:
+ raise ValueError("Invalid value for aggregation_methods: %s" %
+ rest)
+ self._aggregation_methods = value
+
+ @classmethod
+ def from_dict(cls, d):
+ return cls(d['name'],
+ d['back_window'],
+ d['definition'],
+ d.get('aggregation_methods'))
+
+ def __eq__(self, other):
+ return (isinstance(other, ArchivePolicy)
+ and self.name == other.name
+ and self.back_window == other.back_window
+ and self.definition == other.definition
+ and self.aggregation_methods == other.aggregation_methods)
+
+ def jsonify(self):
+ return {
+ "name": self.name,
+ "back_window": self.back_window,
+ "definition": self.definition,
+ "aggregation_methods": self.aggregation_methods,
+ }
+
+ @property
+ def max_block_size(self):
+ # The biggest block size is the coarse grained archive definition
+ return sorted(self.definition,
+ key=operator.attrgetter("granularity"))[-1].granularity
+
+
+OPTS = [
+ cfg.ListOpt(
+ 'default_aggregation_methods',
+ item_type=types.String(
+ choices=ArchivePolicy.VALID_AGGREGATION_METHODS),
+ default=['mean', 'min', 'max', 'sum', 'std', 'count'],
+ help='Default aggregation methods to use in created archive policies'),
+]
+
+
+class ArchivePolicyItem(dict):
+ def __init__(self, granularity=None, points=None, timespan=None):
+ if (granularity is not None
+ and points is not None
+ and timespan is not None):
+ if timespan != granularity * points:
+ raise ValueError(
+ u"timespan ≠ granularity × points")
+
+ if granularity is not None and granularity <= 0:
+ raise ValueError("Granularity should be > 0")
+
+ if points is not None and points <= 0:
+ raise ValueError("Number of points should be > 0")
+
+ if granularity is None:
+ if points is None or timespan is None:
+ raise ValueError(
+ "At least two of granularity/points/timespan "
+ "must be provided")
+ granularity = round(timespan / float(points))
+ else:
+ granularity = float(granularity)
+
+ if points is None:
+ if timespan is None:
+ self['timespan'] = None
+ else:
+ points = int(timespan / granularity)
+ self['timespan'] = granularity * points
+ else:
+ points = int(points)
+ self['timespan'] = granularity * points
+
+ self['points'] = points
+ self['granularity'] = granularity
+
+ @property
+ def granularity(self):
+ return self['granularity']
+
+ @property
+ def points(self):
+ return self['points']
+
+ @property
+ def timespan(self):
+ return self['timespan']
+
+ def jsonify(self):
+ """Return a dict representation with human readable values."""
+ return {
+ 'timespan': six.text_type(
+ datetime.timedelta(seconds=self.timespan))
+ if self.timespan is not None
+ else None,
+ 'granularity': six.text_type(
+ datetime.timedelta(seconds=self.granularity)),
+ 'points': self.points,
+ }
+
+
+DEFAULT_ARCHIVE_POLICIES = {
+ 'bool': ArchivePolicy(
+ "bool", 3600, [
+ # 1 second resolution for 365 days
+ ArchivePolicyItem(granularity=1,
+ timespan=365 * 24 * 60 * 60),
+ ],
+ aggregation_methods=("last",),
+ ),
+ 'low': ArchivePolicy(
+ "low", 0, [
+ # 5 minutes resolution for 30 days
+ ArchivePolicyItem(granularity=300,
+ timespan=30 * 24 * 60 * 60),
+ ],
+ ),
+ 'medium': ArchivePolicy(
+ "medium", 0, [
+ # 1 minute resolution for 7 days
+ ArchivePolicyItem(granularity=60,
+ timespan=7 * 24 * 60 * 60),
+ # 1 hour resolution for 365 days
+ ArchivePolicyItem(granularity=3600,
+ timespan=365 * 24 * 60 * 60),
+ ],
+ ),
+ 'high': ArchivePolicy(
+ "high", 0, [
+ # 1 second resolution for an hour
+ ArchivePolicyItem(granularity=1, points=3600),
+ # 1 minute resolution for a week
+ ArchivePolicyItem(granularity=60, points=60 * 24 * 7),
+ # 1 hour resolution for a year
+ ArchivePolicyItem(granularity=3600, points=365 * 24),
+ ],
+ ),
+}
diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py
new file mode 100644
index 00000000..4716f41a
--- /dev/null
+++ b/gnocchi/carbonara.py
@@ -0,0 +1,980 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Time series data manipulation, better with pancetta."""
+
+import datetime
+import functools
+import logging
+import math
+import numbers
+import random
+import re
+import struct
+import time
+
+import lz4.block
+import numpy
+import numpy.lib.recfunctions
+import pandas
+from scipy import ndimage
+import six
+
+# NOTE(sileht): pandas relies on time.strptime()
+# and often triggers http://bugs.python.org/issue7980
+# it's due to our heavy thread usage; this is the workaround
+# to ensure the module is correctly loaded before we really use it.
+time.strptime("2016-02-19", "%Y-%m-%d")
+
+LOG = logging.getLogger(__name__)
+
+
+class NoDeloreanAvailable(Exception):
+ """Error raised when trying to insert a value that is too old."""
+
+ def __init__(self, first_timestamp, bad_timestamp):
+ self.first_timestamp = first_timestamp
+ self.bad_timestamp = bad_timestamp
+ super(NoDeloreanAvailable, self).__init__(
+ "%s is before %s" % (bad_timestamp, first_timestamp))
+
+
+class BeforeEpochError(Exception):
+ """Error raised when a timestamp before Epoch is used."""
+
+ def __init__(self, timestamp):
+ self.timestamp = timestamp
+ super(BeforeEpochError, self).__init__(
+ "%s is before Epoch" % timestamp)
+
+
+class UnAggregableTimeseries(Exception):
+ """Error raised when timeseries cannot be aggregated."""
+ def __init__(self, reason):
+ self.reason = reason
+ super(UnAggregableTimeseries, self).__init__(reason)
+
+
+class UnknownAggregationMethod(Exception):
+ """Error raised when the aggregation method is unknown."""
+ def __init__(self, agg):
+ self.aggregation_method = agg
+ super(UnknownAggregationMethod, self).__init__(
+ "Unknown aggregation method `%s'" % agg)
+
+
+class InvalidData(ValueError):
+ """Error raised when data are corrupted."""
+ def __init__(self):
+ super(InvalidData, self).__init__("Unable to unpack, invalid data")
+
+
+def round_timestamp(ts, freq):
+ return pandas.Timestamp(
+ (pandas.Timestamp(ts).value // freq) * freq)
+
+
+class GroupedTimeSeries(object):
+ def __init__(self, ts, granularity):
+        # NOTE(sileht): The whole class assumes ts is ordered and doesn't
+        # have duplicate timestamps; it uses numpy.unique, which sorts the
+        # list, but we always assume the order to be the same as the input.
+ freq = granularity * 10e8
+ self._ts = ts
+ self.indexes = (numpy.array(ts.index, numpy.float) // freq) * freq
+ self.tstamps, self.counts = numpy.unique(self.indexes,
+ return_counts=True)
+
+ def mean(self):
+ return self._scipy_aggregate(ndimage.mean)
+
+ def sum(self):
+ return self._scipy_aggregate(ndimage.sum)
+
+ def min(self):
+ return self._scipy_aggregate(ndimage.minimum)
+
+ def max(self):
+ return self._scipy_aggregate(ndimage.maximum)
+
+ def median(self):
+ return self._scipy_aggregate(ndimage.median)
+
+ def std(self):
+        # NOTE(sileht): ndimage.standard_deviation is really more performant,
+        # but it uses ddof=0; to get the same result as pandas we have to use
+        # ddof=1. If one day scipy allows passing ddof, this should be changed.
+ return self._scipy_aggregate(ndimage.labeled_comprehension,
+ remove_unique=True,
+ func=functools.partial(numpy.std, ddof=1),
+ out_dtype='float64',
+ default=None)
+
+ def _count(self):
+ timestamps = self.tstamps.astype('datetime64[ns]', copy=False)
+ return (self.counts, timestamps)
+
+ def count(self):
+ return pandas.Series(*self._count())
+
+ def last(self):
+ counts, timestamps = self._count()
+ cumcounts = numpy.cumsum(counts) - 1
+ values = self._ts.values[cumcounts]
+ return pandas.Series(values, pandas.to_datetime(timestamps))
+
+ def first(self):
+ counts, timestamps = self._count()
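+        # Shift the per-group counts right by one so that the cumulative sum
+        # below yields the index of the first sample of each group.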
+ counts = numpy.insert(counts[:-1], 0, 0)
+ cumcounts = numpy.cumsum(counts)
+ values = self._ts.values[cumcounts]
+ return pandas.Series(values, pandas.to_datetime(timestamps))
+
+ def quantile(self, q):
+ return self._scipy_aggregate(ndimage.labeled_comprehension,
+ func=functools.partial(
+ numpy.percentile,
+ q=q,
+ ),
+ out_dtype='float64',
+ default=None)
+
+ def _scipy_aggregate(self, method, remove_unique=False, *args, **kwargs):
+ if remove_unique:
+ tstamps = self.tstamps[self.counts > 1]
+ else:
+ tstamps = self.tstamps
+
+ if len(tstamps) == 0:
+ return pandas.Series()
+
+ values = method(self._ts.values, self.indexes, tstamps,
+ *args, **kwargs)
+ timestamps = tstamps.astype('datetime64[ns]', copy=False)
+ return pandas.Series(values, pandas.to_datetime(timestamps))
+
+
+class TimeSerie(object):
+ """A representation of series of a timestamp with a value.
+
+ Duplicate timestamps are not allowed and will be filtered to use the
+ last in the group when the TimeSerie is created or extended.
+ """
+
+ def __init__(self, ts=None):
+ if ts is None:
+ ts = pandas.Series()
+ self.ts = ts
+
+ @staticmethod
+ def clean_ts(ts):
+ if ts.index.has_duplicates:
+ ts = ts[~ts.index.duplicated(keep='last')]
+ if not ts.index.is_monotonic:
+ ts = ts.sort_index()
+ return ts
+
+ @classmethod
+ def from_data(cls, timestamps=None, values=None, clean=False):
+ ts = pandas.Series(values, timestamps)
+ if clean:
+ # For format v2
+ ts = cls.clean_ts(ts)
+ return cls(ts)
+
+ @classmethod
+ def from_tuples(cls, timestamps_values):
+ return cls.from_data(*zip(*timestamps_values))
+
+ def __eq__(self, other):
+ return (isinstance(other, TimeSerie)
+ and self.ts.all() == other.ts.all())
+
+ def __getitem__(self, key):
+ return self.ts[key]
+
+ def set_values(self, values):
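+        # `values` is an iterable of (timestamp, value) pairs; unzip it and
+        # reverse the pair so the call becomes pandas.Series(values, index).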
+ t = pandas.Series(*reversed(list(zip(*values))))
+ self.ts = self.clean_ts(t).combine_first(self.ts)
+
+ def __len__(self):
+ return len(self.ts)
+
+ @staticmethod
+ def _timestamps_and_values_from_dict(values):
+ timestamps = numpy.array(list(values.keys()), dtype='datetime64[ns]')
+ timestamps = pandas.to_datetime(timestamps)
+ v = list(values.values())
+ if v:
+ return timestamps, v
+ return (), ()
+
+ @staticmethod
+ def _to_offset(value):
+ if isinstance(value, numbers.Real):
+ return pandas.tseries.offsets.Nano(value * 10e8)
+ return pandas.tseries.frequencies.to_offset(value)
+
+ @property
+ def first(self):
+ try:
+ return self.ts.index[0]
+ except IndexError:
+ return
+
+ @property
+ def last(self):
+ try:
+ return self.ts.index[-1]
+ except IndexError:
+ return
+
+ def group_serie(self, granularity, start=0):
+ # NOTE(jd) Our whole serialization system is based on Epoch, and we
+        # store unsigned integers, so we can't store anything before Epoch.
+ # Sorry!
+ if self.ts.index[0].value < 0:
+ raise BeforeEpochError(self.ts.index[0])
+
+ return GroupedTimeSeries(self.ts[start:], granularity)
+
+ @staticmethod
+ def _compress(payload):
+ # FIXME(jd) lz4 > 0.9.2 returns bytearray instead of bytes. But Cradox
+ # does not accept bytearray but only bytes, so make sure that we have a
+ # byte type returned.
+ return memoryview(lz4.block.compress(payload)).tobytes()
+
+
+class BoundTimeSerie(TimeSerie):
+ def __init__(self, ts=None, block_size=None, back_window=0):
+ """A time serie that is limited in size.
+
+ Used to represent the full-resolution buffer of incoming raw
+ datapoints associated with a metric.
+
+ The maximum size of this time serie is expressed in a number of block
+ size, called the back window.
+ When the timeserie is truncated, a whole block is removed.
+
+ You cannot set a value using a timestamp that is prior to the last
+ timestamp minus this number of blocks. By default, a back window of 0
+ does not allow you to go back in time prior to the current block being
+ used.
+
+ """
+ super(BoundTimeSerie, self).__init__(ts)
+ self.block_size = self._to_offset(block_size)
+ self.back_window = back_window
+ self._truncate()
+
+ @classmethod
+ def from_data(cls, timestamps=None, values=None,
+ block_size=None, back_window=0):
+ return cls(pandas.Series(values, timestamps),
+ block_size=block_size, back_window=back_window)
+
+ def __eq__(self, other):
+ return (isinstance(other, BoundTimeSerie)
+ and super(BoundTimeSerie, self).__eq__(other)
+ and self.block_size == other.block_size
+ and self.back_window == other.back_window)
+
+ def set_values(self, values, before_truncate_callback=None,
+ ignore_too_old_timestamps=False):
+ # NOTE: values must be sorted when passed in.
+ if self.block_size is not None and not self.ts.empty:
+ first_block_timestamp = self.first_block_timestamp()
+ if ignore_too_old_timestamps:
+ for index, (timestamp, value) in enumerate(values):
+ if timestamp >= first_block_timestamp:
+ values = values[index:]
+ break
+ else:
+ values = []
+ else:
+ # Check that the smallest timestamp does not go too much back
+ # in time.
+ smallest_timestamp = values[0][0]
+ if smallest_timestamp < first_block_timestamp:
+ raise NoDeloreanAvailable(first_block_timestamp,
+ smallest_timestamp)
+ super(BoundTimeSerie, self).set_values(values)
+ if before_truncate_callback:
+ before_truncate_callback(self)
+ self._truncate()
+
+    _SERIALIZATION_TIMESTAMP_VALUE_LEN = struct.calcsize("<Qd")
+
+
+class SplitKey(object):
+    def __repr__(self):
+        return "<%s: %s / %fs>" % (self.__class__.__name__,
+                                   repr(self.key),
+                                   self._carbonara_sampling)
+
+
+class AggregatedTimeSerie(TimeSerie):
+
+ _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct")
+
+    PADDED_SERIAL_LEN = struct.calcsize("<?d")
+    COMPRESSED_SERIAL_LEN = struct.calcsize("<Hd")
+    COMPRESSED_TIMESPAMP_LEN = struct.calcsize("<H")
+
+    def __repr__(self):
+        return "<%s 0x%x sampling=%ss max_size=%s agg_method=%s>" % (
+            self.__class__.__name__,
+            id(self),
+            self.sampling,
+            self.max_size,
+            self.aggregation_method,
+        )
+
+ @staticmethod
+ def is_compressed(serialized_data):
+ """Check whatever the data was serialized with compression."""
+ return six.indexbytes(serialized_data, 0) == ord("c")
+
+ @classmethod
+ def unserialize(cls, data, start, agg_method, sampling):
+ x, y = [], []
+
+ start = float(start)
+ if data:
+ if cls.is_compressed(data):
+ # Compressed format
+ uncompressed = lz4.block.decompress(
+ memoryview(data)[1:].tobytes())
+ nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN
+
+ timestamps_raw = uncompressed[
+ :nb_points*cls.COMPRESSED_TIMESPAMP_LEN]
+ try:
+                    y = numpy.frombuffer(timestamps_raw, dtype='<H')
+                except ValueError:
+                    raise InvalidData()
+
+        if (needed_percent_of_overlap > 0 and
+                (right_boundary_ts == left_boundary_ts or
+                 (right_boundary_ts is None
+                  and maybe_next_timestamp_is_left_boundary))):
+ LOG.debug("We didn't find points that overlap in those "
+ "timeseries. "
+ "right_boundary_ts=%(right_boundary_ts)s, "
+ "left_boundary_ts=%(left_boundary_ts)s, "
+ "groups=%(groups)s", {
+ 'right_boundary_ts': right_boundary_ts,
+ 'left_boundary_ts': left_boundary_ts,
+ 'groups': list(grouped)
+ })
+ raise UnAggregableTimeseries('No overlap')
+
+        # NOTE(sileht): this calls the aggregation method on already
+        # aggregated values; for some kinds of aggregation the result can
+        # look weird, but this is the best we can do because we no longer
+        # have the raw datapoints in those cases.
+        # FIXME(sileht): so should we bail out in case of stddev, percentile
+        # and median?
+ agg_timeserie = getattr(grouped, aggregation)()
+ agg_timeserie = agg_timeserie.dropna().reset_index()
+
+ if from_timestamp is None and left_boundary_ts:
+ agg_timeserie = agg_timeserie[
+ agg_timeserie['timestamp'] >= left_boundary_ts]
+ if to_timestamp is None and right_boundary_ts:
+ agg_timeserie = agg_timeserie[
+ agg_timeserie['timestamp'] <= right_boundary_ts]
+
+ points = (agg_timeserie.sort_values(by=['granularity', 'timestamp'],
+ ascending=[0, 1]).itertuples())
+ return [(timestamp, granularity, value)
+ for __, timestamp, granularity, value in points]
+
+
+if __name__ == '__main__':
+ import sys
+ args = sys.argv[1:]
+ if not args or "--boundtimeserie" in args:
+ BoundTimeSerie.benchmark()
+ if not args or "--aggregatedtimeserie" in args:
+ AggregatedTimeSerie.benchmark()
diff --git a/gnocchi/cli.py b/gnocchi/cli.py
new file mode 100644
index 00000000..06e1fbbc
--- /dev/null
+++ b/gnocchi/cli.py
@@ -0,0 +1,317 @@
+# Copyright (c) 2013 Mirantis Inc.
+# Copyright (c) 2015-2017 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+import threading
+import time
+
+import cotyledon
+from cotyledon import oslo_config_glue
+from futurist import periodics
+from oslo_config import cfg
+from oslo_log import log
+import six
+import tenacity
+import tooz
+
+from gnocchi import archive_policy
+from gnocchi import genconfig
+from gnocchi import indexer
+from gnocchi import service
+from gnocchi import statsd as statsd_service
+from gnocchi import storage
+from gnocchi.storage import incoming
+from gnocchi import utils
+
+
+LOG = log.getLogger(__name__)
+
+
+def config_generator():
+ return genconfig.prehook(None, sys.argv[1:])
+
+
+def upgrade():
+ conf = cfg.ConfigOpts()
+ conf.register_cli_opts([
+ cfg.BoolOpt("skip-index", default=False,
+ help="Skip index upgrade."),
+ cfg.BoolOpt("skip-storage", default=False,
+ help="Skip storage upgrade."),
+ cfg.BoolOpt("skip-archive-policies-creation", default=False,
+ help="Skip default archive policies creation."),
+ cfg.IntOpt("num-storage-sacks", default=128,
+ help="Initial number of storage sacks to create."),
+
+ ])
+ conf = service.prepare_service(conf=conf)
+ index = indexer.get_driver(conf)
+ index.connect()
+ if not conf.skip_index:
+ LOG.info("Upgrading indexer %s", index)
+ index.upgrade()
+ if not conf.skip_storage:
+ s = storage.get_driver(conf)
+ LOG.info("Upgrading storage %s", s)
+ s.upgrade(index, conf.num_storage_sacks)
+
+ if (not conf.skip_archive_policies_creation
+ and not index.list_archive_policies()
+ and not index.list_archive_policy_rules()):
+ for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
+ index.create_archive_policy(ap)
+ index.create_archive_policy_rule("default", "*", "low")
+
+
+def change_sack_size():
+ conf = cfg.ConfigOpts()
+ conf.register_cli_opts([
+ cfg.IntOpt("sack_size", required=True, min=1,
+ help="Number of sacks."),
+ ])
+ conf = service.prepare_service(conf=conf)
+ s = storage.get_driver(conf)
+ report = s.incoming.measures_report(details=False)
+ remainder = report['summary']['measures']
+ if remainder:
+        LOG.error('Cannot change sack size while the backlog is non-empty. '
+                  'Process remaining %s measures and try again', remainder)
+ return
+ LOG.info("Changing sack size to: %s", conf.sack_size)
+ old_num_sacks = s.incoming.get_storage_sacks()
+ s.incoming.set_storage_settings(conf.sack_size)
+ s.incoming.remove_sack_group(old_num_sacks)
+
+
+def statsd():
+ statsd_service.start()
+
+
+class MetricProcessBase(cotyledon.Service):
+ def __init__(self, worker_id, conf, interval_delay=0):
+ super(MetricProcessBase, self).__init__(worker_id)
+ self.conf = conf
+ self.startup_delay = worker_id
+ self.interval_delay = interval_delay
+ self._shutdown = threading.Event()
+ self._shutdown_done = threading.Event()
+
+ def _configure(self):
+ self.store = storage.get_driver(self.conf)
+ self.index = indexer.get_driver(self.conf)
+ self.index.connect()
+
+ def run(self):
+ self._configure()
+ # Delay startup so workers are jittered.
+ time.sleep(self.startup_delay)
+
+ while not self._shutdown.is_set():
+ with utils.StopWatch() as timer:
+ self._run_job()
+ self._shutdown.wait(max(0, self.interval_delay - timer.elapsed()))
+ self._shutdown_done.set()
+
+ def terminate(self):
+ self._shutdown.set()
+ self.close_services()
+ LOG.info("Waiting ongoing metric processing to finish")
+ self._shutdown_done.wait()
+
+ @staticmethod
+ def close_services():
+ pass
+
+ @staticmethod
+ def _run_job():
+ raise NotImplementedError
+
+
+class MetricReporting(MetricProcessBase):
+ name = "reporting"
+
+ def __init__(self, worker_id, conf):
+ super(MetricReporting, self).__init__(
+ worker_id, conf, conf.metricd.metric_reporting_delay)
+
+ def _run_job(self):
+ try:
+ report = self.store.incoming.measures_report(details=False)
+ LOG.info("%d measurements bundles across %d "
+ "metrics wait to be processed.",
+ report['summary']['measures'],
+ report['summary']['metrics'])
+ except incoming.ReportGenerationError:
+ LOG.warning("Unable to compute backlog. Retrying at next "
+ "interval.")
+ except Exception:
+ LOG.error("Unexpected error during pending measures reporting",
+ exc_info=True)
+
+
+class MetricProcessor(MetricProcessBase):
+ name = "processing"
+ GROUP_ID = "gnocchi-processing"
+
+ def __init__(self, worker_id, conf):
+ super(MetricProcessor, self).__init__(
+ worker_id, conf, conf.metricd.metric_processing_delay)
+ self._coord, self._my_id = utils.get_coordinator_and_start(
+ conf.storage.coordination_url)
+ self._tasks = []
+ self.group_state = None
+
+ @utils.retry
+ def _configure(self):
+ super(MetricProcessor, self)._configure()
+        # create fallback in case partitioning fails or assigns no tasks
+ self.fallback_tasks = list(
+ six.moves.range(self.store.incoming.NUM_SACKS))
+ try:
+ self.partitioner = self._coord.join_partitioned_group(
+ self.GROUP_ID, partitions=200)
+ LOG.info('Joined coordination group: %s', self.GROUP_ID)
+
+ @periodics.periodic(spacing=self.conf.metricd.worker_sync_rate,
+ run_immediately=True)
+ def run_watchers():
+ self._coord.run_watchers()
+
+ self.periodic = periodics.PeriodicWorker.create([])
+ self.periodic.add(run_watchers)
+ t = threading.Thread(target=self.periodic.start)
+ t.daemon = True
+ t.start()
+ except NotImplementedError:
+ LOG.warning('Coordinator does not support partitioning. Worker '
+ 'will battle against other workers for jobs.')
+ except tooz.ToozError as e:
+ LOG.error('Unexpected error configuring coordinator for '
+ 'partitioning. Retrying: %s', e)
+ raise tenacity.TryAgain(e)
+
+ def _get_tasks(self):
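+        # NOTE: the finally clause below always returns, so errors from the
+        # partitioner are swallowed and we fall back to processing all sacks.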
+ try:
+ if (not self._tasks or
+ self.group_state != self.partitioner.ring.nodes):
+ self.group_state = self.partitioner.ring.nodes.copy()
+ self._tasks = [
+ i for i in six.moves.range(self.store.incoming.NUM_SACKS)
+ if self.partitioner.belongs_to_self(
+ i, replicas=self.conf.metricd.processing_replicas)]
+ finally:
+ return self._tasks or self.fallback_tasks
+
+ def _run_job(self):
+ m_count = 0
+ s_count = 0
+ in_store = self.store.incoming
+ for s in self._get_tasks():
+            # TODO(gordc): support delayed lock release so we don't
+            # process a sack right after another worker just did
+ lock = in_store.get_sack_lock(self._coord, s)
+ if not lock.acquire(blocking=False):
+ continue
+ try:
+ metrics = in_store.list_metric_with_measures_to_process(s)
+ m_count += len(metrics)
+ self.store.process_background_tasks(self.index, metrics)
+ s_count += 1
+ except Exception:
+ LOG.error("Unexpected error processing assigned job",
+ exc_info=True)
+ finally:
+ lock.release()
+ LOG.debug("%d metrics processed from %d sacks", m_count, s_count)
+
+ def close_services(self):
+ self._coord.stop()
+
+
+class MetricJanitor(MetricProcessBase):
+ name = "janitor"
+
+ def __init__(self, worker_id, conf):
+ super(MetricJanitor, self).__init__(
+ worker_id, conf, conf.metricd.metric_cleanup_delay)
+
+ def _run_job(self):
+ try:
+ self.store.expunge_metrics(self.index)
+ LOG.debug("Metrics marked for deletion removed from backend")
+ except Exception:
+ LOG.error("Unexpected error during metric cleanup", exc_info=True)
+
+
+class MetricdServiceManager(cotyledon.ServiceManager):
+ def __init__(self, conf):
+ super(MetricdServiceManager, self).__init__()
+ oslo_config_glue.setup(self, conf)
+
+ self.conf = conf
+ self.metric_processor_id = self.add(
+ MetricProcessor, args=(self.conf,),
+ workers=conf.metricd.workers)
+ if self.conf.metricd.metric_reporting_delay >= 0:
+ self.add(MetricReporting, args=(self.conf,))
+ self.add(MetricJanitor, args=(self.conf,))
+
+ self.register_hooks(on_reload=self.on_reload)
+
+ def on_reload(self):
+        # NOTE(sileht): We do not implement reload() in Workers so all workers
+        # will receive SIGHUP and exit gracefully, then they will be
+        # restarted with the new number of workers. This is important because
+        # we use the number of workers to declare the capability in tooz and
+        # to select the block of metrics to process.
+ self.reconfigure(self.metric_processor_id,
+ workers=self.conf.metricd.workers)
+
+ def run(self):
+ super(MetricdServiceManager, self).run()
+ self.queue.close()
+
+
+def metricd_tester(conf):
+ # NOTE(sileht): This method is designed to be profiled, we
+    # want to avoid issues with the profiler and os.fork(); that's
+    # why we don't use the MetricdServiceManager.
+ index = indexer.get_driver(conf)
+ index.connect()
+ s = storage.get_driver(conf)
+ metrics = set()
+ for i in six.moves.range(s.incoming.NUM_SACKS):
+ metrics.update(s.incoming.list_metric_with_measures_to_process(i))
+ if len(metrics) >= conf.stop_after_processing_metrics:
+ break
+ s.process_new_measures(
+ index, list(metrics)[:conf.stop_after_processing_metrics], True)
+
+
+def metricd():
+ conf = cfg.ConfigOpts()
+ conf.register_cli_opts([
+ cfg.IntOpt("stop-after-processing-metrics",
+ default=0,
+ min=0,
+ help="Number of metrics to process without workers, "
+ "for testing purpose"),
+ ])
+ conf = service.prepare_service(conf=conf)
+
+ if conf.stop_after_processing_metrics:
+ metricd_tester(conf)
+ else:
+ MetricdServiceManager(conf).run()
diff --git a/gnocchi/exceptions.py b/gnocchi/exceptions.py
new file mode 100644
index 00000000..81b484bf
--- /dev/null
+++ b/gnocchi/exceptions.py
@@ -0,0 +1,19 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class NotImplementedError(NotImplementedError):
+ pass
diff --git a/gnocchi/genconfig.py b/gnocchi/genconfig.py
new file mode 100644
index 00000000..0eba7359
--- /dev/null
+++ b/gnocchi/genconfig.py
@@ -0,0 +1,29 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016-2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+
+
+def prehook(cmd, args=None):
+ if args is None:
+ args = ['--output-file', 'etc/gnocchi/gnocchi.conf']
+ try:
+ from oslo_config import generator
+ generator.main(
+ ['--config-file',
+ '%s/gnocchi-config-generator.conf' % os.path.dirname(__file__)]
+ + args)
+ except Exception as e:
+ print("Unable to build sample configuration file: %s" % e)
diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py
new file mode 100644
index 00000000..7b9a8a11
--- /dev/null
+++ b/gnocchi/gendoc.py
@@ -0,0 +1,178 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import absolute_import
+import json
+import os
+import subprocess
+import sys
+import tempfile
+
+import jinja2
+import six
+import six.moves
+import webob.request
+import yaml
+
+from gnocchi.tests import test_rest
+
+# HACK(jd) Not sure why but Sphinx setup this multiple times, so we just avoid
+# doing several times the requests by using this global variable :(
+_RUN = False
+
+
+def _setup_test_app():
+ t = test_rest.RestTest()
+ t.auth_mode = "basic"
+ t.setUpClass()
+ t.setUp()
+ return t.app
+
+
+def _format_json(txt):
+ return json.dumps(json.loads(txt),
+ sort_keys=True,
+ indent=2)
+
+
+def _extract_body(req_or_resp):
+ # TODO(jd) Make this a Sphinx option
+ if req_or_resp.content_type == "application/json":
+ body = _format_json(req_or_resp.body)
+ else:
+ body = req_or_resp.body
+ return "\n ".join(body.split("\n"))
+
+
+def _format_headers(headers):
+ return "\n".join(
+ " %s: %s" % (k, v)
+ for k, v in six.iteritems(headers))
+
+
+def _response_to_httpdomain(response):
+ return """
+ .. sourcecode:: http
+
+ HTTP/1.1 %(status)s
+%(headers)s
+
+ %(body)s""" % {
+ 'status': response.status,
+ 'body': _extract_body(response),
+ 'headers': _format_headers(response.headers),
+ }
+
+
+def _request_to_httpdomain(request):
+ return """
+ .. sourcecode:: http
+
+ %(method)s %(path)s %(http_version)s
+%(headers)s
+
+ %(body)s""" % {
+ 'body': _extract_body(request),
+ 'method': request.method,
+ 'path': request.path_qs,
+ 'http_version': request.http_version,
+ 'headers': _format_headers(request.headers),
+ }
+
+
+def _format_request_reply(request, response):
+ return (_request_to_httpdomain(request)
+ + "\n"
+ + _response_to_httpdomain(response))
+
+
+class ScenarioList(list):
+ def __getitem__(self, key):
+ for scenario in self:
+ if scenario['name'] == key:
+ return scenario
+ return super(ScenarioList, self).__getitem__(key)
+
+
+multiversion_hack = """
+import sys
+import os
+
+srcdir = os.path.join("%s", "..", "..")
+os.chdir(srcdir)
+sys.path.insert(0, srcdir)
+
+class FakeApp(object):
+    def info(self, *args, **kwargs):
+ pass
+
+import gnocchi.gendoc
+gnocchi.gendoc.setup(FakeApp())
+"""
+
+
+def setup(app):
+ global _RUN
+ if _RUN:
+ return
+
+    # NOTE(sileht): On gnocchi.xyz, we build a multiversion of the docs;
+    # all versions are built with the master gnocchi.gendoc sphinx extension.
+    # So the hack here runs another python script to generate the rest.rst
+    # file of old versions of the module.
+    # It also drops the database before each run.
+ if sys.argv[0].endswith("sphinx-versioning"):
+ subprocess.call(["dropdb", os.environ['PGDATABASE']])
+ subprocess.call(["createdb", os.environ['PGDATABASE']])
+
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(multiversion_hack % app.confdir)
+ f.flush()
+ subprocess.call(['python', f.name])
+ _RUN = True
+ return
+
+ webapp = _setup_test_app()
+ # TODO(jd) Do not hardcode doc/source
+ with open("doc/source/rest.yaml") as f:
+ scenarios = ScenarioList(yaml.load(f))
+ for entry in scenarios:
+ template = jinja2.Template(entry['request'])
+ fake_file = six.moves.cStringIO()
+ fake_file.write(template.render(scenarios=scenarios).encode('utf-8'))
+ fake_file.seek(0)
+ request = webapp.RequestClass.from_file(fake_file)
+
+ # TODO(jd) Fix this lame bug in webob < 1.7
+ if (hasattr(webob.request, "http_method_probably_has_body")
+ and request.method == "DELETE"):
+            # Webob has a bug: it does not read the body for DELETE, l4m3r
+ clen = request.content_length
+ if clen is None:
+ request.body = fake_file.read()
+ else:
+ request.body = fake_file.read(clen)
+
+ app.info("Doing request %s: %s" % (entry['name'],
+ six.text_type(request)))
+ with webapp.use_admin_user():
+ response = webapp.request(request)
+ entry['response'] = response
+ entry['doc'] = _format_request_reply(request, response)
+ with open("doc/source/rest.j2", "r") as f:
+ template = jinja2.Template(f.read().decode('utf-8'))
+ with open("doc/source/rest.rst", "w") as f:
+ f.write(template.render(scenarios=scenarios).encode('utf-8'))
+ _RUN = True
diff --git a/gnocchi/gnocchi-config-generator.conf b/gnocchi/gnocchi-config-generator.conf
new file mode 100644
index 00000000..df6e9880
--- /dev/null
+++ b/gnocchi/gnocchi-config-generator.conf
@@ -0,0 +1,11 @@
+[DEFAULT]
+wrap_width = 79
+namespace = gnocchi
+namespace = oslo.db
+namespace = oslo.log
+namespace = oslo.middleware.cors
+namespace = oslo.middleware.healthcheck
+namespace = oslo.middleware.http_proxy_to_wsgi
+namespace = oslo.policy
+namespace = cotyledon
+namespace = keystonemiddleware.auth_token
diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py
new file mode 100644
index 00000000..1ffc9cb4
--- /dev/null
+++ b/gnocchi/indexer/__init__.py
@@ -0,0 +1,411 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import fnmatch
+import hashlib
+import os
+
+import iso8601
+from oslo_config import cfg
+import six
+from six.moves.urllib import parse
+from stevedore import driver
+
+from gnocchi import exceptions
+
+OPTS = [
+ cfg.StrOpt('url',
+ secret=True,
+ required=True,
+ default=os.getenv("GNOCCHI_INDEXER_URL"),
+ help='Indexer driver to use'),
+]
+
+
+_marker = object()
+
+
+class Resource(object):
+ def get_metric(self, metric_name):
+ for m in self.metrics:
+ if m.name == metric_name:
+ return m
+
+ def __eq__(self, other):
+ return (self.id == other.id
+ and self.type == other.type
+ and self.revision == other.revision
+ and self.revision_start == other.revision_start
+ and self.revision_end == other.revision_end
+ and self.creator == other.creator
+ and self.user_id == other.user_id
+ and self.project_id == other.project_id
+ and self.started_at == other.started_at
+ and self.ended_at == other.ended_at)
+
+ @property
+ def etag(self):
+ etag = hashlib.sha1()
+ etag.update(six.text_type(self.id).encode('utf-8'))
+ etag.update(six.text_type(
+ self.revision_start.isoformat()).encode('utf-8'))
+ return etag.hexdigest()
+
+ @property
+ def lastmodified(self):
+ # less precise revision start for Last-Modified http header
+ return self.revision_start.replace(microsecond=0,
+ tzinfo=iso8601.iso8601.UTC)
+
+
+def get_driver(conf):
+ """Return the configured driver."""
+ split = parse.urlsplit(conf.indexer.url)
+ d = driver.DriverManager('gnocchi.indexer',
+ split.scheme).driver
+ return d(conf)
+
+
+class IndexerException(Exception):
+ """Base class for all exceptions raised by an indexer."""
+
+
+class NoSuchResourceType(IndexerException):
+ """Error raised when the resource type is unknown."""
+ def __init__(self, type):
+ super(NoSuchResourceType, self).__init__(
+ "Resource type %s does not exist" % type)
+ self.type = type
+
+
+class NoSuchMetric(IndexerException):
+ """Error raised when a metric does not exist."""
+ def __init__(self, metric):
+ super(NoSuchMetric, self).__init__("Metric %s does not exist" %
+ metric)
+ self.metric = metric
+
+
+class NoSuchResource(IndexerException):
+ """Error raised when a resource does not exist."""
+ def __init__(self, resource):
+ super(NoSuchResource, self).__init__("Resource %s does not exist" %
+ resource)
+ self.resource = resource
+
+
+class NoSuchArchivePolicy(IndexerException):
+ """Error raised when an archive policy does not exist."""
+ def __init__(self, archive_policy):
+ super(NoSuchArchivePolicy, self).__init__(
+ "Archive policy %s does not exist" % archive_policy)
+ self.archive_policy = archive_policy
+
+
+class UnsupportedArchivePolicyChange(IndexerException):
+ """Error raised when modifying archive policy if not supported."""
+ def __init__(self, archive_policy, message):
+ super(UnsupportedArchivePolicyChange, self).__init__(
+ "Archive policy %s does not support change: %s" %
+ (archive_policy, message))
+ self.archive_policy = archive_policy
+ self.message = message
+
+
+class ArchivePolicyInUse(IndexerException):
+ """Error raised when an archive policy is still being used."""
+ def __init__(self, archive_policy):
+ super(ArchivePolicyInUse, self).__init__(
+ "Archive policy %s is still in use" % archive_policy)
+ self.archive_policy = archive_policy
+
+
+class ResourceTypeInUse(IndexerException):
+ """Error raised when an resource type is still being used."""
+ def __init__(self, resource_type):
+ super(ResourceTypeInUse, self).__init__(
+ "Resource type %s is still in use" % resource_type)
+ self.resource_type = resource_type
+
+
+class UnexpectedResourceTypeState(IndexerException):
+ """Error raised when an resource type state is not expected."""
+ def __init__(self, resource_type, expected_state, state):
+ super(UnexpectedResourceTypeState, self).__init__(
+ "Resource type %s state is %s (expected: %s)" % (
+ resource_type, state, expected_state))
+ self.resource_type = resource_type
+ self.expected_state = expected_state
+ self.state = state
+
+
+class NoSuchArchivePolicyRule(IndexerException):
+ """Error raised when an archive policy rule does not exist."""
+ def __init__(self, archive_policy_rule):
+ super(NoSuchArchivePolicyRule, self).__init__(
+ "Archive policy rule %s does not exist" %
+ archive_policy_rule)
+ self.archive_policy_rule = archive_policy_rule
+
+
+class NoArchivePolicyRuleMatch(IndexerException):
+ """Error raised when no archive policy rule found for metric."""
+ def __init__(self, metric_name):
+ super(NoArchivePolicyRuleMatch, self).__init__(
+ "No Archive policy rule found for metric %s" %
+ metric_name)
+ self.metric_name = metric_name
+
+
+class NamedMetricAlreadyExists(IndexerException):
+ """Error raised when a named metric already exists."""
+ def __init__(self, metric):
+ super(NamedMetricAlreadyExists, self).__init__(
+ "Named metric %s already exists" % metric)
+ self.metric = metric
+
+
+class ResourceAlreadyExists(IndexerException):
+ """Error raised when a resource already exists."""
+ def __init__(self, resource):
+ super(ResourceAlreadyExists, self).__init__(
+ "Resource %s already exists" % resource)
+ self.resource = resource
+
+
+class ResourceTypeAlreadyExists(IndexerException):
+ """Error raised when a resource type already exists."""
+ def __init__(self, resource_type):
+ super(ResourceTypeAlreadyExists, self).__init__(
+ "Resource type %s already exists" % resource_type)
+ self.resource_type = resource_type
+
+
+class ResourceAttributeError(IndexerException, AttributeError):
+ """Error raised when an attribute does not exist for a resource type."""
+ def __init__(self, resource, attribute):
+ super(ResourceAttributeError, self).__init__(
+ "Resource type %s has no %s attribute" % (resource, attribute))
+ self.resource = resource
+ self.attribute = attribute
+
+
+class ResourceValueError(IndexerException, ValueError):
+ """Error raised when an attribute value is invalid for a resource type."""
+ def __init__(self, resource_type, attribute, value):
+ super(ResourceValueError, self).__init__(
+ "Value %s for attribute %s on resource type %s is invalid"
+ % (value, attribute, resource_type))
+ self.resource_type = resource_type
+ self.attribute = attribute
+ self.value = value
+
+
+class ArchivePolicyAlreadyExists(IndexerException):
+ """Error raised when an archive policy already exists."""
+ def __init__(self, name):
+ super(ArchivePolicyAlreadyExists, self).__init__(
+ "Archive policy %s already exists" % name)
+ self.name = name
+
+
+class ArchivePolicyRuleAlreadyExists(IndexerException):
+ """Error raised when an archive policy rule already exists."""
+ def __init__(self, name):
+ super(ArchivePolicyRuleAlreadyExists, self).__init__(
+ "Archive policy rule %s already exists" % name)
+ self.name = name
+
+
+class QueryError(IndexerException):
+ def __init__(self):
+ super(QueryError, self).__init__("Unable to parse this query")
+
+
+class QueryValueError(QueryError, ValueError):
+    def __init__(self, v, f):
+        # Skip QueryError.__init__ (it takes no arguments) and call
+        # IndexerException.__init__ directly with a specific message.
+        super(QueryError, self).__init__("Invalid value: `%s' for field `%s'"
+                                         % (v, f))
+
+
+class QueryInvalidOperator(QueryError):
+    def __init__(self, op):
+        self.op = op
+        # Same trick as above: bypass QueryError.__init__.
+        super(QueryError, self).__init__("Unknown operator `%s'" % op)
+
+
+class QueryAttributeError(QueryError, ResourceAttributeError):
+ def __init__(self, resource, attribute):
+ ResourceAttributeError.__init__(self, resource, attribute)
+
+
+class InvalidPagination(IndexerException):
+    """Error raised when the pagination is invalid."""
+ def __init__(self, reason):
+ self.reason = reason
+ super(InvalidPagination, self).__init__(
+ "Invalid pagination: `%s'" % reason)
+
+
+class IndexerDriver(object):
+ @staticmethod
+ def __init__(conf):
+ pass
+
+ @staticmethod
+ def connect():
+ pass
+
+ @staticmethod
+ def disconnect():
+ pass
+
+ @staticmethod
+ def upgrade(nocreate=False):
+ pass
+
+ @staticmethod
+ def get_resource(resource_type, resource_id, with_metrics=False):
+ """Get a resource from the indexer.
+
+ :param resource_type: The type of the resource to look for.
+ :param resource_id: The UUID of the resource.
+ :param with_metrics: Whether to include metrics information.
+ """
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def list_resources(resource_type='generic',
+ attribute_filter=None,
+ details=False,
+ history=False,
+ limit=None,
+ marker=None,
+ sorts=None):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def list_archive_policies():
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def get_archive_policy(name):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def update_archive_policy(name, ap_items):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def delete_archive_policy(name):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def get_archive_policy_rule(name):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def list_archive_policy_rules():
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def create_archive_policy_rule(name, metric_pattern, archive_policy_name):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def delete_archive_policy_rule(name):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def create_metric(id, creator,
+ archive_policy_name, name=None, unit=None,
+ resource_id=None):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def list_metrics(names=None, ids=None, details=False, status='active',
+ limit=None, marker=None, sorts=None, **kwargs):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def create_archive_policy(archive_policy):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def create_resource(resource_type, id, creator,
+ user_id=None, project_id=None,
+ started_at=None, ended_at=None, metrics=None,
+ **kwargs):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def update_resource(resource_type, resource_id, ended_at=_marker,
+ metrics=_marker,
+ append_metrics=False,
+ create_revision=True,
+ **kwargs):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def delete_resource(uuid):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def delete_resources(resource_type='generic',
+ attribute_filter=None):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def delete_metric(id):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def expunge_metric(id):
+ raise exceptions.NotImplementedError
+
+ def get_archive_policy_for_metric(self, metric_name):
+        """Get the archive policy according to the archive policy rules."""
+ rules = self.list_archive_policy_rules()
+ for rule in rules:
+ if fnmatch.fnmatch(metric_name or "", rule.metric_pattern):
+ return self.get_archive_policy(rule.archive_policy_name)
+ raise NoArchivePolicyRuleMatch(metric_name)
+
+ @staticmethod
+ def create_resource_type(resource_type):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def get_resource_type(name):
+ """Get a resource type from the indexer.
+
+ :param name: name of the resource type
+ """
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def list_resource_types(attribute_filter=None,
+ limit=None,
+ marker=None,
+ sorts=None):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def get_resource_attributes_schemas():
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def get_resource_type_schema():
+ raise exceptions.NotImplementedError
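
The only concrete method on `IndexerDriver`, `get_archive_policy_for_metric`, resolves a metric name against the rule patterns with `fnmatch`. A minimal sketch of that matching logic, with made-up rule data standing in for what `list_archive_policy_rules` would return:

```python
import fnmatch

# Hypothetical rules, assumed ordered most-specific first.
rules = [("disk.*", "low"), ("cpu_util", "high"), ("*", "medium")]

def match_archive_policy(metric_name):
    for pattern, policy_name in rules:
        # `metric_name or ""` mirrors the driver: an unnamed metric can
        # still match a catch-all "*" rule.
        if fnmatch.fnmatch(metric_name or "", pattern):
            return policy_name
    raise LookupError("no archive policy rule for %r" % metric_name)

assert match_archive_policy("disk.read.bytes") == "low"
assert match_archive_policy(None) == "medium"
```
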
diff --git a/gnocchi/indexer/alembic/alembic.ini b/gnocchi/indexer/alembic/alembic.ini
new file mode 100644
index 00000000..db7340ac
--- /dev/null
+++ b/gnocchi/indexer/alembic/alembic.ini
@@ -0,0 +1,3 @@
+[alembic]
+script_location = gnocchi.indexer:alembic
+sqlalchemy.url = postgresql://localhost/gnocchi
diff --git a/gnocchi/indexer/alembic/env.py b/gnocchi/indexer/alembic/env.py
new file mode 100644
index 00000000..47f58efb
--- /dev/null
+++ b/gnocchi/indexer/alembic/env.py
@@ -0,0 +1,90 @@
+#
+# Copyright 2015 Red Hat. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A test module to exercise the Gnocchi API with gabbi."""
+
+from alembic import context
+
+from gnocchi.indexer import sqlalchemy
+from gnocchi.indexer import sqlalchemy_base
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = sqlalchemy_base.Base.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline():
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the
+ script output.
+
+ """
+ conf = config.conf
+ context.configure(url=conf.indexer.url,
+ target_metadata=target_metadata)
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def run_migrations_online():
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
+ conf = config.conf
+ indexer = sqlalchemy.SQLAlchemyIndexer(conf)
+ indexer.connect()
+ with indexer.facade.writer_connection() as connectable:
+
+ with connectable.connect() as connection:
+ context.configure(
+ connection=connection,
+ target_metadata=target_metadata
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+ indexer.disconnect()
+
+# If `alembic' was used directly from the CLI
+if not hasattr(config, "conf"):
+ from gnocchi import service
+ config.conf = service.prepare_service([])
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
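
Both code paths above can be driven through Alembic's command API; passing `sql=True` selects the offline mode, which only emits SQL. A sketch, assuming a valid `alembic.ini` and a reachable indexer database:

```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # hypothetical path to the ini shown above

command.upgrade(cfg, "head")             # online: connects and migrates
command.upgrade(cfg, "head", sql=True)   # offline: prints SQL to stdout
```
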
diff --git a/gnocchi/indexer/alembic/script.py.mako b/gnocchi/indexer/alembic/script.py.mako
new file mode 100644
index 00000000..8f4e92ea
--- /dev/null
+++ b/gnocchi/indexer/alembic/script.py.mako
@@ -0,0 +1,36 @@
+# Copyright ${create_date.year} OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade():
+ ${upgrades if upgrades else "pass"}
diff --git a/gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py b/gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py
new file mode 100644
index 00000000..8662b114
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py
@@ -0,0 +1,54 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Add tablename to resource_type
+
+Revision ID: 0718ed97e5b3
+Revises: 828c16f70cce
+Create Date: 2016-01-20 08:14:04.893783
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '0718ed97e5b3'
+down_revision = '828c16f70cce'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.add_column("resource_type", sa.Column('tablename', sa.String(18),
+ nullable=True))
+
+ resource_type = sa.Table(
+ 'resource_type', sa.MetaData(),
+ sa.Column('name', sa.String(255), nullable=False),
+ sa.Column('tablename', sa.String(18), nullable=True)
+ )
+ op.execute(resource_type.update().where(
+ resource_type.c.name == "instance_network_interface"
+    ).values({'tablename': op.inline_literal("instance_net_int")}))
+ op.execute(resource_type.update().where(
+ resource_type.c.name != "instance_network_interface"
+ ).values({'tablename': resource_type.c.name}))
+
+ op.alter_column("resource_type", "tablename", type_=sa.String(18),
+ nullable=False)
+ op.create_unique_constraint("uniq_resource_type0tablename",
+ "resource_type", ["tablename"])
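
This migration follows the usual three-step recipe for introducing a NOT NULL column on a populated table: add it NULLable, backfill every row, then tighten the constraint. A generic sketch of the pattern (illustrative names, not project code):

```python
import sqlalchemy as sa
from alembic import op

def add_backfilled_not_null(table_name, column_name, fill_value):
    # 1) add the column as NULLable so existing rows stay valid
    op.add_column(table_name,
                  sa.Column(column_name, sa.String(18), nullable=True))
    # 2) backfill every existing row
    t = sa.sql.table(table_name, sa.sql.column(column_name))
    op.execute(t.update().values(**{column_name: fill_value}))
    # 3) only then make it NOT NULL
    op.alter_column(table_name, column_name,
                    existing_type=sa.String(18), nullable=False)
```
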
diff --git a/gnocchi/indexer/alembic/versions/1c2c61ac1f4c_add_original_resource_id_column.py b/gnocchi/indexer/alembic/versions/1c2c61ac1f4c_add_original_resource_id_column.py
new file mode 100644
index 00000000..59632635
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/1c2c61ac1f4c_add_original_resource_id_column.py
@@ -0,0 +1,40 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""add original resource id column
+
+Revision ID: 1c2c61ac1f4c
+Revises: 62a8dfb139bb
+Create Date: 2016-01-27 05:57:48.909012
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = '1c2c61ac1f4c'
+down_revision = '62a8dfb139bb'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.add_column('resource', sa.Column('original_resource_id',
+ sa.String(length=255),
+ nullable=True))
+ op.add_column('resource_history', sa.Column('original_resource_id',
+ sa.String(length=255),
+ nullable=True))
diff --git a/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py b/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py
new file mode 100644
index 00000000..ff04411f
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py
@@ -0,0 +1,267 @@
+# flake8: noqa
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Initial base for Gnocchi 1.0.0
+
+Revision ID: 1c98ac614015
+Revises:
+Create Date: 2015-04-27 16:05:13.530625
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1c98ac614015'
+down_revision = None
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+import sqlalchemy_utils
+
+import gnocchi.indexer.sqlalchemy_base
+
+
+def upgrade():
+ op.create_table('resource',
+ sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False),
+ sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
+ sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
+ sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True),
+ sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_resource_id', 'resource', ['id'], unique=False)
+ op.create_table('archive_policy',
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('back_window', sa.Integer(), nullable=False),
+ sa.Column('definition', gnocchi.indexer.sqlalchemy_base.ArchivePolicyDefinitionType(), nullable=False),
+ sa.Column('aggregation_methods', gnocchi.indexer.sqlalchemy_base.SetType(), nullable=False),
+ sa.PrimaryKeyConstraint('name'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_archive_policy_name', 'archive_policy', ['name'], unique=False)
+ op.create_table('volume',
+ sa.Column('display_name', sa.String(length=255), nullable=False),
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_volume_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_volume_id', 'volume', ['id'], unique=False)
+ op.create_table('instance',
+ sa.Column('flavor_id', sa.Integer(), nullable=False),
+ sa.Column('image_ref', sa.String(length=255), nullable=False),
+ sa.Column('host', sa.String(length=255), nullable=False),
+ sa.Column('display_name', sa.String(length=255), nullable=False),
+ sa.Column('server_group', sa.String(length=255), nullable=True),
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_instance_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_instance_id', 'instance', ['id'], unique=False)
+ op.create_table('stack',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_stack_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_stack_id', 'stack', ['id'], unique=False)
+ op.create_table('archive_policy_rule',
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('archive_policy_name', sa.String(length=255), nullable=False),
+ sa.Column('metric_pattern', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['archive_policy_name'], ['archive_policy.name'], name="fk_archive_policy_rule_archive_policy_name_archive_policy_name", ondelete='RESTRICT'),
+ sa.PrimaryKeyConstraint('name'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_archive_policy_rule_name', 'archive_policy_rule', ['name'], unique=False)
+ op.create_table('swift_account',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_swift_account_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_swift_account_id', 'swift_account', ['id'], unique=False)
+ op.create_table('ceph_account',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_ceph_account_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_ceph_account_id', 'ceph_account', ['id'], unique=False)
+ op.create_table('ipmi',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_ipmi_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_ipmi_id', 'ipmi', ['id'], unique=False)
+ op.create_table('image',
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('container_format', sa.String(length=255), nullable=False),
+ sa.Column('disk_format', sa.String(length=255), nullable=False),
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_image_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_image_id', 'image', ['id'], unique=False)
+ op.create_table('resource_history',
+ sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False),
+ sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
+ sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
+ sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True),
+ sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.Column('revision_end', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_resource_history_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_resource_history_id', 'resource_history', ['id'], unique=False)
+ op.create_table('identity',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_identity_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_identity_id', 'identity', ['id'], unique=False)
+ op.create_table('network',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_network_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_network_id', 'network', ['id'], unique=False)
+ op.create_table('metric',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
+ sa.Column('archive_policy_name', sa.String(length=255), nullable=False),
+ sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('resource_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
+ sa.Column('name', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['archive_policy_name'], ['archive_policy.name'], name="fk_metric_archive_policy_name_archive_policy_name", ondelete='RESTRICT'),
+ sa.ForeignKeyConstraint(['resource_id'], ['resource.id'], name="fk_metric_resource_id_resource_id", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('resource_id', 'name', name='uniq_metric0resource_id0name'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_metric_id', 'metric', ['id'], unique=False)
+ op.create_table('identity_history',
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_identity_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_identity_history_revision', 'identity_history', ['revision'], unique=False)
+ op.create_table('instance_history',
+ sa.Column('flavor_id', sa.Integer(), nullable=False),
+ sa.Column('image_ref', sa.String(length=255), nullable=False),
+ sa.Column('host', sa.String(length=255), nullable=False),
+ sa.Column('display_name', sa.String(length=255), nullable=False),
+ sa.Column('server_group', sa.String(length=255), nullable=True),
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_instance_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_instance_history_revision', 'instance_history', ['revision'], unique=False)
+ op.create_table('network_history',
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_network_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_network_history_revision', 'network_history', ['revision'], unique=False)
+ op.create_table('swift_account_history',
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_swift_account_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_swift_account_history_revision', 'swift_account_history', ['revision'], unique=False)
+ op.create_table('ceph_account_history',
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_ceph_account_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_ceph_account_history_revision', 'ceph_account_history', ['revision'], unique=False)
+ op.create_table('ipmi_history',
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_ipmi_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_ipmi_history_revision', 'ipmi_history', ['revision'], unique=False)
+ op.create_table('image_history',
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('container_format', sa.String(length=255), nullable=False),
+ sa.Column('disk_format', sa.String(length=255), nullable=False),
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_image_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_image_history_revision', 'image_history', ['revision'], unique=False)
+ op.create_table('stack_history',
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_stack_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_stack_history_revision', 'stack_history', ['revision'], unique=False)
+ op.create_table('volume_history',
+ sa.Column('display_name', sa.String(length=255), nullable=False),
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_volume_history_resource_history_revision", ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+ op.create_index('ix_volume_history_revision', 'volume_history', ['revision'], unique=False)
diff --git a/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py b/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py
new file mode 100644
index 00000000..bd73b12b
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py
@@ -0,0 +1,66 @@
+# Copyright 2017 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Make sure resource.original_resource_id is NOT NULL
+
+Revision ID: 1e1a63d3d186
+Revises: 397987e38570
+Create Date: 2017-01-26 19:33:35.209688
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy import func
+import sqlalchemy_utils
+
+
+# revision identifiers, used by Alembic.
+revision = '1e1a63d3d186'
+down_revision = '397987e38570'
+branch_labels = None
+depends_on = None
+
+
+def clean_substr(col, start, length):
+ return func.lower(func.substr(func.hex(col), start, length))
+
+
+def upgrade():
+ bind = op.get_bind()
+ for table_name in ('resource', 'resource_history'):
+ table = sa.Table(table_name, sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(),
+ nullable=False),
+ sa.Column('original_resource_id', sa.String(255)))
+
+        # NOTE(gordc): MySQL stores the id as binary so we need to rebuild
+        # it back into a string UUID.
+ if bind and bind.engine.name == "mysql":
+ vals = {'original_resource_id':
+ clean_substr(table.c.id, 1, 8) + '-' +
+ clean_substr(table.c.id, 9, 4) + '-' +
+ clean_substr(table.c.id, 13, 4) + '-' +
+ clean_substr(table.c.id, 17, 4) + '-' +
+ clean_substr(table.c.id, 21, 12)}
+ else:
+ vals = {'original_resource_id': table.c.id}
+
+ op.execute(table.update().where(
+ table.c.original_resource_id.is_(None)).values(vals))
+ op.alter_column(table_name, "original_resource_id", nullable=False,
+ existing_type=sa.String(255),
+ existing_nullable=True)
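
The `clean_substr` expression rebuilds the dashed UUID string from the 16-byte binary form MySQL stores. The same slicing in plain Python (Python 3 shown):

```python
import uuid

u = uuid.UUID("5e3fcbe2-7aab-475d-b42c-a440aa42e5ad")
h = u.bytes.hex()  # lowercase; SQL HEX() is uppercase, hence lower() above

# substr(h, 1, 8), (9, 4), (13, 4), (17, 4), (21, 12) in 1-based SQL terms
rebuilt = "-".join((h[0:8], h[8:12], h[12:16], h[16:20], h[20:32]))
assert rebuilt == str(u)
```
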
diff --git a/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py b/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py
new file mode 100644
index 00000000..e2e48d9b
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py
@@ -0,0 +1,41 @@
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""allow volume display name to be null
+
+Revision ID: 1f21cbdd6bc2
+Revises: 469b308577a9
+Create Date: 2015-12-08 02:12:20.273880
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '1f21cbdd6bc2'
+down_revision = '469b308577a9'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.alter_column('volume', 'display_name',
+ existing_type=sa.String(length=255),
+ nullable=True)
+ op.alter_column('volume_history', 'display_name',
+ existing_type=sa.String(length=255),
+ nullable=True)
diff --git a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py
new file mode 100644
index 00000000..21dc7e42
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py
@@ -0,0 +1,89 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Add updating resource type states
+
+Revision ID: 27d2a1d205ff
+Revises: 7e6f9d542f8b
+Create Date: 2016-08-31 14:05:34.316496
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+from gnocchi.indexer import sqlalchemy_base
+from gnocchi import utils
+
+# revision identifiers, used by Alembic.
+revision = '27d2a1d205ff'
+down_revision = '7e6f9d542f8b'
+branch_labels = None
+depends_on = None
+
+
+resource_type = sa.sql.table(
+ 'resource_type',
+ sa.sql.column('updated_at', sqlalchemy_base.PreciseTimestamp()))
+
+state_enum = sa.Enum("active", "creating",
+ "creation_error", "deleting",
+ "deletion_error", "updating",
+ "updating_error",
+ name="resource_type_state_enum")
+
+
+def upgrade():
+
+ op.alter_column('resource_type', 'state',
+ type_=state_enum,
+ nullable=False,
+ server_default=None)
+
+    # NOTE(sileht): PostgreSQL has a builtin ENUM type, so just altering
+    # the column won't work.
+    # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
+    # Does this break offline migration because we use get_bind()?
+
+ # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction,
+ # we split the 'ALTER TYPE' operation into several steps.
+ bind = op.get_bind()
+ if bind and bind.engine.name == "postgresql":
+ op.execute("ALTER TYPE resource_type_state_enum RENAME TO \
+ old_resource_type_state_enum")
+ op.execute("CREATE TYPE resource_type_state_enum AS ENUM \
+ ('active', 'creating', 'creation_error', \
+ 'deleting', 'deletion_error', 'updating', \
+ 'updating_error')")
+ op.execute("ALTER TABLE resource_type ALTER COLUMN state TYPE \
+ resource_type_state_enum USING \
+ state::text::resource_type_state_enum")
+ op.execute("DROP TYPE old_resource_type_state_enum")
+
+ # NOTE(sileht): we can't alter type with server_default set on
+ # postgresql...
+ op.alter_column('resource_type', 'state',
+ type_=state_enum,
+ nullable=False,
+ server_default="creating")
+ op.add_column("resource_type",
+ sa.Column("updated_at",
+ sqlalchemy_base.PreciseTimestamp(),
+ nullable=True))
+
+ op.execute(resource_type.update().values({'updated_at': utils.utcnow()}))
+ op.alter_column("resource_type", "updated_at",
+ type_=sqlalchemy_base.PreciseTimestamp(),
+ nullable=False)
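
Since PostgreSQL enums are standalone types, the rename/create/cast/drop dance above is the standard way to change one in place. The recipe, generalized as a sketch (illustrative names only):

```python
from alembic import op

def replace_pg_enum(type_name, table, column, new_values):
    op.execute("ALTER TYPE %s RENAME TO old_%s" % (type_name, type_name))
    op.execute("CREATE TYPE %s AS ENUM (%s)" % (
        type_name, ", ".join("'%s'" % v for v in new_values)))
    op.execute("ALTER TABLE %s ALTER COLUMN %s TYPE %s "
               "USING %s::text::%s" % (table, column, type_name,
                                       column, type_name))
    op.execute("DROP TYPE old_%s" % type_name)
```
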
diff --git a/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py b/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py
new file mode 100644
index 00000000..5215da09
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py
@@ -0,0 +1,39 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""drop_useless_enum
+
+Revision ID: 2e0b912062d1
+Revises: 34c517bcc2dd
+Create Date: 2016-04-15 07:29:38.492237
+
+"""
+
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = '2e0b912062d1'
+down_revision = '34c517bcc2dd'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ bind = op.get_bind()
+ if bind and bind.engine.name == "postgresql":
+        # NOTE(sileht): we use IF EXISTS because if the database has been
+        # created from scratch with 2.1 the enum doesn't exist
+ op.execute("DROP TYPE IF EXISTS resource_type_enum")
diff --git a/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py b/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py
new file mode 100644
index 00000000..f7a4a61a
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py
@@ -0,0 +1,91 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""shorter_foreign_key
+
+Revision ID: 34c517bcc2dd
+Revises: ed9c6ddc5c35
+Create Date: 2016-04-13 16:58:42.536431
+
+"""
+
+from alembic import op
+import sqlalchemy
+
+# revision identifiers, used by Alembic.
+revision = '34c517bcc2dd'
+down_revision = 'ed9c6ddc5c35'
+branch_labels = None
+depends_on = None
+
+
+resource_type_helper = sqlalchemy.Table(
+ 'resource_type',
+ sqlalchemy.MetaData(),
+ sqlalchemy.Column('tablename', sqlalchemy.String(18), nullable=False)
+)
+
+to_rename = [
+ ('fk_metric_archive_policy_name_archive_policy_name',
+ 'fk_metric_ap_name_ap_name',
+ 'archive_policy', 'name',
+ 'metric', 'archive_policy_name',
+ "RESTRICT"),
+ ('fk_resource_history_resource_type_name',
+ 'fk_rh_resource_type_name',
+ 'resource_type', 'name', 'resource_history', 'type',
+ "RESTRICT"),
+ ('fk_resource_history_id_resource_id',
+ 'fk_rh_id_resource_id',
+ 'resource', 'id', 'resource_history', 'id',
+ "CASCADE"),
+ ('fk_archive_policy_rule_archive_policy_name_archive_policy_name',
+ 'fk_apr_ap_name_ap_name',
+ 'archive_policy', 'name', 'archive_policy_rule', 'archive_policy_name',
+ "RESTRICT")
+]
+
+
+def upgrade():
+ connection = op.get_bind()
+
+ insp = sqlalchemy.inspect(connection)
+
+ op.alter_column("resource_type", "tablename",
+ type_=sqlalchemy.String(35),
+ existing_type=sqlalchemy.String(18), nullable=False)
+
+ for rt in connection.execute(resource_type_helper.select()):
+ if rt.tablename == "generic":
+ continue
+
+ fk_names = [fk['name'] for fk in insp.get_foreign_keys("%s_history" %
+ rt.tablename)]
+ fk_old = ("fk_%s_history_resource_history_revision" %
+ rt.tablename)
+ if fk_old not in fk_names:
+            # The table has been created from scratch recently
+ fk_old = ("fk_%s_history_revision_resource_history_revision" %
+ rt.tablename)
+
+ fk_new = "fk_%s_h_revision_rh_revision" % rt.tablename
+ to_rename.append((fk_old, fk_new, 'resource_history', 'revision',
+ "%s_history" % rt.tablename, 'revision', 'CASCADE'))
+
+ for (fk_old, fk_new, src_table, src_col, dst_table, dst_col, ondelete
+ ) in to_rename:
+ op.drop_constraint(fk_old, dst_table, type_="foreignkey")
+ op.create_foreign_key(fk_new, dst_table, src_table,
+ [dst_col], [src_col], ondelete=ondelete)
diff --git a/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py b/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py
new file mode 100644
index 00000000..2c221f70
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py
@@ -0,0 +1,103 @@
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""create instance_disk and instance_net_int tables
+
+Revision ID: 3901f5ea2b8e
+Revises: 42ee7f3e25f8
+Create Date: 2015-08-27 17:00:25.092891
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3901f5ea2b8e'
+down_revision = '42ee7f3e25f8'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+import sqlalchemy_utils
+
+
+def upgrade():
+ for table in ["resource", "resource_history"]:
+ op.alter_column(table, "type",
+ type_=sa.Enum('generic', 'instance', 'swift_account',
+ 'volume', 'ceph_account', 'network',
+ 'identity', 'ipmi', 'stack', 'image',
+ 'instance_network_interface',
+ 'instance_disk',
+ name='resource_type_enum'),
+ nullable=False)
+
+ # NOTE(sileht): postgresql have a builtin ENUM type, so
+ # just altering the column won't works.
+ # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
+ # Does it break offline migration because we use get_bind() ?
+
+ # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction,
+ # we split the 'ALTER TYPE' operation into several steps.
+ bind = op.get_bind()
+ if bind and bind.engine.name == "postgresql":
+ op.execute("ALTER TYPE resource_type_enum RENAME TO \
+ old_resource_type_enum")
+ op.execute("CREATE TYPE resource_type_enum AS ENUM \
+ ('generic', 'instance', 'swift_account', \
+ 'volume', 'ceph_account', 'network', \
+ 'identity', 'ipmi', 'stack', 'image', \
+ 'instance_network_interface', 'instance_disk')")
+ for table in ["resource", "resource_history"]:
+ op.execute("ALTER TABLE %s ALTER COLUMN type TYPE \
+ resource_type_enum USING \
+ type::text::resource_type_enum" % table)
+ op.execute("DROP TYPE old_resource_type_enum")
+
+ for table in ['instance_disk', 'instance_net_int']:
+ op.create_table(
+ table,
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('instance_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Index('ix_%s_id' % table, 'id', unique=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'],
+ name="fk_%s_id_resource_id" % table,
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+
+ op.create_table(
+ '%s_history' % table,
+ sa.Column('instance_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.Index('ix_%s_history_revision' % table, 'revision',
+ unique=False),
+ sa.ForeignKeyConstraint(['revision'],
+ ['resource_history.revision'],
+ name=("fk_%s_history_"
+ "resource_history_revision") % table,
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py
new file mode 100644
index 00000000..80b9416e
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py
@@ -0,0 +1,184 @@
+# Copyright 2017 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Remove slashes from original resource IDs, recompute their id with creator
+
+Revision ID: 397987e38570
+Revises: aba5a217ca9b
+Create Date: 2017-01-11 16:32:40.421758
+
+"""
+import uuid
+
+from alembic import op
+import six
+import sqlalchemy as sa
+import sqlalchemy_utils
+
+from gnocchi import utils
+
+# revision identifiers, used by Alembic.
+revision = '397987e38570'
+down_revision = 'aba5a217ca9b'
+branch_labels = None
+depends_on = None
+
+resource_type_table = sa.Table(
+ 'resource_type',
+ sa.MetaData(),
+ sa.Column('name', sa.String(255), nullable=False),
+ sa.Column('tablename', sa.String(35), nullable=False)
+)
+
+resource_table = sa.Table(
+ 'resource',
+ sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(),
+ nullable=False),
+ sa.Column('original_resource_id', sa.String(255)),
+ sa.Column('type', sa.String(255)),
+ sa.Column('creator', sa.String(255))
+)
+
+resourcehistory_table = sa.Table(
+ 'resource_history',
+ sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(),
+ nullable=False),
+ sa.Column('original_resource_id', sa.String(255))
+)
+
+metric_table = sa.Table(
+ 'metric',
+ sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(),
+ nullable=False),
+ sa.Column('name', sa.String(255)),
+ sa.Column('resource_id', sqlalchemy_utils.types.uuid.UUIDType())
+
+)
+
+
+uuidtype = sqlalchemy_utils.types.uuid.UUIDType()
+
+
+def upgrade():
+ connection = op.get_bind()
+
+ resource_type_tables = {}
+ resource_type_tablenames = dict(
+ (rt.name, rt.tablename)
+ for rt in connection.execute(resource_type_table.select())
+ if rt.tablename != "generic"
+ )
+
+ op.drop_constraint("fk_metric_resource_id_resource_id", "metric",
+ type_="foreignkey")
+ for name, table in resource_type_tablenames.items():
+ op.drop_constraint("fk_%s_id_resource_id" % table, table,
+ type_="foreignkey")
+
+ resource_type_tables[name] = sa.Table(
+ table,
+ sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(),
+ nullable=False),
+ )
+
+ for resource in connection.execute(resource_table.select()):
+
+ if resource.original_resource_id is None:
+            # statsd resources have no original_resource_id, so it is NULL
+ continue
+
+ try:
+ orig_as_uuid = uuid.UUID(str(resource.original_resource_id))
+ except ValueError:
+ pass
+ else:
+ if orig_as_uuid == resource.id:
+ continue
+
+ new_original_resource_id = resource.original_resource_id.replace(
+ '/', '_')
+ if six.PY2:
+ new_original_resource_id = new_original_resource_id.encode('utf-8')
+ new_id = sa.literal(uuidtype.process_bind_param(
+ str(utils.ResourceUUID(
+ new_original_resource_id, resource.creator)),
+ connection.dialect))
+
+ # resource table
+ connection.execute(
+ resource_table.update().where(
+ resource_table.c.id == resource.id
+ ).values(
+ id=new_id,
+ original_resource_id=new_original_resource_id
+ )
+ )
+ # resource history table
+ connection.execute(
+ resourcehistory_table.update().where(
+ resourcehistory_table.c.id == resource.id
+ ).values(
+ id=new_id,
+ original_resource_id=new_original_resource_id
+ )
+ )
+
+ if resource.type != "generic":
+ rtable = resource_type_tables[resource.type]
+
+ # resource table (type)
+ connection.execute(
+ rtable.update().where(
+ rtable.c.id == resource.id
+ ).values(id=new_id)
+ )
+
+ # Metric
+ connection.execute(
+ metric_table.update().where(
+ metric_table.c.resource_id == resource.id
+ ).values(
+ resource_id=new_id
+ )
+ )
+
+ for (name, table) in resource_type_tablenames.items():
+ op.create_foreign_key("fk_%s_id_resource_id" % table,
+ table, "resource",
+ ("id",), ("id",),
+ ondelete="CASCADE")
+
+ op.create_foreign_key("fk_metric_resource_id_resource_id",
+ "metric", "resource",
+ ("resource_id",), ("id",),
+ ondelete="SET NULL")
+
+ for metric in connection.execute(metric_table.select().where(
+ metric_table.c.name.like("%/%"))):
+ connection.execute(
+ metric_table.update().where(
+ metric_table.c.id == metric.id
+ ).values(
+ name=metric.name.replace('/', '_'),
+ )
+ )
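
The recomputed ids come from `utils.ResourceUUID`, which derives a deterministic UUID from the original resource id and its creator, so re-running the migration is idempotent. Conceptually it behaves like a namespaced `uuid5`; the sketch below uses a made-up namespace and separator, while the real derivation lives in `gnocchi.utils`:

```python
import uuid

# Made-up namespace for illustration; gnocchi.utils defines the real one.
_NS = uuid.UUID("00000000-0000-0000-0000-000000000000")

def resource_uuid(original_id, creator):
    # Same inputs always yield the same id.
    return uuid.uuid5(_NS, "%s\x00%s" % (original_id, creator))

assert resource_uuid("server_1", "admin") == resource_uuid("server_1", "admin")
assert resource_uuid("server_1", "admin") != resource_uuid("server_1", "bob")
```
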
diff --git a/gnocchi/indexer/alembic/versions/39b7d449d46a_create_metric_status_column.py b/gnocchi/indexer/alembic/versions/39b7d449d46a_create_metric_status_column.py
new file mode 100644
index 00000000..c3d7be99
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/39b7d449d46a_create_metric_status_column.py
@@ -0,0 +1,49 @@
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""create metric status column
+
+Revision ID: 39b7d449d46a
+Revises: 3901f5ea2b8e
+Create Date: 2015-09-16 13:25:34.249237
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '39b7d449d46a'
+down_revision = '3901f5ea2b8e'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ enum = sa.Enum("active", "delete", name="metric_status_enum")
+ enum.create(op.get_bind(), checkfirst=False)
+ op.add_column("metric",
+ sa.Column('status', enum,
+ nullable=False,
+ server_default="active"))
+ op.create_index('ix_metric_status', 'metric', ['status'], unique=False)
+
+ op.drop_constraint("fk_metric_resource_id_resource_id",
+ "metric", type_="foreignkey")
+ op.create_foreign_key("fk_metric_resource_id_resource_id",
+ "metric", "resource",
+ ("resource_id",), ("id",),
+ ondelete="SET NULL")
diff --git a/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py b/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py
new file mode 100644
index 00000000..cf6922c9
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py
@@ -0,0 +1,39 @@
+#
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""ck_started_before_ended
+
+Revision ID: 40c6aae14c3f
+Revises: 1c98ac614015
+Create Date: 2015-04-28 16:35:11.999144
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '40c6aae14c3f'
+down_revision = '1c98ac614015'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+
+
+def upgrade():
+ op.create_check_constraint("ck_started_before_ended",
+ "resource",
+ "started_at <= ended_at")
+ op.create_check_constraint("ck_started_before_ended",
+ "resource_history",
+ "started_at <= ended_at")
diff --git a/gnocchi/indexer/alembic/versions/42ee7f3e25f8_alter_flavorid_from_int_to_string.py b/gnocchi/indexer/alembic/versions/42ee7f3e25f8_alter_flavorid_from_int_to_string.py
new file mode 100644
index 00000000..e8d10d44
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/42ee7f3e25f8_alter_flavorid_from_int_to_string.py
@@ -0,0 +1,38 @@
+#
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""alter flavorid from int to string
+
+Revision ID: 42ee7f3e25f8
+Revises: f7d44b47928
+Create Date: 2015-05-10 21:20:24.941263
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '42ee7f3e25f8'
+down_revision = 'f7d44b47928'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ for table in ('instance', 'instance_history'):
+ op.alter_column(table, "flavor_id",
+ type_=sa.String(length=255),
+ nullable=False)
diff --git a/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py b/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py
new file mode 100644
index 00000000..5ac8dfcf
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py
@@ -0,0 +1,41 @@
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""allow image_ref to be null
+
+Revision ID: 469b308577a9
+Revises: 39b7d449d46a
+Create Date: 2015-11-29 00:23:39.998256
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '469b308577a9'
+down_revision = '39b7d449d46a'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.alter_column('instance', 'image_ref',
+ existing_type=sa.String(length=255),
+ nullable=True)
+ op.alter_column('instance_history', 'image_ref',
+ existing_type=sa.String(length=255),
+ nullable=True)
diff --git a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py
new file mode 100644
index 00000000..824a3e93
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py
@@ -0,0 +1,77 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""mysql_float_to_timestamp
+
+Revision ID: 5c4f93e5bb4
+Revises: 27d2a1d205ff
+Create Date: 2016-07-25 15:36:36.469847
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.sql import func
+
+from gnocchi.indexer import sqlalchemy_base
+
+# revision identifiers, used by Alembic.
+revision = '5c4f93e5bb4'
+down_revision = '27d2a1d205ff'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ bind = op.get_bind()
+ if bind and bind.engine.name == "mysql":
+ op.execute("SET time_zone = '+00:00'")
+        # NOTE(jd) That crappy engine that is MySQL does not have "ALTER
+        # TABLE … USING …", so we need to copy everything and convert…
+ for table_name, column_name in (("resource", "started_at"),
+ ("resource", "ended_at"),
+ ("resource", "revision_start"),
+ ("resource_history", "started_at"),
+ ("resource_history", "ended_at"),
+ ("resource_history", "revision_start"),
+ ("resource_history", "revision_end"),
+ ("resource_type", "updated_at")):
+
+ nullable = column_name == "ended_at"
+
+ existing_type = sa.types.DECIMAL(
+ precision=20, scale=6, asdecimal=True)
+ existing_col = sa.Column(
+ column_name,
+ existing_type,
+ nullable=nullable)
+ temp_col = sa.Column(
+ column_name + "_ts",
+ sqlalchemy_base.TimestampUTC(),
+ nullable=True)
+ op.add_column(table_name, temp_col)
+ t = sa.sql.table(table_name, existing_col, temp_col)
+ op.execute(t.update().values(
+ **{column_name + "_ts": func.from_unixtime(existing_col)}))
+ op.drop_column(table_name, column_name)
+ op.alter_column(table_name,
+ column_name + "_ts",
+ nullable=nullable,
+ type_=sqlalchemy_base.TimestampUTC(),
+ existing_nullable=nullable,
+ existing_type=existing_type,
+ new_column_name=column_name)
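
The migration above shows the only portable way to change a column type on MySQL without "ALTER TABLE … USING …": add a temporary column, backfill it server-side, drop the original, and rename. A minimal sketch of the same dance in isolation, assuming it runs inside an Alembic upgrade() on MySQL against a hypothetical table and column:

    from alembic import op
    import sqlalchemy as sa
    from sqlalchemy.sql import func

    def decimal_epoch_to_datetime(table_name, column_name):
        # 1. Add a nullable temporary column of the target type.
        op.add_column(table_name, sa.Column(
            column_name + "_ts", sa.DateTime(), nullable=True))
        # 2. Backfill it from the epoch value (FROM_UNIXTIME is MySQL-only).
        t = sa.sql.table(table_name,
                         sa.sql.column(column_name),
                         sa.sql.column(column_name + "_ts"))
        op.execute(t.update().values(
            **{column_name + "_ts": func.from_unixtime(t.c[column_name])}))
        # 3. Drop the old column and rename the new one into its place.
        op.drop_column(table_name, column_name)
        op.alter_column(table_name, column_name + "_ts",
                        new_column_name=column_name,
                        existing_type=sa.DateTime())

    # e.g. decimal_epoch_to_datetime("samples", "created_at")
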
diff --git a/gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py b/gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py
new file mode 100644
index 00000000..9dbb437c
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py
@@ -0,0 +1,249 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Change uuid to string
+
+Revision ID: 62a8dfb139bb
+Revises: 1f21cbdd6bc2
+Create Date: 2016-01-20 11:57:45.954607
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+import sqlalchemy_utils
+
+
+# revision identifiers, used by Alembic.
+revision = '62a8dfb139bb'
+down_revision = '1f21cbdd6bc2'
+branch_labels = None
+depends_on = None
+
+resourcehelper = sa.Table(
+ 'resource',
+ sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('tmp_created_by_user_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('tmp_created_by_project_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('tmp_user_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('tmp_project_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('created_by_user_id',
+ sa.String(length=255),
+ nullable=True),
+ sa.Column('created_by_project_id',
+ sa.String(length=255),
+ nullable=True),
+ sa.Column('user_id',
+ sa.String(length=255),
+ nullable=True),
+ sa.Column('project_id',
+ sa.String(length=255),
+ nullable=True),
+)
+
+resourcehistoryhelper = sa.Table(
+ 'resource_history',
+ sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('tmp_created_by_user_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('tmp_created_by_project_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('tmp_user_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('tmp_project_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('created_by_user_id',
+ sa.String(length=255),
+ nullable=True),
+ sa.Column('created_by_project_id',
+ sa.String(length=255),
+ nullable=True),
+ sa.Column('user_id',
+ sa.String(length=255),
+ nullable=True),
+ sa.Column('project_id',
+ sa.String(length=255),
+ nullable=True),
+)
+
+metrichelper = sa.Table(
+ 'metric',
+ sa.MetaData(),
+ sa.Column('id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('tmp_created_by_user_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('tmp_created_by_project_id',
+ sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=True),
+ sa.Column('created_by_user_id',
+ sa.String(length=255),
+ nullable=True),
+ sa.Column('created_by_project_id',
+ sa.String(length=255),
+ nullable=True),
+)
+
+
+def upgrade():
+ connection = op.get_bind()
+
+ # Rename user/project fields to tmp_*
+ op.alter_column('metric', 'created_by_project_id',
+ new_column_name='tmp_created_by_project_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('metric', 'created_by_user_id',
+ new_column_name='tmp_created_by_user_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource', 'created_by_project_id',
+ new_column_name='tmp_created_by_project_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource', 'created_by_user_id',
+ new_column_name='tmp_created_by_user_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource', 'project_id',
+ new_column_name='tmp_project_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource', 'user_id',
+ new_column_name='tmp_user_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource_history', 'created_by_project_id',
+ new_column_name='tmp_created_by_project_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource_history', 'created_by_user_id',
+ new_column_name='tmp_created_by_user_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource_history', 'project_id',
+ new_column_name='tmp_project_id',
+ existing_type=sa.BINARY(length=16))
+ op.alter_column('resource_history', 'user_id',
+ new_column_name='tmp_user_id',
+ existing_type=sa.BINARY(length=16))
+
+ # Add new user/project fields as strings
+ op.add_column('metric',
+ sa.Column('created_by_project_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('metric',
+ sa.Column('created_by_user_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource',
+ sa.Column('created_by_project_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource',
+ sa.Column('created_by_user_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource',
+ sa.Column('project_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource',
+ sa.Column('user_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource_history',
+ sa.Column('created_by_project_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource_history',
+ sa.Column('created_by_user_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource_history',
+ sa.Column('project_id',
+ sa.String(length=255), nullable=True))
+ op.add_column('resource_history',
+ sa.Column('user_id',
+ sa.String(length=255), nullable=True))
+
+ # Migrate data
+ for tablehelper in [resourcehelper, resourcehistoryhelper]:
+ for resource in connection.execute(tablehelper.select()):
+ if resource.tmp_created_by_project_id:
+ created_by_project_id = \
+ str(resource.tmp_created_by_project_id).replace('-', '')
+ else:
+ created_by_project_id = None
+ if resource.tmp_created_by_user_id:
+ created_by_user_id = \
+ str(resource.tmp_created_by_user_id).replace('-', '')
+ else:
+ created_by_user_id = None
+ if resource.tmp_project_id:
+ project_id = str(resource.tmp_project_id).replace('-', '')
+ else:
+ project_id = None
+ if resource.tmp_user_id:
+ user_id = str(resource.tmp_user_id).replace('-', '')
+ else:
+ user_id = None
+
+ connection.execute(
+ tablehelper.update().where(
+ tablehelper.c.id == resource.id
+ ).values(
+ created_by_project_id=created_by_project_id,
+ created_by_user_id=created_by_user_id,
+ project_id=project_id,
+ user_id=user_id,
+ )
+ )
+ for metric in connection.execute(metrichelper.select()):
+        if metric.tmp_created_by_project_id:
+            created_by_project_id = \
+                str(metric.tmp_created_by_project_id).replace('-', '')
+        else:
+            created_by_project_id = None
+        if metric.tmp_created_by_user_id:
+            created_by_user_id = \
+                str(metric.tmp_created_by_user_id).replace('-', '')
+        else:
+            created_by_user_id = None
+ connection.execute(
+ metrichelper.update().where(
+ metrichelper.c.id == metric.id
+ ).values(
+ created_by_project_id=created_by_project_id,
+ created_by_user_id=created_by_user_id,
+ )
+ )
+
+ # Delete temp fields
+ op.drop_column('metric', 'tmp_created_by_project_id')
+ op.drop_column('metric', 'tmp_created_by_user_id')
+ op.drop_column('resource', 'tmp_created_by_project_id')
+ op.drop_column('resource', 'tmp_created_by_user_id')
+ op.drop_column('resource', 'tmp_project_id')
+ op.drop_column('resource', 'tmp_user_id')
+ op.drop_column('resource_history', 'tmp_created_by_project_id')
+ op.drop_column('resource_history', 'tmp_created_by_user_id')
+ op.drop_column('resource_history', 'tmp_project_id')
+ op.drop_column('resource_history', 'tmp_user_id')
diff --git a/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py b/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py
new file mode 100644
index 00000000..9b3a88ff
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py
@@ -0,0 +1,43 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""resource_type state column
+
+Revision ID: 7e6f9d542f8b
+Revises: c62df18bf4ee
+Create Date: 2016-05-19 16:52:58.939088
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = '7e6f9d542f8b'
+down_revision = 'c62df18bf4ee'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ states = ("active", "creating", "creation_error", "deleting",
+ "deletion_error")
+ enum = sa.Enum(*states, name="resource_type_state_enum")
+ enum.create(op.get_bind(), checkfirst=False)
+ op.add_column("resource_type",
+ sa.Column('state', enum, nullable=False,
+ server_default="creating"))
+ rt = sa.sql.table('resource_type', sa.sql.column('state', enum))
+ op.execute(rt.update().values(state="active"))
diff --git a/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py b/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py
new file mode 100644
index 00000000..c95d2684
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py
@@ -0,0 +1,85 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""create resource_type table
+
+Revision ID: 828c16f70cce
+Revises: 9901e5ea4b6e
+Create Date: 2016-01-19 12:47:19.384127
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '828c16f70cce'
+down_revision = '9901e5ea4b6e'
+branch_labels = None
+depends_on = None
+
+
+type_string = sa.String(255)
+type_enum = sa.Enum('generic', 'instance',
+ 'swift_account', 'volume',
+ 'ceph_account', 'network',
+ 'identity', 'ipmi', 'stack',
+ 'image', 'instance_disk',
+ 'instance_network_interface',
+ 'host', 'host_disk',
+ 'host_network_interface',
+ name="resource_type_enum")
+
+
+def type_string_col(name, table):
+ return sa.Column(
+ name, type_string,
+ sa.ForeignKey('resource_type.name',
+ ondelete="RESTRICT",
+ name="fk_%s_resource_type_name" % table))
+
+
+def type_enum_col(name):
+ return sa.Column(name, type_enum,
+ nullable=False, default='generic')
+
+
+def upgrade():
+ resource_type = op.create_table(
+ 'resource_type',
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('name'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+
+ resource = sa.Table('resource', sa.MetaData(),
+ type_string_col("type", "resource"))
+ op.execute(resource_type.insert().from_select(
+ ['name'], sa.select([resource.c.type]).distinct()))
+
+ for table in ["resource", "resource_history"]:
+ op.alter_column(table, "type", new_column_name="old_type",
+ existing_type=type_enum)
+ op.add_column(table, type_string_col("type", table))
+ sa_table = sa.Table(table, sa.MetaData(),
+ type_string_col("type", table),
+ type_enum_col('old_type'))
+ op.execute(sa_table.update().values(
+ {sa_table.c.type: sa_table.c.old_type}))
+ op.drop_column(table, "old_type")
+ op.alter_column(table, "type", nullable=False,
+ existing_type=type_string)
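
The seeding step above relies on `insert().from_select()`, which copies the distinct `resource.type` values into the new table entirely server-side, with no round-trip through Python. A minimal sketch of the construct, using hypothetical `tags` and `articles` tables:

    import sqlalchemy as sa

    metadata = sa.MetaData()
    tags = sa.Table('tags', metadata,
                    sa.Column('name', sa.String(255), primary_key=True))
    articles = sa.Table('articles', metadata,
                        sa.Column('id', sa.Integer, primary_key=True),
                        sa.Column('tag', sa.String(255)))

    # Renders as:
    #   INSERT INTO tags (name) SELECT DISTINCT articles.tag FROM articles
    stmt = tags.insert().from_select(
        ['name'], sa.select([articles.c.tag]).distinct())
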
diff --git a/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py b/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py
new file mode 100644
index 00000000..f1a83bd4
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py
@@ -0,0 +1,48 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Migrate legacy resources to DB
+
+Revision ID: 8f376189b9eb
+Revises: d24877c22ab0
+Create Date: 2016-01-20 15:03:28.115656
+
+"""
+import json
+
+from alembic import op
+import sqlalchemy as sa
+
+from gnocchi.indexer import sqlalchemy_legacy_resources as legacy
+
+# revision identifiers, used by Alembic.
+revision = '8f376189b9eb'
+down_revision = 'd24877c22ab0'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ resource_type = sa.Table(
+ 'resource_type', sa.MetaData(),
+ sa.Column('name', sa.String(255), nullable=False),
+ sa.Column('attributes', sa.Text, nullable=False)
+ )
+
+ for name, attributes in legacy.ceilometer_resources.items():
+ text_attributes = json.dumps(attributes)
+ op.execute(resource_type.update().where(
+ resource_type.c.name == name
+ ).values({resource_type.c.attributes: text_attributes}))
diff --git a/gnocchi/indexer/alembic/versions/9901e5ea4b6e_create_host.py b/gnocchi/indexer/alembic/versions/9901e5ea4b6e_create_host.py
new file mode 100644
index 00000000..901e6f8f
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/9901e5ea4b6e_create_host.py
@@ -0,0 +1,127 @@
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""create host tables
+
+Revision ID: 9901e5ea4b6e
+Revises: a54c57ada3f5
+Create Date: 2015-12-15 17:20:25.092891
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '9901e5ea4b6e'
+down_revision = 'a54c57ada3f5'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+import sqlalchemy_utils
+
+
+def upgrade():
+ for table in ["resource", "resource_history"]:
+ op.alter_column(table, "type",
+ type_=sa.Enum('generic', 'instance', 'swift_account',
+ 'volume', 'ceph_account', 'network',
+ 'identity', 'ipmi', 'stack', 'image',
+ 'instance_network_interface',
+ 'instance_disk',
+ 'host', 'host_disk',
+ 'host_network_interface',
+ name='resource_type_enum'),
+ nullable=False)
+
+    # NOTE(sileht): postgresql has a builtin ENUM type, so
+    # just altering the column won't work.
+    # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
+    # Does this break offline migration because we use get_bind()?
+
+ # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction,
+ # we split the 'ALTER TYPE' operation into several steps.
+ bind = op.get_bind()
+ if bind and bind.engine.name == "postgresql":
+ op.execute("ALTER TYPE resource_type_enum RENAME TO \
+ old_resource_type_enum")
+ op.execute("CREATE TYPE resource_type_enum AS ENUM \
+ ('generic', 'instance', 'swift_account', \
+ 'volume', 'ceph_account', 'network', \
+ 'identity', 'ipmi', 'stack', 'image', \
+ 'instance_network_interface', 'instance_disk', \
+ 'host', 'host_disk', \
+ 'host_network_interface')")
+ for table in ["resource", "resource_history"]:
+ op.execute("ALTER TABLE %s ALTER COLUMN type TYPE \
+ resource_type_enum USING \
+ type::text::resource_type_enum" % table)
+ op.execute("DROP TYPE old_resource_type_enum")
+
+ op.create_table(
+ 'host',
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('host_name', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'],
+ name="fk_hypervisor_id_resource_id",
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+
+ op.create_table(
+ 'host_history',
+ sa.Column('host_name', sa.String(length=255), nullable=False),
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'],
+ ['resource_history.revision'],
+ name=("fk_hypervisor_history_"
+ "resource_history_revision"),
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+
+ for table in ['host_disk', 'host_net_int']:
+ op.create_table(
+ table,
+ sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False),
+ sa.Column('host_name', sa.String(length=255), nullable=False),
+ sa.Column('device_name', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['id'], ['resource.id'],
+ name="fk_%s_id_resource_id" % table,
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
+
+ op.create_table(
+ '%s_history' % table,
+ sa.Column('host_name', sa.String(length=255), nullable=False),
+ sa.Column('device_name', sa.String(length=255), nullable=True),
+ sa.Column('revision', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['revision'],
+ ['resource_history.revision'],
+ name=("fk_%s_history_"
+ "resource_history_revision") % table,
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('revision'),
+ mysql_charset='utf8',
+ mysql_engine='InnoDB'
+ )
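
As the NOTE above says, a PostgreSQL enum cannot simply be altered in place, so the migration renames the old type, creates the new one, converts each column through text, and drops the old type. Reduced to its bare steps (all names illustrative, assuming an Alembic context on PostgreSQL), the sequence is:

    # Hypothetical enum swap on a table "t" with an enum column "v".
    op.execute("ALTER TYPE my_enum RENAME TO old_my_enum")
    op.execute("CREATE TYPE my_enum AS ENUM ('a', 'b', 'c')")
    # The cast has to go through text first.
    op.execute("ALTER TABLE t ALTER COLUMN v TYPE my_enum "
               "USING v::text::my_enum")
    op.execute("DROP TYPE old_my_enum")
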
diff --git a/gnocchi/indexer/alembic/versions/a54c57ada3f5_removes_useless_indexes.py b/gnocchi/indexer/alembic/versions/a54c57ada3f5_removes_useless_indexes.py
new file mode 100644
index 00000000..b979857a
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/a54c57ada3f5_removes_useless_indexes.py
@@ -0,0 +1,72 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""merges primarykey and indexes
+
+Revision ID: a54c57ada3f5
+Revises: 1c2c61ac1f4c
+Create Date: 2016-02-04 09:09:23.180955
+
+"""
+
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = 'a54c57ada3f5'
+down_revision = '1c2c61ac1f4c'
+branch_labels = None
+depends_on = None
+
+resource_tables = [(t, "id") for t in [
+ "instance",
+ "instance_disk",
+ "instance_net_int",
+ "swift_account",
+ "volume",
+ "ceph_account",
+ "network",
+ "identity",
+ "ipmi",
+ "stack",
+ "image"
+]]
+history_tables = [("%s_history" % t, "revision")
+ for t, c in resource_tables]
+other_tables = [("metric", "id"), ("archive_policy", "name"),
+ ("archive_policy_rule", "name"),
+ ("resource", "id"),
+ ("resource_history", "id")]
+
+
+def upgrade():
+ bind = op.get_bind()
+    # NOTE(sileht): mysql can't drop an index on a foreign key column,
+    # even when it is not the index used by the foreign key itself...
+    # In our case we have two indexes, fk_resource_history_id_resource_id
+    # and ix_resource_history_id; we want to drop only the second, but mysql
+    # can't do that with a simple DROP INDEX ix_resource_history_id...
+    # so we have to remove the constraint and put it back afterwards...
+ if bind.engine.name == "mysql":
+ op.drop_constraint("fk_resource_history_id_resource_id",
+ type_="foreignkey", table_name="resource_history")
+
+ for table, colname in resource_tables + history_tables + other_tables:
+ op.drop_index("ix_%s_%s" % (table, colname), table_name=table)
+
+ if bind.engine.name == "mysql":
+ op.create_foreign_key("fk_resource_history_id_resource_id",
+ "resource_history", "resource", ["id"], ["id"],
+ ondelete="CASCADE")
diff --git a/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py b/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py
new file mode 100644
index 00000000..72339057
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py
@@ -0,0 +1,53 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""merge_created_in_creator
+
+Revision ID: aba5a217ca9b
+Revises: 5c4f93e5bb4
+Create Date: 2016-12-06 17:40:25.344578
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'aba5a217ca9b'
+down_revision = '5c4f93e5bb4'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ for table_name in ("resource", "resource_history", "metric"):
+ creator_col = sa.Column("creator", sa.String(255))
+ created_by_user_id_col = sa.Column("created_by_user_id",
+ sa.String(255))
+ created_by_project_id_col = sa.Column("created_by_project_id",
+ sa.String(255))
+ op.add_column(table_name, creator_col)
+ t = sa.sql.table(
+ table_name, creator_col,
+ created_by_user_id_col, created_by_project_id_col)
+ op.execute(
+ t.update().values(
+ creator=(
+ created_by_user_id_col + ":" + created_by_project_id_col
+                )).where(created_by_user_id_col.isnot(None)
+                         | created_by_project_id_col.isnot(None)))
+ op.drop_column(table_name, "created_by_user_id")
+ op.drop_column(table_name, "created_by_project_id")
diff --git a/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py b/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py
new file mode 100644
index 00000000..7d4deef5
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py
@@ -0,0 +1,38 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""add unit column for metric
+
+Revision ID: c62df18bf4ee
+Revises: 2e0b912062d1
+Create Date: 2016-05-04 12:31:25.350190
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'c62df18bf4ee'
+down_revision = '2e0b912062d1'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.add_column('metric', sa.Column('unit',
+ sa.String(length=31),
+ nullable=True))
diff --git a/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py b/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py
new file mode 100644
index 00000000..dda81e50
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py
@@ -0,0 +1,38 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Add attributes to resource_type
+
+Revision ID: d24877c22ab0
+Revises: 0718ed97e5b3
+Create Date: 2016-01-19 22:45:06.431190
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+import sqlalchemy_utils as sa_utils
+
+
+# revision identifiers, used by Alembic.
+revision = 'd24877c22ab0'
+down_revision = '0718ed97e5b3'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.add_column("resource_type",
+ sa.Column('attributes', sa_utils.JSONType(),))
diff --git a/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py b/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py
new file mode 100644
index 00000000..e5cfdd02
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py
@@ -0,0 +1,53 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""fix_host_foreign_key
+
+Revision ID: ed9c6ddc5c35
+Revises: ffc7bbeec0b0
+Create Date: 2016-04-15 06:25:34.649934
+
+"""
+
+from alembic import op
+from sqlalchemy import inspect
+
+# revision identifiers, used by Alembic.
+revision = 'ed9c6ddc5c35'
+down_revision = 'ffc7bbeec0b0'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ conn = op.get_bind()
+
+ insp = inspect(conn)
+ fk_names = [fk['name'] for fk in insp.get_foreign_keys('host')]
+ if ("fk_hypervisor_id_resource_id" not in fk_names and
+ "fk_host_id_resource_id" in fk_names):
+        # NOTE(sileht): we are already good, the DB has been created from
+        # scratch after "a54c57ada3f5"
+ return
+
+ op.drop_constraint("fk_hypervisor_id_resource_id", "host",
+ type_="foreignkey")
+ op.drop_constraint("fk_hypervisor_history_resource_history_revision",
+ "host_history", type_="foreignkey")
+ op.create_foreign_key("fk_host_id_resource_id", "host", "resource",
+ ["id"], ["id"], ondelete="CASCADE")
+ op.create_foreign_key("fk_host_history_resource_history_revision",
+ "host_history", "resource_history",
+ ["revision"], ["revision"], ondelete="CASCADE")
diff --git a/gnocchi/indexer/alembic/versions/f7d44b47928_uuid_to_binary.py b/gnocchi/indexer/alembic/versions/f7d44b47928_uuid_to_binary.py
new file mode 100644
index 00000000..c53c725d
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/f7d44b47928_uuid_to_binary.py
@@ -0,0 +1,89 @@
+#
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""uuid_to_binary
+
+Revision ID: f7d44b47928
+Revises: 40c6aae14c3f
+Create Date: 2015-04-30 13:29:29.074794
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'f7d44b47928'
+down_revision = '40c6aae14c3f'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy_utils.types.uuid
+
+
+def upgrade():
+ op.alter_column("metric", "id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False)
+
+ for table in ('resource', 'resource_history', 'metric'):
+ op.alter_column(table, "created_by_user_id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(
+ binary=True))
+ op.alter_column(table, "created_by_project_id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(
+ binary=True))
+ for table in ('resource', 'resource_history'):
+ op.alter_column(table, "user_id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(
+ binary=True))
+ op.alter_column(table, "project_id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(
+ binary=True))
+
+ # Drop all foreign keys linking to resource.id
+ for table in ('ceph_account', 'identity', 'volume', 'swift_account',
+ 'ipmi', 'image', 'network', 'stack', 'instance',
+ 'resource_history'):
+ op.drop_constraint("fk_%s_id_resource_id" % table, table,
+ type_="foreignkey")
+
+ op.drop_constraint("fk_metric_resource_id_resource_id", "metric",
+ type_="foreignkey")
+
+ # Now change the type of resource.id
+ op.alter_column("resource", "id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False)
+
+ # Now change all the types of $table.id and re-add the FK
+ for table in ('ceph_account', 'identity', 'volume', 'swift_account',
+ 'ipmi', 'image', 'network', 'stack', 'instance',
+ 'resource_history'):
+ op.alter_column(
+ table, "id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True),
+ nullable=False)
+
+ op.create_foreign_key("fk_%s_id_resource_id" % table,
+ table, "resource",
+ ("id",), ("id",),
+ ondelete="CASCADE")
+
+ op.alter_column("metric", "resource_id",
+ type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True))
+
+ op.create_foreign_key("fk_metric_resource_id_resource_id",
+ "metric", "resource",
+ ("resource_id",), ("id",),
+ ondelete="CASCADE")
diff --git a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py
new file mode 100644
index 00000000..1be98151
--- /dev/null
+++ b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py
@@ -0,0 +1,65 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""migrate_legacy_resources_to_db2
+
+Revision ID: ffc7bbeec0b0
+Revises: 8f376189b9eb
+Create Date: 2016-04-14 15:57:13.072128
+
+"""
+import json
+
+from alembic import op
+import sqlalchemy as sa
+
+from gnocchi.indexer import sqlalchemy_legacy_resources as legacy
+
+# revision identifiers, used by Alembic.
+revision = 'ffc7bbeec0b0'
+down_revision = '8f376189b9eb'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ bind = op.get_bind()
+
+ resource_type = sa.Table(
+ 'resource_type', sa.MetaData(),
+ sa.Column('name', sa.String(255), nullable=False),
+ sa.Column('tablename', sa.String(18), nullable=False),
+ sa.Column('attributes', sa.Text, nullable=False)
+ )
+
+ # NOTE(gordc): fix for incorrect migration:
+ # 0718ed97e5b3_add_tablename_to_resource_type.py#L46
+ op.execute(resource_type.update().where(
+ resource_type.c.name == "instance_network_interface"
+ ).values({'tablename': 'instance_net_int'}))
+
+ resource_type_names = [rt.name for rt in
+ list(bind.execute(resource_type.select()))]
+
+ for name, attributes in legacy.ceilometer_resources.items():
+ if name in resource_type_names:
+ continue
+ tablename = legacy.ceilometer_tablenames.get(name, name)
+ text_attributes = json.dumps(attributes)
+ op.execute(resource_type.insert().values({
+ resource_type.c.attributes: text_attributes,
+ resource_type.c.name: name,
+ resource_type.c.tablename: tablename,
+ }))
diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py
new file mode 100644
index 00000000..3497b52d
--- /dev/null
+++ b/gnocchi/indexer/sqlalchemy.py
@@ -0,0 +1,1235 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import absolute_import
+import itertools
+import operator
+import os.path
+import threading
+import uuid
+
+from alembic import migration
+from alembic import operations
+import oslo_db.api
+from oslo_db import exception
+from oslo_db.sqlalchemy import enginefacade
+from oslo_db.sqlalchemy import utils as oslo_db_utils
+from oslo_log import log
+try:
+ import psycopg2
+except ImportError:
+ psycopg2 = None
+try:
+ import pymysql.constants.ER
+ import pymysql.err
+except ImportError:
+ pymysql = None
+import six
+import sqlalchemy
+from sqlalchemy.engine import url as sqlalchemy_url
+import sqlalchemy.exc
+from sqlalchemy import types
+import sqlalchemy_utils
+
+from gnocchi import exceptions
+from gnocchi import indexer
+from gnocchi.indexer import sqlalchemy_base as base
+from gnocchi import resource_type
+from gnocchi import utils
+
+Base = base.Base
+Metric = base.Metric
+ArchivePolicy = base.ArchivePolicy
+ArchivePolicyRule = base.ArchivePolicyRule
+Resource = base.Resource
+ResourceHistory = base.ResourceHistory
+ResourceType = base.ResourceType
+
+_marker = indexer._marker
+
+LOG = log.getLogger(__name__)
+
+
+def _retry_on_exceptions(exc):
+ if not isinstance(exc, exception.DBError):
+ return False
+ inn_e = exc.inner_exception
+ if not isinstance(inn_e, sqlalchemy.exc.InternalError):
+ return False
+ return ((
+ pymysql and
+ isinstance(inn_e.orig, pymysql.err.InternalError) and
+ (inn_e.orig.args[0] == pymysql.constants.ER.TABLE_DEF_CHANGED)
+ ) or (
+ # HACK(jd) Sometimes, PostgreSQL raises an error such as "current
+ # transaction is aborted, commands ignored until end of transaction
+ # block" on its own catalog, so we need to retry, but this is not
+ # caught by oslo.db as a deadlock. This is likely because when we use
+        # Base.metadata.create_all(), sqlalchemy itself gets an error it does
+        # not catch. That is why this function exists: to paper over it, I
+        # guess.
+ psycopg2
+ and isinstance(inn_e.orig, psycopg2.InternalError)
+ # current transaction is aborted
+ and inn_e.orig.pgcode == '25P02'
+ ))
+
+
+def retry_on_deadlock(f):
+ return oslo_db.api.wrap_db_retry(retry_on_deadlock=True,
+ max_retries=20,
+ retry_interval=0.1,
+ max_retry_interval=2,
+ exception_checker=_retry_on_exceptions)(f)
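
`retry_on_deadlock` is simply `oslo_db.api.wrap_db_retry` configured with the checker above, so it can wrap any DB-touching callable. A hedged usage sketch (`save_resource` and its body are hypothetical):

    @retry_on_deadlock
    def save_resource(facade, resource):
        # Retried up to 20 times, with a 0.1s-2s backoff, on deadlocks and
        # on the MySQL/PostgreSQL errors matched by _retry_on_exceptions.
        with facade.writer() as session:
            session.add(resource)
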
+
+
+class PerInstanceFacade(object):
+ def __init__(self, conf):
+ self.trans = enginefacade.transaction_context()
+ self.trans.configure(
+ **dict(conf.database.items())
+ )
+ self._context = threading.local()
+
+ def independent_writer(self):
+ return self.trans.independent.writer.using(self._context)
+
+ def independent_reader(self):
+ return self.trans.independent.reader.using(self._context)
+
+ def writer_connection(self):
+ return self.trans.connection.writer.using(self._context)
+
+ def reader_connection(self):
+ return self.trans.connection.reader.using(self._context)
+
+ def writer(self):
+ return self.trans.writer.using(self._context)
+
+ def reader(self):
+ return self.trans.reader.using(self._context)
+
+ def get_engine(self):
+ # TODO(mbayer): add get_engine() to enginefacade
+ if not self.trans._factory._started:
+ self.trans._factory._start()
+ return self.trans._factory._writer_engine
+
+ def dispose(self):
+ # TODO(mbayer): add dispose() to enginefacade
+ if self.trans._factory._started:
+ self.trans._factory._writer_engine.dispose()
+
+
+class ResourceClassMapper(object):
+ def __init__(self):
+ # FIXME(sileht): 3 attributes, perhaps we need a better structure.
+ self._cache = {'generic': {'resource': base.Resource,
+ 'history': base.ResourceHistory,
+ 'updated_at': utils.utcnow()}}
+
+ @staticmethod
+ def _build_class_mappers(resource_type, baseclass=None):
+ tablename = resource_type.tablename
+ tables_args = {"extend_existing": True}
+ tables_args.update(base.COMMON_TABLES_ARGS)
+ # TODO(sileht): Add columns
+ if not baseclass:
+ baseclass = resource_type.to_baseclass()
+ resource_ext = type(
+ str("%s_resource" % tablename),
+ (baseclass, base.ResourceExtMixin, base.Resource),
+ {"__tablename__": tablename, "__table_args__": tables_args})
+ resource_history_ext = type(
+ str("%s_history" % tablename),
+ (baseclass, base.ResourceHistoryExtMixin, base.ResourceHistory),
+ {"__tablename__": ("%s_history" % tablename),
+ "__table_args__": tables_args})
+ return {'resource': resource_ext,
+ 'history': resource_history_ext,
+ 'updated_at': resource_type.updated_at}
+
+ def get_classes(self, resource_type):
+        # NOTE(sileht): We don't care about concurrency here because we allow
+        # sqlalchemy to override its global object with extend_existing=True;
+        # this is safe because the class name and table name are UUID-based.
+ try:
+ mappers = self._cache[resource_type.tablename]
+ # Cache is outdated
+ if (resource_type.name != "generic"
+ and resource_type.updated_at > mappers['updated_at']):
+ for table_purpose in ['resource', 'history']:
+ Base.metadata.remove(Base.metadata.tables[
+ mappers[table_purpose].__tablename__])
+ del self._cache[resource_type.tablename]
+ raise KeyError
+ return mappers
+ except KeyError:
+ mapper = self._build_class_mappers(resource_type)
+ self._cache[resource_type.tablename] = mapper
+ return mapper
+
+ @retry_on_deadlock
+ def map_and_create_tables(self, resource_type, facade):
+ if resource_type.state != "creating":
+ raise RuntimeError("map_and_create_tables must be called in state "
+ "creating")
+
+ mappers = self.get_classes(resource_type)
+ tables = [Base.metadata.tables[mappers["resource"].__tablename__],
+ Base.metadata.tables[mappers["history"].__tablename__]]
+
+ with facade.writer_connection() as connection:
+ Base.metadata.create_all(connection, tables=tables)
+
+        # NOTE(sileht): no need to protect the _cache with a lock;
+        # get_classes cannot be called while the type is in state "creating"
+ self._cache[resource_type.tablename] = mappers
+
+ @retry_on_deadlock
+ def unmap_and_delete_tables(self, resource_type, facade):
+ if resource_type.state != "deleting":
+ raise RuntimeError("unmap_and_delete_tables must be called in "
+ "state deleting")
+
+ mappers = self.get_classes(resource_type)
+ del self._cache[resource_type.tablename]
+
+ tables = [Base.metadata.tables[mappers['resource'].__tablename__],
+ Base.metadata.tables[mappers['history'].__tablename__]]
+
+        # NOTE(sileht): Base.metadata.drop_all doesn't
+        # issue CASCADE correctly, at least on postgresql.
+        # We drop foreign keys manually so that we don't lock the
+        # destination table for too long during the drop table.
+        # It's safe to not use a transaction since
+        # the resource_type table is already cleaned and committed,
+        # so this code cannot be triggered anymore for this
+        # resource_type.
+ with facade.writer_connection() as connection:
+ for table in tables:
+ for fk in table.foreign_key_constraints:
+ try:
+ self._safe_execute(
+ connection,
+ sqlalchemy.schema.DropConstraint(fk))
+ except exception.DBNonExistentConstraint:
+ pass
+ for table in tables:
+ try:
+ self._safe_execute(connection,
+ sqlalchemy.schema.DropTable(table))
+ except exception.DBNonExistentTable:
+ pass
+
+        # NOTE(sileht): If something goes wrong here, there is currently
+        # no automatic recovery; that's why we expose the state to the
+        # superuser. But we do allow deleting a resource type in an error
+        # state, in case the operator cleans up the mess manually and
+        # wants gnocchi to take over and finish the cleanup.
+
+ # TODO(sileht): Remove this resource on other workers
+ # by using expiration on cache ?
+ for table in tables:
+ Base.metadata.remove(table)
+
+ @retry_on_deadlock
+ def _safe_execute(self, connection, works):
+ # NOTE(sileht): we create a transaction to ensure mysql
+ # create locks on other transaction...
+ trans = connection.begin()
+ connection.execute(works)
+ trans.commit()
+
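
`_build_class_mappers` above manufactures a declarative resource class and its history twin at runtime with `type()`, keyed on the per-type table name; `extend_existing=True` lets a newer definition for the same table replace the cached one. A self-contained sketch of the technique, with a hypothetical base and column:

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    ExampleBase = declarative_base()

    def make_resource_class(tablename):
        # Build a mapped class at runtime; re-declaring the same table
        # name simply overwrites the previous definition.
        return type(str("%s_resource" % tablename), (ExampleBase,), {
            "__tablename__": tablename,
            "__table_args__": {"extend_existing": True},
            "id": sa.Column(sa.Integer, primary_key=True),
        })

    DiskResource = make_resource_class("rt_disk")
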
+
+class SQLAlchemyIndexer(indexer.IndexerDriver):
+ _RESOURCE_TYPE_MANAGER = ResourceClassMapper()
+
+ @classmethod
+ def _create_new_database(cls, url):
+ """Used by testing to create a new database."""
+ purl = sqlalchemy_url.make_url(
+ cls.dress_url(
+ url))
+ purl.database = purl.database + str(uuid.uuid4()).replace('-', '')
+ new_url = str(purl)
+ sqlalchemy_utils.create_database(new_url)
+ return new_url
+
+ @staticmethod
+ def dress_url(url):
+ # If no explicit driver has been set, we default to pymysql
+ if url.startswith("mysql://"):
+ url = sqlalchemy_url.make_url(url)
+ url.drivername = "mysql+pymysql"
+ return str(url)
+ return url
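
dress_url only rewrites bare mysql:// URLs; everything else passes through untouched. A quick check of the expected behaviour (this relies on the mutable URL objects of pre-1.4 SQLAlchemy, which is what this code targets):

    print(SQLAlchemyIndexer.dress_url("mysql://u:p@db/gnocchi"))
    # -> mysql+pymysql://u:p@db/gnocchi

    print(SQLAlchemyIndexer.dress_url("postgresql://u:p@db/gnocchi"))
    # -> postgresql://u:p@db/gnocchi  (unchanged)
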
+
+ def __init__(self, conf):
+ conf.set_override("connection",
+ self.dress_url(conf.indexer.url),
+ "database")
+ self.conf = conf
+ self.facade = PerInstanceFacade(conf)
+
+ def disconnect(self):
+ self.facade.dispose()
+
+ def _get_alembic_config(self):
+ from alembic import config
+
+ cfg = config.Config(
+ "%s/alembic/alembic.ini" % os.path.dirname(__file__))
+ cfg.set_main_option('sqlalchemy.url',
+ self.conf.database.connection)
+ return cfg
+
+ def get_engine(self):
+ return self.facade.get_engine()
+
+ def upgrade(self, nocreate=False):
+ from alembic import command
+ from alembic import migration
+
+ cfg = self._get_alembic_config()
+ cfg.conf = self.conf
+ if nocreate:
+ command.upgrade(cfg, "head")
+ else:
+ with self.facade.writer_connection() as connection:
+ ctxt = migration.MigrationContext.configure(connection)
+ current_version = ctxt.get_current_revision()
+ if current_version is None:
+ Base.metadata.create_all(connection)
+ command.stamp(cfg, "head")
+ else:
+ command.upgrade(cfg, "head")
+
+ try:
+ with self.facade.writer() as session:
+ session.add(
+ ResourceType(
+ name="generic",
+ tablename="generic",
+ state="active",
+ attributes=resource_type.ResourceTypeAttributes()))
+ except exception.DBDuplicateEntry:
+ pass
+
+ # NOTE(jd) We can have deadlock errors either here or later in
+ # map_and_create_tables(). We can't decorate create_resource_type()
+ # directly or each part might retry later on its own and cause a
+ # duplicate. And it seems there's no way to use the same session for
+ # both adding the resource_type in our table and calling
+ # map_and_create_tables() :-(
+ @retry_on_deadlock
+ def _add_resource_type(self, resource_type):
+ try:
+ with self.facade.writer() as session:
+ session.add(resource_type)
+ except exception.DBDuplicateEntry:
+ raise indexer.ResourceTypeAlreadyExists(resource_type.name)
+
+ def create_resource_type(self, resource_type):
+        # NOTE(sileht): mysql has a stupidly small length limitation on
+        # foreign key and index names, so we can't use the resource type
+        # name as the tablename; the limit is 64. The longest name we have
+        # is fk__h_revision_rh_revision, so 64 - 26 = 38, minus 3 chars
+        # for "rt_" leaves 35, and a uuid hex is 32 chars, so it fits.
+ tablename = "rt_%s" % uuid.uuid4().hex
+ resource_type = ResourceType(name=resource_type.name,
+ tablename=tablename,
+ attributes=resource_type.attributes,
+ state="creating")
+
+        # NOTE(sileht): ensure the driver is able to store the requested
+        # resource_type
+ resource_type.to_baseclass()
+
+ self._add_resource_type(resource_type)
+
+ try:
+ self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type,
+ self.facade)
+ except Exception:
+ # NOTE(sileht): We fail the DDL, we have no way to automatically
+ # recover, just set a particular state
+ self._set_resource_type_state(resource_type.name, "creation_error")
+ raise
+
+ self._set_resource_type_state(resource_type.name, "active")
+ resource_type.state = "active"
+ return resource_type
+
+ def update_resource_type(self, name, add_attributes=None,
+ del_attributes=None):
+ if not add_attributes and not del_attributes:
+ return
+ add_attributes = add_attributes or []
+ del_attributes = del_attributes or []
+
+ self._set_resource_type_state(name, "updating", "active")
+
+ try:
+ with self.facade.independent_writer() as session:
+ engine = session.connection()
+ rt = self._get_resource_type(session, name)
+
+ with self.facade.writer_connection() as connection:
+ ctx = migration.MigrationContext.configure(connection)
+ op = operations.Operations(ctx)
+ for table in [rt.tablename, '%s_history' % rt.tablename]:
+ with op.batch_alter_table(table) as batch_op:
+ for attr in del_attributes:
+ batch_op.drop_column(attr)
+ for attr in add_attributes:
+ server_default = attr.for_filling(
+ engine.dialect)
+ batch_op.add_column(sqlalchemy.Column(
+ attr.name, attr.satype,
+ nullable=not attr.required,
+ server_default=server_default))
+
+ # We have all rows filled now, we can remove
+ # the server_default
+ if server_default is not None:
+ batch_op.alter_column(
+ column_name=attr.name,
+ existing_type=attr.satype,
+ existing_server_default=server_default,
+ existing_nullable=not attr.required,
+ server_default=None)
+
+ rt.state = "active"
+ rt.updated_at = utils.utcnow()
+ rt.attributes.extend(add_attributes)
+ for attr in list(rt.attributes):
+ if attr.name in del_attributes:
+ rt.attributes.remove(attr)
+            # FIXME(sileht): yeah that's weird, but attributes is a custom
+            # json column and 'extend' doesn't trigger a sql update; this
+            # forces the update. I wonder if sqlalchemy provides something
+            # on the column description side.
+ sqlalchemy.orm.attributes.flag_modified(rt, 'attributes')
+
+ except Exception:
+ # NOTE(sileht): We fail the DDL, we have no way to automatically
+ # recover, just set a particular state
+            # TODO(sileht): Create a repair REST endpoint that deletes
+            # columns present in the resource type description but missing
+            # from the database. This would allow moving a type from
+            # updating_error back to active, which is currently not possible.
+ self._set_resource_type_state(name, "updating_error")
+ raise
+
+ return rt
+
+ def get_resource_type(self, name):
+ with self.facade.independent_reader() as session:
+ return self._get_resource_type(session, name)
+
+ def _get_resource_type(self, session, name):
+ resource_type = session.query(ResourceType).get(name)
+ if not resource_type:
+ raise indexer.NoSuchResourceType(name)
+ return resource_type
+
+ @retry_on_deadlock
+ def _set_resource_type_state(self, name, state,
+ expected_previous_state=None):
+ with self.facade.writer() as session:
+ q = session.query(ResourceType)
+ q = q.filter(ResourceType.name == name)
+ if expected_previous_state is not None:
+ q = q.filter(ResourceType.state == expected_previous_state)
+ update = q.update({'state': state})
+ if update == 0:
+ if expected_previous_state is not None:
+ rt = session.query(ResourceType).get(name)
+ if rt:
+ raise indexer.UnexpectedResourceTypeState(
+ name, expected_previous_state, rt.state)
+ raise indexer.IndexerException(
+ "Fail to set resource type state of %s to %s" %
+ (name, state))
+
+ @staticmethod
+ def get_resource_type_schema():
+ return base.RESOURCE_TYPE_SCHEMA_MANAGER
+
+ @staticmethod
+ def get_resource_attributes_schemas():
+ return [ext.plugin.schema() for ext in ResourceType.RESOURCE_SCHEMAS]
+
+ def list_resource_types(self):
+ with self.facade.independent_reader() as session:
+ return list(session.query(ResourceType).order_by(
+ ResourceType.name.asc()).all())
+
+    # NOTE(jd) We can have deadlock errors either here or later in
+    # unmap_and_delete_tables(). We can't decorate delete_resource_type()
+    # directly or each part might retry later on its own and cause a
+    # duplicate. And it seems there's no way to use the same session for
+    # both removing the resource_type from our table and calling
+    # unmap_and_delete_tables() :-(
+ @retry_on_deadlock
+ def _mark_as_deleting_resource_type(self, name):
+ try:
+ with self.facade.writer() as session:
+ rt = self._get_resource_type(session, name)
+ if rt.state not in ["active", "deletion_error",
+ "creation_error", "updating_error"]:
+ raise indexer.UnexpectedResourceTypeState(
+ name,
+ "active/deletion_error/creation_error/updating_error",
+ rt.state)
+ session.delete(rt)
+
+ # FIXME(sileht): Why do I need to flush here !!!
+ # I want remove/add in the same transaction !!!
+ session.flush()
+
+ # NOTE(sileht): delete and recreate to:
+ # * raise duplicate constraints
+ # * ensure we do not create a new resource type
+ # with the same name while we destroy the tables next
+ rt = ResourceType(name=rt.name,
+ tablename=rt.tablename,
+ state="deleting",
+ attributes=rt.attributes)
+ session.add(rt)
+ except exception.DBReferenceError as e:
+ if (e.constraint in [
+ 'fk_resource_resource_type_name',
+ 'fk_resource_history_resource_type_name',
+ 'fk_rh_resource_type_name']):
+ raise indexer.ResourceTypeInUse(name)
+ raise
+ return rt
+
+ @retry_on_deadlock
+ def _delete_resource_type(self, name):
+        # Really delete the resource type; no resource can be linked to it,
+        # because we cannot add a resource to a resource_type that is not
+        # in the 'active' state
+ with self.facade.writer() as session:
+ resource_type = self._get_resource_type(session, name)
+ session.delete(resource_type)
+
+ def delete_resource_type(self, name):
+ if name == "generic":
+ raise indexer.ResourceTypeInUse(name)
+
+ rt = self._mark_as_deleting_resource_type(name)
+
+ try:
+ self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables(
+ rt, self.facade)
+ except Exception:
+ # NOTE(sileht): We fail the DDL, we have no way to automatically
+ # recover, just set a particular state
+ self._set_resource_type_state(rt.name, "deletion_error")
+ raise
+
+ self._delete_resource_type(name)
+
+ def _resource_type_to_mappers(self, session, name):
+ resource_type = self._get_resource_type(session, name)
+ if resource_type.state != "active":
+ raise indexer.UnexpectedResourceTypeState(
+ name, "active", resource_type.state)
+ return self._RESOURCE_TYPE_MANAGER.get_classes(resource_type)
+
+ def list_archive_policies(self):
+ with self.facade.independent_reader() as session:
+ return list(session.query(ArchivePolicy).all())
+
+ def get_archive_policy(self, name):
+ with self.facade.independent_reader() as session:
+ return session.query(ArchivePolicy).get(name)
+
+ def update_archive_policy(self, name, ap_items):
+ with self.facade.independent_writer() as session:
+ ap = session.query(ArchivePolicy).get(name)
+ if not ap:
+ raise indexer.NoSuchArchivePolicy(name)
+ current = sorted(ap.definition,
+ key=operator.attrgetter('granularity'))
+ new = sorted(ap_items, key=operator.attrgetter('granularity'))
+ if len(current) != len(new):
+ raise indexer.UnsupportedArchivePolicyChange(
+ name, 'Cannot add or drop granularities')
+ for c, n in zip(current, new):
+ if c.granularity != n.granularity:
+ raise indexer.UnsupportedArchivePolicyChange(
+ name, '%s granularity interval was changed'
+ % c.granularity)
+            # NOTE(gordc): the ORM doesn't update a JSON column unless a
+            # new object is assigned to it
+ ap.definition = ap_items
+ return ap
+
+ def delete_archive_policy(self, name):
+ constraints = [
+ "fk_metric_ap_name_ap_name",
+ "fk_apr_ap_name_ap_name"]
+ with self.facade.writer() as session:
+ try:
+ if session.query(ArchivePolicy).filter(
+ ArchivePolicy.name == name).delete() == 0:
+ raise indexer.NoSuchArchivePolicy(name)
+ except exception.DBReferenceError as e:
+ if e.constraint in constraints:
+ raise indexer.ArchivePolicyInUse(name)
+ raise
+
+ def create_archive_policy(self, archive_policy):
+ ap = ArchivePolicy(
+ name=archive_policy.name,
+ back_window=archive_policy.back_window,
+ definition=archive_policy.definition,
+ aggregation_methods=list(archive_policy.aggregation_methods),
+ )
+ try:
+ with self.facade.writer() as session:
+ session.add(ap)
+ except exception.DBDuplicateEntry:
+ raise indexer.ArchivePolicyAlreadyExists(archive_policy.name)
+ return ap
+
+ def list_archive_policy_rules(self):
+ with self.facade.independent_reader() as session:
+ return session.query(ArchivePolicyRule).order_by(
+ ArchivePolicyRule.metric_pattern.desc()).all()
+
+ def get_archive_policy_rule(self, name):
+ with self.facade.independent_reader() as session:
+ return session.query(ArchivePolicyRule).get(name)
+
+ def delete_archive_policy_rule(self, name):
+ with self.facade.writer() as session:
+ if session.query(ArchivePolicyRule).filter(
+ ArchivePolicyRule.name == name).delete() == 0:
+ raise indexer.NoSuchArchivePolicyRule(name)
+
+ def create_archive_policy_rule(self, name, metric_pattern,
+ archive_policy_name):
+ apr = ArchivePolicyRule(
+ name=name,
+ archive_policy_name=archive_policy_name,
+ metric_pattern=metric_pattern
+ )
+ try:
+ with self.facade.writer() as session:
+ session.add(apr)
+ except exception.DBDuplicateEntry:
+ raise indexer.ArchivePolicyRuleAlreadyExists(name)
+ return apr
+
+ @retry_on_deadlock
+ def create_metric(self, id, creator, archive_policy_name,
+ name=None, unit=None, resource_id=None):
+ m = Metric(id=id,
+ creator=creator,
+ archive_policy_name=archive_policy_name,
+ name=name,
+ unit=unit,
+ resource_id=resource_id)
+ try:
+ with self.facade.writer() as session:
+ session.add(m)
+ except exception.DBDuplicateEntry:
+ raise indexer.NamedMetricAlreadyExists(name)
+ except exception.DBReferenceError as e:
+ if (e.constraint ==
+ 'fk_metric_ap_name_ap_name'):
+ raise indexer.NoSuchArchivePolicy(archive_policy_name)
+ if e.constraint == 'fk_metric_resource_id_resource_id':
+ raise indexer.NoSuchResource(resource_id)
+ raise
+ return m
+
+ @retry_on_deadlock
+ def list_metrics(self, names=None, ids=None, details=False,
+ status='active', limit=None, marker=None, sorts=None,
+ creator=None, **kwargs):
+ sorts = sorts or []
+ if ids is not None and not ids:
+ return []
+ if names is not None and not names:
+ return []
+ with self.facade.independent_reader() as session:
+ q = session.query(Metric).filter(
+ Metric.status == status)
+ if names is not None:
+ q = q.filter(Metric.name.in_(names))
+ if ids is not None:
+ q = q.filter(Metric.id.in_(ids))
+ if creator is not None:
+ if creator[0] == ":":
+ q = q.filter(Metric.creator.like("%%%s" % creator))
+ elif creator[-1] == ":":
+ q = q.filter(Metric.creator.like("%s%%" % creator))
+ else:
+ q = q.filter(Metric.creator == creator)
+ for attr in kwargs:
+ q = q.filter(getattr(Metric, attr) == kwargs[attr])
+ if details:
+ q = q.options(sqlalchemy.orm.joinedload('resource'))
+
+ sort_keys, sort_dirs = self._build_sort_keys(sorts)
+
+ if marker:
+ metric_marker = self.list_metrics(ids=[marker])
+ if metric_marker:
+ metric_marker = metric_marker[0]
+ else:
+ raise indexer.InvalidPagination(
+ "Invalid marker: `%s'" % marker)
+ else:
+ metric_marker = None
+
+ try:
+ q = oslo_db_utils.paginate_query(q, Metric, limit=limit,
+ sort_keys=sort_keys,
+ marker=metric_marker,
+ sort_dirs=sort_dirs)
+ except ValueError as e:
+ raise indexer.InvalidPagination(e)
+ except exception.InvalidSortKey as e:
+ raise indexer.InvalidPagination(e)
+
+ return list(q.all())
+
+ @retry_on_deadlock
+ def create_resource(self, resource_type, id,
+ creator, user_id=None, project_id=None,
+ started_at=None, ended_at=None, metrics=None,
+ original_resource_id=None,
+ **kwargs):
+ if (started_at is not None
+ and ended_at is not None
+ and started_at > ended_at):
+ raise ValueError(
+ "Start timestamp cannot be after end timestamp")
+ if original_resource_id is None:
+ original_resource_id = str(id)
+ with self.facade.writer() as session:
+ resource_cls = self._resource_type_to_mappers(
+ session, resource_type)['resource']
+ r = resource_cls(
+ id=id,
+ original_resource_id=original_resource_id,
+ type=resource_type,
+ creator=creator,
+ user_id=user_id,
+ project_id=project_id,
+ started_at=started_at,
+ ended_at=ended_at,
+ **kwargs)
+ session.add(r)
+ try:
+ session.flush()
+ except exception.DBDuplicateEntry:
+ raise indexer.ResourceAlreadyExists(id)
+ except exception.DBReferenceError as ex:
+ raise indexer.ResourceValueError(r.type,
+ ex.key,
+ getattr(r, ex.key))
+ if metrics is not None:
+ self._set_metrics_for_resource(session, r, metrics)
+
+ # NOTE(jd) Force load of metrics :)
+ r.metrics
+
+ return r
+
+ @retry_on_deadlock
+ def update_resource(self, resource_type,
+ resource_id, ended_at=_marker, metrics=_marker,
+ append_metrics=False,
+ create_revision=True,
+ **kwargs):
+ with self.facade.writer() as session:
+ mappers = self._resource_type_to_mappers(session, resource_type)
+ resource_cls = mappers["resource"]
+ resource_history_cls = mappers["history"]
+
+ try:
+            # NOTE(sileht): We use FOR UPDATE, which is not Galera friendly,
+            # but there is no other way to cleanly patch a resource and
+            # store its history safely when two concurrent calls are made.
+ q = session.query(resource_cls).filter(
+ resource_cls.id == resource_id).with_for_update()
+
+ r = q.first()
+ if r is None:
+ raise indexer.NoSuchResource(resource_id)
+
+ if create_revision:
+ # Build history
+ rh = resource_history_cls()
+ for col in sqlalchemy.inspect(resource_cls).columns:
+ setattr(rh, col.name, getattr(r, col.name))
+ now = utils.utcnow()
+ rh.revision_end = now
+ session.add(rh)
+ r.revision_start = now
+
+ # Update the resource
+ if ended_at is not _marker:
+                # NOTE(jd) MySQL does not honor CHECK constraints. I hate it.
+ engine = session.connection()
+ if engine.dialect.name == "mysql":
+ if r.started_at is not None and ended_at is not None:
+ if r.started_at > ended_at:
+ raise indexer.ResourceValueError(
+ resource_type, "ended_at", ended_at)
+ r.ended_at = ended_at
+
+ if kwargs:
+ for attribute, value in six.iteritems(kwargs):
+ if hasattr(r, attribute):
+ setattr(r, attribute, value)
+ else:
+ raise indexer.ResourceAttributeError(
+ r.type, attribute)
+
+ if metrics is not _marker:
+ if not append_metrics:
+ session.query(Metric).filter(
+ Metric.resource_id == resource_id,
+ Metric.status == 'active').update(
+ {"resource_id": None})
+ self._set_metrics_for_resource(session, r, metrics)
+
+ session.flush()
+ except exception.DBConstraintError as e:
+ if e.check_name == "ck_started_before_ended":
+ raise indexer.ResourceValueError(
+ resource_type, "ended_at", ended_at)
+ raise
+
+            # NOTE(jd) Force load of metrics :)
+ r.metrics
+
+ return r
+
+ @staticmethod
+ def _set_metrics_for_resource(session, r, metrics):
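+        # `metrics` maps metric names either to an existing metric UUID to
+        # attach, or to a metric definition dict. An illustrative value
+        # (the names and policy are made up):
+        #   {"cpu": uuid.UUID(...),
+        #    "mem": {"archive_policy_name": "low", "unit": "B"}}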
+ for name, value in six.iteritems(metrics):
+ if isinstance(value, uuid.UUID):
+ try:
+ update = session.query(Metric).filter(
+ Metric.id == value,
+ Metric.status == 'active',
+ Metric.creator == r.creator,
+ ).update({"resource_id": r.id, "name": name})
+ except exception.DBDuplicateEntry:
+ raise indexer.NamedMetricAlreadyExists(name)
+ if update == 0:
+ raise indexer.NoSuchMetric(value)
+ else:
+ unit = value.get('unit')
+ ap_name = value['archive_policy_name']
+ m = Metric(id=uuid.uuid4(),
+ creator=r.creator,
+ archive_policy_name=ap_name,
+ name=name,
+ unit=unit,
+ resource_id=r.id)
+ session.add(m)
+ try:
+ session.flush()
+ except exception.DBDuplicateEntry:
+ raise indexer.NamedMetricAlreadyExists(name)
+ except exception.DBReferenceError as e:
+ if (e.constraint ==
+ 'fk_metric_ap_name_ap_name'):
+ raise indexer.NoSuchArchivePolicy(ap_name)
+ raise
+
+ session.expire(r, ['metrics'])
+
+ @retry_on_deadlock
+ def delete_resource(self, resource_id):
+ with self.facade.writer() as session:
+            # We are going to delete the resource; the ON DELETE SET NULL on
+            # the metric foreign key will set the resource_id of the attached
+            # metrics to NULL, so we just have to mark their status as
+            # 'delete'.
+ session.query(Metric).filter(
+ Metric.resource_id == resource_id).update(
+ {"status": "delete"})
+ if session.query(Resource).filter(
+ Resource.id == resource_id).delete() == 0:
+ raise indexer.NoSuchResource(resource_id)
+
+ @retry_on_deadlock
+ def delete_resources(self, resource_type='generic',
+ attribute_filter=None):
+ if not attribute_filter:
+ raise ValueError("attribute_filter must be set")
+
+ with self.facade.writer() as session:
+ target_cls = self._resource_type_to_mappers(
+ session, resource_type)["resource"]
+
+ q = session.query(target_cls.id)
+
+ engine = session.connection()
+ try:
+ f = QueryTransformer.build_filter(engine.dialect.name,
+ target_cls,
+ attribute_filter)
+ except indexer.QueryAttributeError as e:
+ # NOTE(jd) The QueryAttributeError does not know about
+ # resource_type, so convert it
+ raise indexer.ResourceAttributeError(resource_type,
+ e.attribute)
+
+ q = q.filter(f)
+
+ session.query(Metric).filter(
+ Metric.resource_id.in_(q)
+ ).update({"status": "delete"},
+ synchronize_session=False)
+ return q.delete(synchronize_session=False)
+
+ @retry_on_deadlock
+ def get_resource(self, resource_type, resource_id, with_metrics=False):
+ with self.facade.independent_reader() as session:
+ resource_cls = self._resource_type_to_mappers(
+ session, resource_type)['resource']
+ q = session.query(
+ resource_cls).filter(
+ resource_cls.id == resource_id)
+ if with_metrics:
+ q = q.options(sqlalchemy.orm.joinedload('metrics'))
+ return q.first()
+
+ def _get_history_result_mapper(self, session, resource_type):
+ mappers = self._resource_type_to_mappers(session, resource_type)
+ resource_cls = mappers['resource']
+ history_cls = mappers['history']
+
+ resource_cols = {}
+ history_cols = {}
+ for col in sqlalchemy.inspect(history_cls).columns:
+ history_cols[col.name] = col
+ if col.name in ["revision", "revision_end"]:
+ value = None if col.name == "revision_end" else -1
+ resource_cols[col.name] = sqlalchemy.bindparam(
+ col.name, value, col.type).label(col.name)
+ else:
+ resource_cols[col.name] = getattr(resource_cls, col.name)
+ s1 = sqlalchemy.select(history_cols.values())
+ s2 = sqlalchemy.select(resource_cols.values())
+ if resource_type != "generic":
+ s1 = s1.where(history_cls.revision == ResourceHistory.revision)
+ s2 = s2.where(resource_cls.id == Resource.id)
+ union_stmt = sqlalchemy.union(s1, s2)
+ stmt = union_stmt.alias("result")
+
+ class Result(base.ResourceJsonifier, base.GnocchiBase):
+ def __iter__(self):
+ return iter((key, getattr(self, key)) for key in stmt.c.keys())
+
+ sqlalchemy.orm.mapper(
+ Result, stmt, primary_key=[stmt.c.id, stmt.c.revision],
+ properties={
+ 'metrics': sqlalchemy.orm.relationship(
+ Metric,
+ primaryjoin=sqlalchemy.and_(
+ Metric.resource_id == stmt.c.id,
+ Metric.status == 'active'),
+ foreign_keys=Metric.resource_id)
+ })
+
+ return Result
+
+ @retry_on_deadlock
+ def list_resources(self, resource_type='generic',
+ attribute_filter=None,
+ details=False,
+ history=False,
+ limit=None,
+ marker=None,
+ sorts=None):
+ sorts = sorts or []
+
+ with self.facade.independent_reader() as session:
+ if history:
+ target_cls = self._get_history_result_mapper(
+ session, resource_type)
+ else:
+ target_cls = self._resource_type_to_mappers(
+ session, resource_type)["resource"]
+
+ q = session.query(target_cls)
+
+ if attribute_filter:
+ engine = session.connection()
+ try:
+ f = QueryTransformer.build_filter(engine.dialect.name,
+ target_cls,
+ attribute_filter)
+ except indexer.QueryAttributeError as e:
+ # NOTE(jd) The QueryAttributeError does not know about
+ # resource_type, so convert it
+ raise indexer.ResourceAttributeError(resource_type,
+ e.attribute)
+
+ q = q.filter(f)
+
+ sort_keys, sort_dirs = self._build_sort_keys(sorts)
+
+ if marker:
+ resource_marker = self.get_resource(resource_type, marker)
+ if resource_marker is None:
+ raise indexer.InvalidPagination(
+ "Invalid marker: `%s'" % marker)
+ else:
+ resource_marker = None
+
+ try:
+ q = oslo_db_utils.paginate_query(q, target_cls, limit=limit,
+ sort_keys=sort_keys,
+ marker=resource_marker,
+ sort_dirs=sort_dirs)
+ except ValueError as e:
+ raise indexer.InvalidPagination(e)
+ except exception.InvalidSortKey as e:
+ raise indexer.InvalidPagination(e)
+
+ # Always include metrics
+ q = q.options(sqlalchemy.orm.joinedload("metrics"))
+ all_resources = q.all()
+
+ if details:
+ grouped_by_type = itertools.groupby(
+ all_resources, lambda r: (r.revision != -1, r.type))
+ all_resources = []
+ for (is_history, type), resources in grouped_by_type:
+ if type == 'generic':
+ # No need for a second query
+ all_resources.extend(resources)
+ else:
+ try:
+ target_cls = self._resource_type_to_mappers(
+ session, type)['history' if is_history else
+ 'resource']
+ except (indexer.UnexpectedResourceTypeState,
+ indexer.NoSuchResourceType):
+ # NOTE(sileht): This resource_type have been
+ # removed in the meantime.
+ continue
+ if is_history:
+ f = target_cls.revision.in_([r.revision
+ for r in resources])
+ else:
+ f = target_cls.id.in_([r.id for r in resources])
+
+ q = session.query(target_cls).filter(f)
+ # Always include metrics
+ q = q.options(sqlalchemy.orm.joinedload('metrics'))
+ try:
+ all_resources.extend(q.all())
+ except sqlalchemy.exc.ProgrammingError as e:
+ # NOTE(jd) This exception can happen when the
+ # resources and their resource type have been
+ # deleted in the meantime:
+ # sqlalchemy.exc.ProgrammingError:
+ # (pymysql.err.ProgrammingError)
+ # (1146, "Table \'test.rt_f00\' doesn\'t exist")
+ # In that case, just ignore those resources.
+ if (not pymysql
+ or not isinstance(
+ e, sqlalchemy.exc.ProgrammingError)
+ or not isinstance(
+ e.orig, pymysql.err.ProgrammingError)
+ or (e.orig.args[0]
+ != pymysql.constants.ER.NO_SUCH_TABLE)):
+ raise
+
+ return all_resources
+
+ def expunge_metric(self, id):
+ with self.facade.writer() as session:
+ if session.query(Metric).filter(Metric.id == id).delete() == 0:
+ raise indexer.NoSuchMetric(id)
+
+ def delete_metric(self, id):
+ with self.facade.writer() as session:
+ if session.query(Metric).filter(
+ Metric.id == id, Metric.status == 'active').update(
+ {"status": "delete"}) == 0:
+ raise indexer.NoSuchMetric(id)
+
+ @staticmethod
+ def _build_sort_keys(sorts):
+ # transform the api-wg representation to the oslo.db one
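+        # e.g. (illustrative): sorts=["name:desc", "started_at"] yields
+        # sort_keys=["name", "started_at"] and sort_dirs=["desc", "asc"],
+        # before the unique "id" column is appended below.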
+ sort_keys = []
+ sort_dirs = []
+ for sort in sorts:
+ sort_key, __, sort_dir = sort.partition(":")
+ sort_keys.append(sort_key.strip())
+ sort_dirs.append(sort_dir or 'asc')
+
+        # paginate_query requires at least one unique column
+ if 'id' not in sort_keys:
+ sort_keys.append('id')
+ sort_dirs.append('asc')
+
+ return sort_keys, sort_dirs
+
+
+class QueryTransformer(object):
+ unary_operators = {
+ u"not": sqlalchemy.not_,
+ }
+
+ binary_operators = {
+ u"=": operator.eq,
+ u"==": operator.eq,
+ u"eq": operator.eq,
+
+ u"<": operator.lt,
+ u"lt": operator.lt,
+
+ u">": operator.gt,
+ u"gt": operator.gt,
+
+ u"<=": operator.le,
+ u"≤": operator.le,
+ u"le": operator.le,
+
+ u">=": operator.ge,
+ u"≥": operator.ge,
+ u"ge": operator.ge,
+
+ u"!=": operator.ne,
+ u"≠": operator.ne,
+ u"ne": operator.ne,
+
+ u"in": lambda field_name, values: field_name.in_(values),
+
+ u"like": lambda field, value: field.like(value),
+ }
+
+ multiple_operators = {
+ u"or": sqlalchemy.or_,
+ u"∨": sqlalchemy.or_,
+
+ u"and": sqlalchemy.and_,
+ u"∧": sqlalchemy.and_,
+ }
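+
+    # An illustrative filter tree accepted by build_filter() below:
+    #   {"and": [{"=": {"type": "instance"}},
+    #            {"≥": {"started_at": "2017-01-01"}}]}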
+
+ converters = (
+ (base.TimestampUTC, utils.to_datetime),
+ (types.String, six.text_type),
+ (types.Integer, int),
+ (types.Numeric, float),
+ )
+
+ @classmethod
+ def _handle_multiple_op(cls, engine, table, op, nodes):
+ return op(*[
+ cls.build_filter(engine, table, node)
+ for node in nodes
+ ])
+
+ @classmethod
+ def _handle_unary_op(cls, engine, table, op, node):
+ return op(cls.build_filter(engine, table, node))
+
+ @classmethod
+ def _handle_binary_op(cls, engine, table, op, nodes):
+ try:
+ field_name, value = list(nodes.items())[0]
+ except Exception:
+ raise indexer.QueryError()
+
+ if field_name == "lifespan":
+ attr = getattr(table, "ended_at") - getattr(table, "started_at")
+ value = utils.to_timespan(value)
+ if engine == "mysql":
+                # NOTE(jd) Subtracting two timestamps in MySQL yields weird
+                # results based on string comparison. It's useless and does
+                # not work at all with seconds or anything. Just skip it.
+ raise exceptions.NotImplementedError
+ elif field_name == "created_by_user_id":
+ creator = getattr(table, "creator")
+ if op == operator.eq:
+ return creator.like("%s:%%" % value)
+ elif op == operator.ne:
+ return sqlalchemy.not_(creator.like("%s:%%" % value))
+ elif op == cls.binary_operators[u"like"]:
+ return creator.like("%s:%%" % value)
+ raise indexer.QueryValueError(value, field_name)
+ elif field_name == "created_by_project_id":
+ creator = getattr(table, "creator")
+ if op == operator.eq:
+ return creator.like("%%:%s" % value)
+ elif op == operator.ne:
+ return sqlalchemy.not_(creator.like("%%:%s" % value))
+ elif op == cls.binary_operators[u"like"]:
+ return creator.like("%%:%s" % value)
+ raise indexer.QueryValueError(value, field_name)
+ else:
+ try:
+ attr = getattr(table, field_name)
+ except AttributeError:
+ raise indexer.QueryAttributeError(table, field_name)
+
+ if not hasattr(attr, "type"):
+ # This is not a column
+ raise indexer.QueryAttributeError(table, field_name)
+
+ # Convert value to the right type
+ if value is not None:
+ for klass, converter in cls.converters:
+ if isinstance(attr.type, klass):
+ try:
+ if isinstance(value, list):
+ # we got a list for in_ operator
+ value = [converter(v) for v in value]
+ else:
+ value = converter(value)
+ except Exception:
+ raise indexer.QueryValueError(value, field_name)
+ break
+
+ return op(attr, value)
+
+ @classmethod
+ def build_filter(cls, engine, table, tree):
+ try:
+ operator, nodes = list(tree.items())[0]
+ except Exception:
+ raise indexer.QueryError()
+
+ try:
+ op = cls.multiple_operators[operator]
+ except KeyError:
+ try:
+ op = cls.binary_operators[operator]
+ except KeyError:
+ try:
+ op = cls.unary_operators[operator]
+ except KeyError:
+ raise indexer.QueryInvalidOperator(operator)
+                return cls._handle_unary_op(engine, table, op, nodes)
+ return cls._handle_binary_op(engine, table, op, nodes)
+ return cls._handle_multiple_op(engine, table, op, nodes)
diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py
new file mode 100644
index 00000000..1ebc60a9
--- /dev/null
+++ b/gnocchi/indexer/sqlalchemy_base.py
@@ -0,0 +1,443 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import absolute_import
+import calendar
+import datetime
+import decimal
+
+import iso8601
+from oslo_db.sqlalchemy import models
+import six
+import sqlalchemy
+from sqlalchemy.dialects import mysql
+from sqlalchemy.ext import declarative
+from sqlalchemy import types
+import sqlalchemy_utils
+
+from gnocchi import archive_policy
+from gnocchi import indexer
+from gnocchi import resource_type
+from gnocchi import storage
+from gnocchi import utils
+
+Base = declarative.declarative_base()
+
+COMMON_TABLES_ARGS = {'mysql_charset': "utf8",
+ 'mysql_engine': "InnoDB"}
+
+
+class PreciseTimestamp(types.TypeDecorator):
+ """Represents a timestamp precise to the microsecond.
+
+ Deprecated in favor of TimestampUTC.
+ Still used in alembic migrations.
+ """
+
+ impl = sqlalchemy.DateTime
+
+ @staticmethod
+ def _decimal_to_dt(dec):
+ """Return a datetime from Decimal unixtime format."""
+ if dec is None:
+ return None
+
+ integer = int(dec)
+ micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000)
+        dt = datetime.datetime.utcfromtimestamp(integer)
+        return dt.replace(microsecond=int(round(micro)))
+
+ @staticmethod
+ def _dt_to_decimal(utc):
+ """Datetime to Decimal.
+
+ Some databases don't store microseconds in datetime
+ so we always store as Decimal unixtime.
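+
+        For example (illustrative),
+        datetime.datetime(2017, 1, 1, microsecond=500000) becomes
+        Decimal('1483228800.5').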
+ """
+ if utc is None:
+ return None
+
+ decimal.getcontext().prec = 30
+ return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) +
+ (decimal.Decimal(str(utc.microsecond)) /
+ decimal.Decimal("1000000.0")))
+
+ def load_dialect_impl(self, dialect):
+ if dialect.name == 'mysql':
+ return dialect.type_descriptor(
+ types.DECIMAL(precision=20,
+ scale=6,
+ asdecimal=True))
+ return dialect.type_descriptor(self.impl)
+
+ def compare_against_backend(self, dialect, conn_type):
+ if dialect.name == 'mysql':
+ return issubclass(type(conn_type), types.DECIMAL)
+ return issubclass(type(conn_type), type(self.impl))
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ value = utils.normalize_time(value)
+ if dialect.name == 'mysql':
+ return self._dt_to_decimal(value)
+ return value
+
+ def process_result_value(self, value, dialect):
+ if dialect.name == 'mysql':
+ value = self._decimal_to_dt(value)
+ if value is not None:
+ return utils.normalize_time(value).replace(
+ tzinfo=iso8601.iso8601.UTC)
+
+
+class TimestampUTC(types.TypeDecorator):
+ """Represents a timestamp precise to the microsecond."""
+
+ impl = sqlalchemy.DateTime
+
+ def load_dialect_impl(self, dialect):
+ if dialect.name == 'mysql':
+ return dialect.type_descriptor(mysql.DATETIME(fsp=6))
+ return self.impl
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ return utils.normalize_time(value)
+
+ def process_result_value(self, value, dialect):
+ if value is not None:
+ return value.replace(tzinfo=iso8601.iso8601.UTC)
+
+
+class GnocchiBase(models.ModelBase):
+ __table_args__ = (
+ COMMON_TABLES_ARGS,
+ )
+
+
+class ArchivePolicyDefinitionType(sqlalchemy_utils.JSONType):
+ def process_result_value(self, value, dialect):
+ values = super(ArchivePolicyDefinitionType,
+ self).process_result_value(value, dialect)
+ return [archive_policy.ArchivePolicyItem(**v) for v in values]
+
+
+class SetType(sqlalchemy_utils.JSONType):
+ def process_result_value(self, value, dialect):
+ return set(super(SetType,
+ self).process_result_value(value, dialect))
+
+
+class ArchivePolicy(Base, GnocchiBase, archive_policy.ArchivePolicy):
+ __tablename__ = 'archive_policy'
+
+ name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True)
+ back_window = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
+ definition = sqlalchemy.Column(ArchivePolicyDefinitionType, nullable=False)
+ # TODO(jd) Use an array of string instead, PostgreSQL can do that
+ aggregation_methods = sqlalchemy.Column(SetType,
+ nullable=False)
+
+
+class Metric(Base, GnocchiBase, storage.Metric):
+ __tablename__ = 'metric'
+ __table_args__ = (
+ sqlalchemy.Index('ix_metric_status', 'status'),
+ sqlalchemy.UniqueConstraint("resource_id", "name",
+ name="uniq_metric0resource_id0name"),
+ COMMON_TABLES_ARGS,
+ )
+
+ id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
+ primary_key=True)
+ archive_policy_name = sqlalchemy.Column(
+ sqlalchemy.String(255),
+ sqlalchemy.ForeignKey(
+ 'archive_policy.name',
+ ondelete="RESTRICT",
+ name="fk_metric_ap_name_ap_name"),
+ nullable=False)
+ archive_policy = sqlalchemy.orm.relationship(ArchivePolicy, lazy="joined")
+ creator = sqlalchemy.Column(sqlalchemy.String(255))
+ resource_id = sqlalchemy.Column(
+ sqlalchemy_utils.UUIDType(),
+ sqlalchemy.ForeignKey('resource.id',
+ ondelete="SET NULL",
+ name="fk_metric_resource_id_resource_id"))
+ name = sqlalchemy.Column(sqlalchemy.String(255))
+ unit = sqlalchemy.Column(sqlalchemy.String(31))
+ status = sqlalchemy.Column(sqlalchemy.Enum('active', 'delete',
+ name="metric_status_enum"),
+ nullable=False,
+ server_default='active')
+
+ def jsonify(self):
+ d = {
+ "id": self.id,
+ "creator": self.creator,
+ "name": self.name,
+ "unit": self.unit,
+ }
+ unloaded = sqlalchemy.inspect(self).unloaded
+ if 'resource' in unloaded:
+ d['resource_id'] = self.resource_id
+ else:
+ d['resource'] = self.resource
+ if 'archive_policy' in unloaded:
+ d['archive_policy_name'] = self.archive_policy_name
+ else:
+ d['archive_policy'] = self.archive_policy
+
+ if self.creator is None:
+ d['created_by_user_id'] = d['created_by_project_id'] = None
+ else:
+ d['created_by_user_id'], _, d['created_by_project_id'] = (
+ self.creator.partition(":")
+ )
+
+ return d
+
+ def __eq__(self, other):
+ # NOTE(jd) If `other` is a SQL Metric, we only compare
+ # archive_policy_name, and we don't compare archive_policy that might
+ # not be loaded. Otherwise we fallback to the original comparison for
+ # storage.Metric.
+ return ((isinstance(other, Metric)
+ and self.id == other.id
+ and self.archive_policy_name == other.archive_policy_name
+ and self.creator == other.creator
+ and self.name == other.name
+ and self.unit == other.unit
+ and self.resource_id == other.resource_id)
+ or (storage.Metric.__eq__(self, other)))
+
+ __hash__ = storage.Metric.__hash__
+
+
+RESOURCE_TYPE_SCHEMA_MANAGER = resource_type.ResourceTypeSchemaManager(
+ "gnocchi.indexer.sqlalchemy.resource_type_attribute")
+
+
+class ResourceTypeAttributes(sqlalchemy_utils.JSONType):
+ def process_bind_param(self, attributes, dialect):
+ return super(ResourceTypeAttributes, self).process_bind_param(
+ attributes.jsonify(), dialect)
+
+ def process_result_value(self, value, dialect):
+ attributes = super(ResourceTypeAttributes, self).process_result_value(
+ value, dialect)
+ return RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict(attributes)
+
+
+class ResourceType(Base, GnocchiBase, resource_type.ResourceType):
+ __tablename__ = 'resource_type'
+ __table_args__ = (
+ sqlalchemy.UniqueConstraint("tablename",
+ name="uniq_resource_type0tablename"),
+ COMMON_TABLES_ARGS,
+ )
+
+ name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True,
+ nullable=False)
+ tablename = sqlalchemy.Column(sqlalchemy.String(35), nullable=False)
+ attributes = sqlalchemy.Column(ResourceTypeAttributes)
+ state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating",
+ "creation_error", "deleting",
+ "deletion_error", "updating",
+ "updating_error",
+ name="resource_type_state_enum"),
+ nullable=False,
+ server_default="creating")
+ updated_at = sqlalchemy.Column(TimestampUTC, nullable=False,
+ # NOTE(jd): We would like to use
+ # sqlalchemy.func.now, but we can't
+ # because the type of PreciseTimestamp in
+ # MySQL is not a Timestamp, so it would
+ # not store a timestamp but a date as an
+ # integer.
+ default=lambda: utils.utcnow())
+
+ def to_baseclass(self):
+ cols = {}
+ for attr in self.attributes:
+ cols[attr.name] = sqlalchemy.Column(attr.satype,
+ nullable=not attr.required)
+ return type(str("%s_base" % self.tablename), (object, ), cols)
+
+
+class ResourceJsonifier(indexer.Resource):
+ def jsonify(self):
+ d = dict(self)
+ del d['revision']
+ if 'metrics' not in sqlalchemy.inspect(self).unloaded:
+ d['metrics'] = dict((m.name, six.text_type(m.id))
+ for m in self.metrics)
+
+ if self.creator is None:
+ d['created_by_user_id'] = d['created_by_project_id'] = None
+ else:
+ d['created_by_user_id'], _, d['created_by_project_id'] = (
+ self.creator.partition(":")
+ )
+
+ return d
+
+
+class ResourceMixin(ResourceJsonifier):
+ @declarative.declared_attr
+ def __table_args__(cls):
+ return (sqlalchemy.CheckConstraint('started_at <= ended_at',
+ name="ck_started_before_ended"),
+ COMMON_TABLES_ARGS)
+
+ @declarative.declared_attr
+ def type(cls):
+ return sqlalchemy.Column(
+ sqlalchemy.String(255),
+ sqlalchemy.ForeignKey('resource_type.name',
+ ondelete="RESTRICT",
+ name="fk_%s_resource_type_name" %
+ cls.__tablename__),
+ nullable=False)
+
+ creator = sqlalchemy.Column(sqlalchemy.String(255))
+ started_at = sqlalchemy.Column(TimestampUTC, nullable=False,
+ default=lambda: utils.utcnow())
+ revision_start = sqlalchemy.Column(TimestampUTC, nullable=False,
+ default=lambda: utils.utcnow())
+ ended_at = sqlalchemy.Column(TimestampUTC)
+ user_id = sqlalchemy.Column(sqlalchemy.String(255))
+ project_id = sqlalchemy.Column(sqlalchemy.String(255))
+ original_resource_id = sqlalchemy.Column(sqlalchemy.String(255),
+ nullable=False)
+
+
+class Resource(ResourceMixin, Base, GnocchiBase):
+ __tablename__ = 'resource'
+ _extra_keys = ['revision', 'revision_end']
+ revision = -1
+ id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
+ primary_key=True)
+ revision_end = None
+ metrics = sqlalchemy.orm.relationship(
+ Metric, backref="resource",
+ primaryjoin="and_(Resource.id == Metric.resource_id, "
+ "Metric.status == 'active')")
+
+ def get_metric(self, metric_name):
+ m = super(Resource, self).get_metric(metric_name)
+ if m:
+ if sqlalchemy.orm.session.object_session(self):
+ # NOTE(jd) The resource is already loaded so that should not
+ # trigger a SELECT
+ m.resource
+ return m
+
+
+class ResourceHistory(ResourceMixin, Base, GnocchiBase):
+ __tablename__ = 'resource_history'
+
+ revision = sqlalchemy.Column(sqlalchemy.Integer, autoincrement=True,
+ primary_key=True)
+ id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
+ sqlalchemy.ForeignKey(
+ 'resource.id',
+ ondelete="CASCADE",
+ name="fk_rh_id_resource_id"),
+ nullable=False)
+ revision_end = sqlalchemy.Column(TimestampUTC, nullable=False,
+ default=lambda: utils.utcnow())
+ metrics = sqlalchemy.orm.relationship(
+ Metric, primaryjoin="Metric.resource_id == ResourceHistory.id",
+ foreign_keys='Metric.resource_id')
+
+
+class ResourceExt(object):
+ """Default extension class for plugin
+
+ Used for plugin that doesn't need additional columns
+ """
+
+
+class ResourceExtMixin(object):
+ @declarative.declared_attr
+ def __table_args__(cls):
+ return (COMMON_TABLES_ARGS, )
+
+ @declarative.declared_attr
+ def id(cls):
+ tablename_compact = cls.__tablename__
+ if tablename_compact.endswith("_history"):
+ tablename_compact = tablename_compact[:-6]
+ return sqlalchemy.Column(
+ sqlalchemy_utils.UUIDType(),
+ sqlalchemy.ForeignKey(
+ 'resource.id',
+ ondelete="CASCADE",
+ name="fk_%s_id_resource_id" % tablename_compact,
+                # NOTE(sileht): use_alter=True ensures that PostgreSQL
+                # does not take an AccessExclusiveLock on the destination
+                # table
+ use_alter=True),
+ primary_key=True
+ )
+
+
+class ResourceHistoryExtMixin(object):
+ @declarative.declared_attr
+ def __table_args__(cls):
+ return (COMMON_TABLES_ARGS, )
+
+ @declarative.declared_attr
+ def revision(cls):
+ tablename_compact = cls.__tablename__
+ if tablename_compact.endswith("_history"):
+ tablename_compact = tablename_compact[:-6]
+ return sqlalchemy.Column(
+ sqlalchemy.Integer,
+ sqlalchemy.ForeignKey(
+ 'resource_history.revision',
+ ondelete="CASCADE",
+ name="fk_%s_revision_rh_revision"
+ % tablename_compact,
+                # NOTE(sileht): use_alter=True ensures that PostgreSQL
+                # does not take an AccessExclusiveLock on the destination
+                # table
+ use_alter=True),
+ primary_key=True
+ )
+
+
+class HistoryModelIterator(models.ModelIterator):
+ def __next__(self):
+        # NOTE(sileht): Our custom resource attribute columns don't have
+        # the same name in the database as in the SQLAlchemy model, so
+        # strip the additional "f_" prefix to get the model attribute name.
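+        # e.g. (illustrative) a database column named "f_flavor_id" is
+        # yielded as the pair ("flavor_id", <value>).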
+ n = six.advance_iterator(self.i)
+ model_attr = n[2:] if n[:2] == "f_" else n
+ return model_attr, getattr(self.model, n)
+
+
+class ArchivePolicyRule(Base, GnocchiBase):
+ __tablename__ = 'archive_policy_rule'
+
+ name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True)
+ archive_policy_name = sqlalchemy.Column(
+ sqlalchemy.String(255),
+ sqlalchemy.ForeignKey(
+ 'archive_policy.name',
+ ondelete="RESTRICT",
+ name="fk_apr_ap_name_ap_name"),
+ nullable=False)
+ metric_pattern = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py
new file mode 100644
index 00000000..bc4d8418
--- /dev/null
+++ b/gnocchi/indexer/sqlalchemy_extension.py
@@ -0,0 +1,56 @@
+# -*- encoding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+
+import sqlalchemy
+import sqlalchemy_utils
+
+from gnocchi import resource_type
+
+
+class SchemaMixin(object):
+ def for_filling(self, dialect):
+        # NOTE(sileht): This must be used only when patching a resource
+        # type, to fill all existing rows with a default value before
+        # switching the server_default back to None.
+ if self.fill is None:
+ return None
+
+        # NOTE(sileht): server_default must be converted into a SQL element
+ return sqlalchemy.literal(self.fill)
+
+
+class StringSchema(resource_type.StringSchema, SchemaMixin):
+ @property
+ def satype(self):
+ return sqlalchemy.String(self.max_length)
+
+
+class UUIDSchema(resource_type.UUIDSchema, SchemaMixin):
+ satype = sqlalchemy_utils.UUIDType()
+
+ def for_filling(self, dialect):
+ if self.fill is None:
+ return False # Don't set any server_default
+ return sqlalchemy.literal(
+ self.satype.process_bind_param(self.fill, dialect))
+
+
+class NumberSchema(resource_type.NumberSchema, SchemaMixin):
+ satype = sqlalchemy.Float(53)
+
+
+class BoolSchema(resource_type.BoolSchema, SchemaMixin):
+ satype = sqlalchemy.Boolean
diff --git a/gnocchi/indexer/sqlalchemy_legacy_resources.py b/gnocchi/indexer/sqlalchemy_legacy_resources.py
new file mode 100644
index 00000000..8390476b
--- /dev/null
+++ b/gnocchi/indexer/sqlalchemy_legacy_resources.py
@@ -0,0 +1,78 @@
+# -*- encoding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(sileht): this code is duplicated in the alembic migrations
+ceilometer_tablenames = {
+ "instance_network_interface": "instance_net_int",
+ "host_network_interface": "host_net_int",
+}
+ceilometer_resources = {
+ "generic": {},
+ "image": {
+ "name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "container_format": {"type": "string", "min_length": 0,
+ "max_length": 255, "required": True},
+ "disk_format": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ },
+ "instance": {
+ "flavor_id": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "image_ref": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": False},
+ "host": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "display_name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "server_group": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": False},
+ },
+ "instance_disk": {
+ "name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "instance_id": {"type": "uuid", "required": True},
+ },
+ "instance_network_interface": {
+ "name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "instance_id": {"type": "uuid", "required": True},
+ },
+ "volume": {
+ "display_name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": False},
+ },
+ "swift_account": {},
+ "ceph_account": {},
+ "network": {},
+ "identity": {},
+ "ipmi": {},
+ "stack": {},
+ "host": {
+ "host_name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ },
+ "host_network_interface": {
+ "host_name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "device_name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": False},
+ },
+ "host_disk": {
+ "host_name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": True},
+ "device_name": {"type": "string", "min_length": 0, "max_length": 255,
+ "required": False},
+ },
+}
diff --git a/gnocchi/json.py b/gnocchi/json.py
new file mode 100644
index 00000000..eb5fa924
--- /dev/null
+++ b/gnocchi/json.py
@@ -0,0 +1,58 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2015-2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import uuid
+
+import numpy
+import six
+import ujson
+
+
+def to_primitive(obj):
+ if isinstance(obj, ((six.text_type,)
+ + six.integer_types
+ + (type(None), bool, float))):
+ return obj
+ if isinstance(obj, uuid.UUID):
+ return six.text_type(obj)
+ if isinstance(obj, datetime.datetime):
+ return obj.isoformat()
+ if isinstance(obj, numpy.datetime64):
+ # Do not include nanoseconds if null
+ return str(obj).rpartition(".000000000")[0] + "+00:00"
+ # This mimics what Pecan implements in its default JSON encoder
+ if hasattr(obj, "jsonify"):
+ return to_primitive(obj.jsonify())
+ if isinstance(obj, dict):
+ return {to_primitive(k): to_primitive(v)
+ for k, v in obj.items()}
+ if hasattr(obj, 'iteritems'):
+ return to_primitive(dict(obj.iteritems()))
+ # Python 3 does not have iteritems
+ if hasattr(obj, 'items'):
+ return to_primitive(dict(obj.items()))
+ if hasattr(obj, '__iter__'):
+ return list(map(to_primitive, obj))
+ return obj
+
+
+def dumps(obj):
+ return ujson.dumps(to_primitive(obj))
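+
+
+# e.g. (illustrative):
+#   dumps({"when": datetime.datetime(2017, 1, 1)})
+#   returns '{"when":"2017-01-01T00:00:00"}'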
+
+
+# For convenience
+loads = ujson.loads
+load = ujson.load
diff --git a/gnocchi/opts.py b/gnocchi/opts.py
new file mode 100644
index 00000000..023138da
--- /dev/null
+++ b/gnocchi/opts.py
@@ -0,0 +1,167 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+import itertools
+import operator
+import pkg_resources
+import uuid
+
+from oslo_config import cfg
+from oslo_middleware import cors
+
+import gnocchi.archive_policy
+import gnocchi.indexer
+import gnocchi.storage
+import gnocchi.storage.ceph
+import gnocchi.storage.file
+import gnocchi.storage.redis
+import gnocchi.storage.s3
+import gnocchi.storage.swift
+
+
+# NOTE(sileht): The oslo.config interpolation is buggy when the value
+# is None; this replaces it with the expected empty string.
+# A fix may land via https://review.openstack.org/#/c/417496/,
+# but it seems some projects are relying on the bug...
+class CustomStrSubWrapper(cfg.ConfigOpts.StrSubWrapper):
+ def __getitem__(self, key):
+ value = super(CustomStrSubWrapper, self).__getitem__(key)
+ if value is None:
+ return ''
+ return value
+
+cfg.ConfigOpts.StrSubWrapper = CustomStrSubWrapper
+
+
+_STORAGE_OPTS = list(itertools.chain(gnocchi.storage.OPTS,
+ gnocchi.storage.ceph.OPTS,
+ gnocchi.storage.file.OPTS,
+ gnocchi.storage.swift.OPTS,
+ gnocchi.storage.redis.OPTS,
+ gnocchi.storage.s3.OPTS))
+
+
+_INCOMING_OPTS = copy.deepcopy(_STORAGE_OPTS)
+for opt in _INCOMING_OPTS:
+ opt.default = '${storage.%s}' % opt.name
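+
+# e.g. (illustrative): the [incoming] "file_basepath" option thus defaults
+# to "${storage.file_basepath}", so incoming data follows the storage
+# driver configuration unless explicitly overridden.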
+
+
+def list_opts():
+ return [
+ ("indexer", gnocchi.indexer.OPTS),
+ ("metricd", (
+ cfg.IntOpt('workers', min=1,
+ required=True,
+ help='Number of workers for Gnocchi metric daemons. '
+ 'By default the available number of CPU is used.'),
+ cfg.IntOpt('metric_processing_delay',
+ default=60,
+ required=True,
+ deprecated_group='storage',
+ help="How many seconds to wait between "
+ "scheduling new metrics to process"),
+ cfg.IntOpt('metric_reporting_delay',
+ deprecated_group='storage',
+ default=120,
+ min=-1,
+ required=True,
+ help="How many seconds to wait between "
+ "metric ingestion reporting. Set value to -1 to "
+ "disable reporting"),
+ cfg.IntOpt('metric_cleanup_delay',
+ deprecated_group='storage',
+ default=300,
+ required=True,
+ help="How many seconds to wait between "
+ "cleaning of expired data"),
+ cfg.IntOpt('worker_sync_rate',
+ default=30,
+ help="Frequency to detect when metricd workers join or "
+ "leave system (in seconds). A shorter rate, may "
+ "improve rebalancing but create more coordination "
+ "load"),
+ cfg.IntOpt('processing_replicas',
+ default=3,
+ min=1,
+ help="Number of workers that share a task. A higher "
+ "value may improve worker utilization but may also "
+ "increase load on coordination backend. Value is "
+ "capped by number of workers globally."),
+ )),
+ ("api", (
+ cfg.StrOpt('paste_config',
+ default="api-paste.ini",
+ help='Path to API Paste configuration.'),
+ cfg.StrOpt('auth_mode',
+ default="basic",
+ choices=list(map(operator.attrgetter("name"),
+ pkg_resources.iter_entry_points(
+ "gnocchi.rest.auth_helper"))),
+ help='Authentication mode to use.'),
+ cfg.IntOpt('max_limit',
+ default=1000,
+ required=True,
+ help=('The maximum number of items returned in a '
+ 'single response from a collection resource')),
+ cfg.IntOpt('refresh_timeout',
+ default=10, min=0,
+ help='Number of seconds before timeout when attempting '
+ 'to force refresh of metric.'),
+ )),
+ ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS)),
+ ("incoming", _INCOMING_OPTS),
+ ("statsd", (
+ cfg.HostAddressOpt('host',
+ default='0.0.0.0',
+ help='The listen IP for statsd'),
+ cfg.PortOpt('port',
+ default=8125,
+ help='The port for statsd'),
+ cfg.Opt(
+ 'resource_id',
+ type=uuid.UUID,
+ help='Resource UUID to use to identify statsd in Gnocchi'),
+ cfg.StrOpt(
+ 'user_id',
+ deprecated_for_removal=True,
+ help='User ID to use to identify statsd in Gnocchi'),
+ cfg.StrOpt(
+ 'project_id',
+ deprecated_for_removal=True,
+ help='Project ID to use to identify statsd in Gnocchi'),
+ cfg.StrOpt(
+ 'creator',
+ default="${statsd.user_id}:${statsd.project_id}",
+ help='Creator value to use to identify statsd in Gnocchi'),
+ cfg.StrOpt(
+ 'archive_policy_name',
+ help='Archive policy name to use when creating metrics'),
+ cfg.FloatOpt(
+ 'flush_delay',
+ default=10,
+ help='Delay between flushes'),
+ )),
+ ("archive_policy", gnocchi.archive_policy.OPTS),
+ ]
+
+
+def set_defaults():
+ cfg.set_defaults(cors.CORS_OPTS,
+ allow_headers=[
+ 'X-Auth-Token',
+ 'X-Subject-Token',
+ 'X-User-Id',
+ 'X-Domain-Id',
+ 'X-Project-Id',
+ 'X-Roles'])
diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py
new file mode 100644
index 00000000..73b75564
--- /dev/null
+++ b/gnocchi/resource_type.py
@@ -0,0 +1,266 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import numbers
+import re
+import six
+import stevedore
+import voluptuous
+
+from gnocchi import utils
+
+
+INVALID_NAMES = [
+ "id", "type", "metrics",
+ "revision", "revision_start", "revision_end",
+ "started_at", "ended_at",
+ "user_id", "project_id",
+ "created_by_user_id", "created_by_project_id", "get_metric",
+ "creator",
+]
+
+VALID_CHARS = re.compile("[a-zA-Z0-9][a-zA-Z0-9_]*")
+
+
+class InvalidResourceAttribute(ValueError):
+ pass
+
+
+class InvalidResourceAttributeName(InvalidResourceAttribute):
+ """Error raised when the resource attribute name is invalid."""
+ def __init__(self, name):
+ super(InvalidResourceAttributeName, self).__init__(
+ "Resource attribute name %s is invalid" % str(name))
+ self.name = name
+
+
+class InvalidResourceAttributeValue(InvalidResourceAttribute):
+ """Error raised when the resource attribute min is greater than max"""
+ def __init__(self, min, max):
+ super(InvalidResourceAttributeValue, self).__init__(
+ "Resource attribute value min (or min_length) %s must be less "
+ "than or equal to max (or max_length) %s!" % (str(min), str(max)))
+ self.min = min
+ self.max = max
+
+
+class InvalidResourceAttributeOption(InvalidResourceAttribute):
+ """Error raised when the resource attribute name is invalid."""
+ def __init__(self, name, option, reason):
+ super(InvalidResourceAttributeOption, self).__init__(
+ "Option '%s' of resource attribute %s is invalid: %s" %
+ (option, str(name), str(reason)))
+ self.name = name
+ self.option = option
+ self.reason = reason
+
+
+# NOTE(sileht): This stores the behavior of some operations:
+# * fill, to set a default value on all existing resources of the type
+#
+# In the future, for example, we could allow changing the length of
+# a string attribute; if the new one is shorter, we could add an option
+# to define the behavior, like:
+# * resize = trunc or reject
+OperationOptions = {
+ voluptuous.Optional('fill'): object
+}
+
+
+class CommonAttributeSchema(object):
+ meta_schema_ext = {}
+ schema_ext = None
+
+ def __init__(self, type, name, required, options=None):
+ if (len(name) > 63 or name in INVALID_NAMES
+ or not VALID_CHARS.match(name)):
+ raise InvalidResourceAttributeName(name)
+
+ self.name = name
+ self.required = required
+ self.fill = None
+
+ # options is set only when we update a resource type
+ if options is not None:
+ fill = options.get("fill")
+ if fill is None and required:
+ raise InvalidResourceAttributeOption(
+ name, "fill", "must not be empty if required=True")
+ elif fill is not None:
+                # Ensure fill has the correct attribute type
+ try:
+ self.fill = voluptuous.Schema(self.schema_ext)(fill)
+ except voluptuous.Error as e:
+ raise InvalidResourceAttributeOption(name, "fill", e)
+
+ @classmethod
+ def meta_schema(cls, for_update=False):
+ d = {
+ voluptuous.Required('type'): cls.typename,
+ voluptuous.Required('required', default=True): bool
+ }
+ if for_update:
+ d[voluptuous.Required('options', default={})] = OperationOptions
+ if callable(cls.meta_schema_ext):
+ d.update(cls.meta_schema_ext())
+ else:
+ d.update(cls.meta_schema_ext)
+ return d
+
+ def schema(self):
+ if self.required:
+ return {self.name: self.schema_ext}
+ else:
+ return {voluptuous.Optional(self.name): self.schema_ext}
+
+ def jsonify(self):
+ return {"type": self.typename,
+ "required": self.required}
+
+
+class StringSchema(CommonAttributeSchema):
+ typename = "string"
+
+ def __init__(self, min_length, max_length, *args, **kwargs):
+ if min_length > max_length:
+ raise InvalidResourceAttributeValue(min_length, max_length)
+
+ self.min_length = min_length
+ self.max_length = max_length
+ super(StringSchema, self).__init__(*args, **kwargs)
+
+ meta_schema_ext = {
+ voluptuous.Required('min_length', default=0):
+ voluptuous.All(int, voluptuous.Range(min=0, max=255)),
+ voluptuous.Required('max_length', default=255):
+ voluptuous.All(int, voluptuous.Range(min=1, max=255))
+ }
+
+ @property
+ def schema_ext(self):
+ return voluptuous.All(six.text_type,
+ voluptuous.Length(
+ min=self.min_length,
+ max=self.max_length))
+
+ def jsonify(self):
+ d = super(StringSchema, self).jsonify()
+ d.update({"max_length": self.max_length,
+ "min_length": self.min_length})
+ return d
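+
+
+# An illustrative attribute definition matching StringSchema's meta schema
+# (the values are made up):
+#   {"type": "string", "required": True, "min_length": 0, "max_length": 64}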
+
+
+class UUIDSchema(CommonAttributeSchema):
+ typename = "uuid"
+ schema_ext = staticmethod(utils.UUID)
+
+
+class NumberSchema(CommonAttributeSchema):
+ typename = "number"
+
+ def __init__(self, min, max, *args, **kwargs):
+ if max is not None and min is not None and min > max:
+ raise InvalidResourceAttributeValue(min, max)
+ self.min = min
+ self.max = max
+ super(NumberSchema, self).__init__(*args, **kwargs)
+
+ meta_schema_ext = {
+ voluptuous.Required('min', default=None): voluptuous.Any(
+ None, numbers.Real),
+ voluptuous.Required('max', default=None): voluptuous.Any(
+ None, numbers.Real)
+ }
+
+ @property
+ def schema_ext(self):
+ return voluptuous.All(numbers.Real,
+ voluptuous.Range(min=self.min,
+ max=self.max))
+
+ def jsonify(self):
+ d = super(NumberSchema, self).jsonify()
+ d.update({"min": self.min, "max": self.max})
+ return d
+
+
+class BoolSchema(CommonAttributeSchema):
+ typename = "bool"
+ schema_ext = bool
+
+
+class ResourceTypeAttributes(list):
+ def jsonify(self):
+ d = {}
+ for attr in self:
+ d[attr.name] = attr.jsonify()
+ return d
+
+
+class ResourceTypeSchemaManager(stevedore.ExtensionManager):
+ def __init__(self, *args, **kwargs):
+ super(ResourceTypeSchemaManager, self).__init__(*args, **kwargs)
+ type_schemas = tuple([ext.plugin.meta_schema()
+ for ext in self.extensions])
+ self._schema = voluptuous.Schema({
+ "name": six.text_type,
+ voluptuous.Required("attributes", default={}): {
+ six.text_type: voluptuous.Any(*tuple(type_schemas))
+ }
+ })
+
+ type_schemas = tuple([ext.plugin.meta_schema(for_update=True)
+ for ext in self.extensions])
+ self._schema_for_update = voluptuous.Schema({
+ "name": six.text_type,
+ voluptuous.Required("attributes", default={}): {
+ six.text_type: voluptuous.Any(*tuple(type_schemas))
+ }
+ })
+
+ def __call__(self, definition):
+ return self._schema(definition)
+
+ def for_update(self, definition):
+ return self._schema_for_update(definition)
+
+ def attributes_from_dict(self, attributes):
+ return ResourceTypeAttributes(
+ self[attr["type"]].plugin(name=name, **attr)
+ for name, attr in attributes.items())
+
+ def resource_type_from_dict(self, name, attributes, state):
+ return ResourceType(name, self.attributes_from_dict(attributes), state)
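+
+    # An illustrative definition accepted by __call__ above ("instance"
+    # and "flavor_id" are made-up names):
+    #   {"name": "instance",
+    #    "attributes": {"flavor_id": {"type": "string", "required": True}}}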
+
+
+class ResourceType(object):
+ def __init__(self, name, attributes, state):
+ self.name = name
+ self.attributes = attributes
+ self.state = state
+
+ @property
+ def schema(self):
+ schema = {}
+ for attr in self.attributes:
+ schema.update(attr.schema())
+ return schema
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ def jsonify(self):
+ return {"name": self.name,
+ "attributes": self.attributes.jsonify(),
+ "state": self.state}
diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py
new file mode 100644
index 00000000..42e9bc41
--- /dev/null
+++ b/gnocchi/rest/__init__.py
@@ -0,0 +1,1785 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import functools
+import itertools
+import uuid
+
+import jsonpatch
+import pecan
+from pecan import rest
+import pyparsing
+import six
+from six.moves.urllib import parse as urllib_parse
+from stevedore import extension
+import voluptuous
+import webob.exc
+import werkzeug.http
+
+from gnocchi import aggregates
+from gnocchi import archive_policy
+from gnocchi import indexer
+from gnocchi import json
+from gnocchi import resource_type
+from gnocchi import storage
+from gnocchi.storage import incoming
+from gnocchi import utils
+
+
+def arg_to_list(value):
+ if isinstance(value, list):
+ return value
+ elif value:
+ return [value]
+ return []
+
+
+def abort(status_code, detail='', headers=None, comment=None, **kw):
+ """Like pecan.abort, but make sure detail is a string."""
+ if status_code == 404 and not detail:
+ raise RuntimeError("http code 404 must have 'detail' set")
+ if isinstance(detail, Exception):
+ detail = six.text_type(detail)
+ return pecan.abort(status_code, detail, headers, comment, **kw)
+
+
+def flatten_dict_to_keypairs(d, separator=':'):
+ """Generator that produces sequence of keypairs for nested dictionaries.
+
+ :param d: dictionaries which may be nested
+ :param separator: symbol between names
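+
+    For example (illustrative)::
+
+        >>> list(flatten_dict_to_keypairs({"a": {"b": 1}, "c": 2}))
+        [('a:b', 1), ('c', 2)]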
+ """
+ for name, value in sorted(six.iteritems(d)):
+ if isinstance(value, dict):
+ for subname, subvalue in flatten_dict_to_keypairs(value,
+ separator):
+ yield ('%s%s%s' % (name, separator, subname), subvalue)
+ else:
+ yield name, value
+
+
+def enforce(rule, target):
+ """Return the user and project the request should be limited to.
+
+ :param rule: The rule name
+ :param target: The target to enforce on.
+
+ """
+ creds = pecan.request.auth_helper.get_auth_info(pecan.request.headers)
+
+ if not isinstance(target, dict):
+ if hasattr(target, "jsonify"):
+ target = target.jsonify()
+ else:
+ target = target.__dict__
+
+ # Flatten dict
+ target = dict(flatten_dict_to_keypairs(d=target, separator='.'))
+
+ if not pecan.request.policy_enforcer.enforce(rule, target, creds):
+ abort(403)
+
+
+def set_resp_location_hdr(location):
+ location = '%s%s' % (pecan.request.script_name, location)
+    # NOTE(sileht): according to PEP 3333, the headers must be str in
+    # both py2 and py3, even though str is not the same type in both
+    # versions
+ # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues
+ if six.PY2 and isinstance(location, six.text_type):
+ location = location.encode('utf-8')
+ location = urllib_parse.quote(location)
+ pecan.response.headers['Location'] = location
+
+
+def deserialize(expected_content_types=None):
+ if expected_content_types is None:
+ expected_content_types = ("application/json", )
+
+ mime_type, options = werkzeug.http.parse_options_header(
+ pecan.request.headers.get('Content-Type'))
+ if mime_type not in expected_content_types:
+ abort(415)
+ try:
+ params = json.load(pecan.request.body_file)
+ except Exception as e:
+ abort(400, "Unable to decode body: " + six.text_type(e))
+ return params
+
+
+def deserialize_and_validate(schema, required=True,
+ expected_content_types=None):
+ try:
+ return voluptuous.Schema(schema, required=required)(
+ deserialize(expected_content_types=expected_content_types))
+ except voluptuous.Error as e:
+ abort(400, "Invalid input: %s" % e)
+
+
+def PositiveOrNullInt(value):
+ value = int(value)
+ if value < 0:
+ raise ValueError("Value must be positive")
+ return value
+
+
+def PositiveNotNullInt(value):
+ value = int(value)
+ if value <= 0:
+ raise ValueError("Value must be positive and not null")
+ return value
+
+
+def Timespan(value):
+ return utils.to_timespan(value).total_seconds()
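+
+
+# e.g. (illustrative, assuming plain second values are accepted by
+# utils.to_timespan): Timespan("3600") == 3600.0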
+
+
+def get_header_option(name, params):
+    mime_type, options = werkzeug.http.parse_options_header(
+ pecan.request.headers.get('Accept'))
+ return strtobool('Accept header' if name in options else name,
+ options.get(name, params.pop(name, 'false')))
+
+
+def get_history(params):
+ return get_header_option('history', params)
+
+
+def get_details(params):
+ return get_header_option('details', params)
+
+
+def strtobool(varname, v):
+ """Convert a string to a boolean.
+
+    Abort with HTTP 400 if the value cannot be converted.
+ """
+ try:
+ return utils.strtobool(v)
+ except ValueError as e:
+ abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e)))
+
+
+RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc',
+ 'started_at:asc']
+
+METRIC_DEFAULT_PAGINATION = ['id:asc']
+
+
+def get_pagination_options(params, default):
+ max_limit = pecan.request.conf.api.max_limit
+ limit = params.pop('limit', max_limit)
+ marker = params.pop('marker', None)
+ sorts = params.pop('sort', default)
+ if not isinstance(sorts, list):
+ sorts = [sorts]
+
+ try:
+ limit = PositiveNotNullInt(limit)
+ except ValueError:
+ abort(400, "Invalid 'limit' value: %s" % params.get('limit'))
+
+ limit = min(limit, max_limit)
+
+ return {'limit': limit,
+ 'marker': marker,
+ 'sorts': sorts}
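+
+
+# An illustrative call: params={"limit": "5", "sort": "name:asc"} yields
+# {"limit": 5, "marker": None, "sorts": ["name:asc"]}, with the limit
+# always capped at api.max_limit.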
+
+
+def ValidAggMethod(value):
+ value = six.text_type(value)
+ if value in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES:
+ return value
+ raise ValueError("Invalid aggregation method")
+
+
+class ArchivePolicyController(rest.RestController):
+ def __init__(self, archive_policy):
+ self.archive_policy = archive_policy
+
+ @pecan.expose('json')
+ def get(self):
+ ap = pecan.request.indexer.get_archive_policy(self.archive_policy)
+ if ap:
+ enforce("get archive policy", ap)
+ return ap
+ abort(404, indexer.NoSuchArchivePolicy(self.archive_policy))
+
+ @pecan.expose('json')
+ def patch(self):
+ ap = pecan.request.indexer.get_archive_policy(self.archive_policy)
+ if not ap:
+ abort(404, indexer.NoSuchArchivePolicy(self.archive_policy))
+ enforce("update archive policy", ap)
+
+ body = deserialize_and_validate(voluptuous.Schema({
+ voluptuous.Required("definition"):
+ voluptuous.All([{
+ "granularity": Timespan,
+ "points": PositiveNotNullInt,
+ "timespan": Timespan}], voluptuous.Length(min=1)),
+ }))
+ # Validate the data
+ try:
+ ap_items = [archive_policy.ArchivePolicyItem(**item) for item in
+ body['definition']]
+ except ValueError as e:
+ abort(400, e)
+
+ try:
+ return pecan.request.indexer.update_archive_policy(
+ self.archive_policy, ap_items)
+ except indexer.UnsupportedArchivePolicyChange as e:
+ abort(400, e)
+
+ @pecan.expose()
+ def delete(self):
+ # NOTE(jd) I don't think there's any point in fetching and passing the
+ # archive policy here, as the rule is probably checking the actual role
+ # of the user, not the content of the AP.
+ enforce("delete archive policy", {})
+ try:
+ pecan.request.indexer.delete_archive_policy(self.archive_policy)
+ except indexer.NoSuchArchivePolicy as e:
+ abort(404, e)
+ except indexer.ArchivePolicyInUse as e:
+ abort(400, e)
+
+
+class ArchivePoliciesController(rest.RestController):
+ @pecan.expose()
+ def _lookup(self, archive_policy, *remainder):
+ return ArchivePolicyController(archive_policy), remainder
+
+ @pecan.expose('json')
+ def post(self):
+ # NOTE(jd): Initialize this one at run-time because we rely on conf
+ conf = pecan.request.conf
+ enforce("create archive policy", {})
+ ArchivePolicySchema = voluptuous.Schema({
+ voluptuous.Required("name"): six.text_type,
+ voluptuous.Required("back_window", default=0): PositiveOrNullInt,
+ voluptuous.Required(
+ "aggregation_methods",
+ default=set(conf.archive_policy.default_aggregation_methods)):
+ [ValidAggMethod],
+ voluptuous.Required("definition"):
+ voluptuous.All([{
+ "granularity": Timespan,
+ "points": PositiveNotNullInt,
+ "timespan": Timespan,
+ }], voluptuous.Length(min=1)),
+ })
+
+ body = deserialize_and_validate(ArchivePolicySchema)
+ # Validate the data
+ try:
+ ap = archive_policy.ArchivePolicy.from_dict(body)
+ except ValueError as e:
+ abort(400, e)
+ enforce("create archive policy", ap)
+ try:
+ ap = pecan.request.indexer.create_archive_policy(ap)
+ except indexer.ArchivePolicyAlreadyExists as e:
+ abort(409, e)
+
+ location = "/archive_policy/" + ap.name
+ set_resp_location_hdr(location)
+ pecan.response.status = 201
+ return ap
+
+ @pecan.expose('json')
+ def get_all(self):
+ enforce("list archive policy", {})
+ return pecan.request.indexer.list_archive_policies()
+
+
+class ArchivePolicyRulesController(rest.RestController):
+ @pecan.expose('json')
+ def post(self):
+ enforce("create archive policy rule", {})
+ ArchivePolicyRuleSchema = voluptuous.Schema({
+ voluptuous.Required("name"): six.text_type,
+ voluptuous.Required("metric_pattern"): six.text_type,
+ voluptuous.Required("archive_policy_name"): six.text_type,
+ })
+
+ body = deserialize_and_validate(ArchivePolicyRuleSchema)
+ enforce("create archive policy rule", body)
+ try:
+ ap = pecan.request.indexer.create_archive_policy_rule(
+ body['name'], body['metric_pattern'],
+ body['archive_policy_name']
+ )
+ except indexer.ArchivePolicyRuleAlreadyExists as e:
+ abort(409, e)
+
+ location = "/archive_policy_rule/" + ap.name
+ set_resp_location_hdr(location)
+ pecan.response.status = 201
+ return ap
+
+ @pecan.expose('json')
+ def get_one(self, name):
+ ap = pecan.request.indexer.get_archive_policy_rule(name)
+ if ap:
+ enforce("get archive policy rule", ap)
+ return ap
+ abort(404, indexer.NoSuchArchivePolicyRule(name))
+
+ @pecan.expose('json')
+ def get_all(self):
+ enforce("list archive policy rule", {})
+ return pecan.request.indexer.list_archive_policy_rules()
+
+ @pecan.expose()
+ def delete(self, name):
+ # NOTE(jd) I don't think there's any point in fetching and passing the
+ # archive policy rule here, as the rule is probably checking the actual
+ # role of the user, not the content of the AP rule.
+ enforce("delete archive policy rule", {})
+ try:
+ pecan.request.indexer.delete_archive_policy_rule(name)
+ except indexer.NoSuchArchivePolicyRule as e:
+ abort(404, e)
+ except indexer.ArchivePolicyRuleInUse as e:
+ abort(400, e)
+
+
+def MeasuresListSchema(measures):
+ try:
+ times = utils.to_timestamps((m['timestamp'] for m in measures))
+ except TypeError:
+ abort(400, "Invalid format for measures")
+ except ValueError as e:
+ abort(400, "Invalid input for timestamp: %s" % e)
+
+ try:
+ values = [float(i['value']) for i in measures]
+ except Exception:
+ abort(400, "Invalid input for a value")
+
+ return (storage.Measure(t, v) for t, v in six.moves.zip(
+ times.tolist(), values))
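+# A minimal sketch of the accepted payload (illustrative values):
+#   [{"timestamp": "2017-01-01T12:00:00", "value": 4.2},
+#    {"timestamp": "2017-01-01T12:05:00", "value": 4.6}]
+# Timestamps are converted in one batch by utils.to_timestamps() and each
+# value is coerced to float before the storage.Measure tuples are built.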
+
+
+class MetricController(rest.RestController):
+ _custom_actions = {
+ 'measures': ['POST', 'GET']
+ }
+
+ def __init__(self, metric):
+ self.metric = metric
+ mgr = extension.ExtensionManager(namespace='gnocchi.aggregates',
+ invoke_on_load=True)
+ self.custom_agg = dict((x.name, x.obj) for x in mgr)
+
+ def enforce_metric(self, rule):
+ enforce(rule, json.to_primitive(self.metric))
+
+ @pecan.expose('json')
+ def get_all(self):
+ self.enforce_metric("get metric")
+ return self.metric
+
+ @pecan.expose()
+ def post_measures(self):
+ self.enforce_metric("post measures")
+ params = deserialize()
+ if not isinstance(params, list):
+ abort(400, "Invalid input for measures")
+ if params:
+ pecan.request.storage.incoming.add_measures(
+ self.metric, MeasuresListSchema(params))
+ pecan.response.status = 202
+
+ @pecan.expose('json')
+ def get_measures(self, start=None, stop=None, aggregation='mean',
+ granularity=None, resample=None, refresh=False, **param):
+ self.enforce_metric("get measures")
+ if not (aggregation
+ in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS
+ or aggregation in self.custom_agg):
+ msg = ("Invalid aggregation value %(agg)s, must be one of "
+ "%(std)s or %(custom)s")
+ abort(400, msg % dict(
+ agg=aggregation,
+ std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS,
+ custom=str(self.custom_agg.keys())))
+
+ if start is not None:
+ try:
+ start = utils.to_datetime(start)
+ except Exception:
+ abort(400, "Invalid value for start")
+
+ if stop is not None:
+ try:
+ stop = utils.to_datetime(stop)
+ except Exception:
+ abort(400, "Invalid value for stop")
+
+ if resample:
+ if not granularity:
+ abort(400, 'A granularity must be specified to resample')
+ try:
+ resample = Timespan(resample)
+ except ValueError as e:
+ abort(400, e)
+
+ if (strtobool("refresh", refresh) and
+ pecan.request.storage.incoming.has_unprocessed(self.metric)):
+ try:
+ pecan.request.storage.refresh_metric(
+ pecan.request.indexer, self.metric,
+ pecan.request.conf.api.refresh_timeout)
+ except storage.SackLockTimeoutError as e:
+ abort(503, e)
+ try:
+ if aggregation in self.custom_agg:
+ measures = self.custom_agg[aggregation].compute(
+ pecan.request.storage, self.metric,
+ start, stop, **param)
+ else:
+ measures = pecan.request.storage.get_measures(
+ self.metric, start, stop, aggregation,
+ Timespan(granularity) if granularity is not None else None,
+ resample)
+ # Replace timestamp keys by their string versions
+ return [(timestamp.isoformat(), offset, v)
+ for timestamp, offset, v in measures]
+ except (storage.MetricDoesNotExist,
+ storage.GranularityDoesNotExist,
+ storage.AggregationDoesNotExist) as e:
+ abort(404, e)
+ except aggregates.CustomAggFailure as e:
+ abort(400, e)
+
+ @pecan.expose()
+ def delete(self):
+ self.enforce_metric("delete metric")
+ try:
+ pecan.request.indexer.delete_metric(self.metric.id)
+ except indexer.NoSuchMetric as e:
+ abort(404, e)
+
+
+class MetricsController(rest.RestController):
+
+ @pecan.expose()
+ def _lookup(self, id, *remainder):
+ try:
+ metric_id = uuid.UUID(id)
+ except ValueError:
+ abort(404, indexer.NoSuchMetric(id))
+ metrics = pecan.request.indexer.list_metrics(
+ id=metric_id, details=True)
+ if not metrics:
+ abort(404, indexer.NoSuchMetric(id))
+ return MetricController(metrics[0]), remainder
+
+ _MetricSchema = voluptuous.Schema({
+ "archive_policy_name": six.text_type,
+ "name": six.text_type,
+ voluptuous.Optional("unit"):
+ voluptuous.All(six.text_type, voluptuous.Length(max=31)),
+ })
+
+ # NOTE(jd) Define this method as if it were a voluptuous schema – it's
+ # just a smarter version of a voluptuous schema, no?
+ @classmethod
+ def MetricSchema(cls, definition):
+ # First basic validation
+ definition = cls._MetricSchema(definition)
+ archive_policy_name = definition.get('archive_policy_name')
+
+ name = definition.get('name')
+ if name and '/' in name:
+ abort(400, "'/' is not supported in metric name")
+ if archive_policy_name is None:
+ try:
+ ap = pecan.request.indexer.get_archive_policy_for_metric(name)
+ except indexer.NoArchivePolicyRuleMatch:
+ # NOTE(jd) Since this is a schema-like function, we
+ # should/could raise ValueError, but if we do so, voluptuous
+ # just returns a "invalid value" with no useful message – so we
+ # prefer to use abort() to make sure the user has the right
+ # error message
+ abort(400, "No archive policy name specified "
+ "and no archive policy rule found matching "
+ "the metric name %s" % name)
+ else:
+ definition['archive_policy_name'] = ap.name
+
+ creator = pecan.request.auth_helper.get_current_user(
+ pecan.request.headers)
+
+ enforce("create metric", {
+ "creator": creator,
+ "archive_policy_name": archive_policy_name,
+ "name": name,
+ "unit": definition.get('unit'),
+ })
+
+ return definition
+
+ @pecan.expose('json')
+ def post(self):
+ creator = pecan.request.auth_helper.get_current_user(
+ pecan.request.headers)
+ body = deserialize_and_validate(self.MetricSchema)
+ try:
+ m = pecan.request.indexer.create_metric(
+ uuid.uuid4(),
+ creator,
+ name=body.get('name'),
+ unit=body.get('unit'),
+ archive_policy_name=body['archive_policy_name'])
+ except indexer.NoSuchArchivePolicy as e:
+ abort(400, e)
+ set_resp_location_hdr("/metric/" + str(m.id))
+ pecan.response.status = 201
+ return m
+
+ MetricListSchema = voluptuous.Schema({
+ "user_id": six.text_type,
+ "project_id": six.text_type,
+ "creator": six.text_type,
+ "limit": six.text_type,
+ "name": six.text_type,
+ "id": six.text_type,
+ "unit": six.text_type,
+ "archive_policy_name": six.text_type,
+ "status": voluptuous.Any("active", "delete"),
+ "sort": voluptuous.Any([six.text_type], six.text_type),
+ "marker": six.text_type,
+ })
+
+ @classmethod
+ @pecan.expose('json')
+ def get_all(cls, **kwargs):
+ kwargs = cls.MetricListSchema(kwargs)
+
+ # Compat with old user/project API
+ provided_user_id = kwargs.pop('user_id', None)
+ provided_project_id = kwargs.pop('project_id', None)
+ if provided_user_id is None and provided_project_id is None:
+ provided_creator = kwargs.pop('creator', None)
+ else:
+ provided_creator = (
+ (provided_user_id or "")
+ + ":"
+ + (provided_project_id or "")
+ )
+ try:
+ enforce("list all metric", {})
+ except webob.exc.HTTPForbidden:
+ enforce("list metric", {})
+ creator = pecan.request.auth_helper.get_current_user(
+ pecan.request.headers)
+ if provided_creator and creator != provided_creator:
+ abort(403, "Insufficient privileges to filter by user/project")
+ attr_filter = {}
+ if provided_creator is not None:
+ attr_filter['creator'] = provided_creator
+ attr_filter.update(get_pagination_options(
+ kwargs, METRIC_DEFAULT_PAGINATION))
+ attr_filter.update(kwargs)
+ try:
+ return pecan.request.indexer.list_metrics(**attr_filter)
+ except indexer.IndexerException as e:
+ abort(400, e)
+
+
+_MetricsSchema = voluptuous.Schema({
+ six.text_type: voluptuous.Any(utils.UUID,
+ MetricsController.MetricSchema),
+})
+
+
+def MetricsSchema(data):
+ # NOTE(jd) Before doing any kind of validation, copy the metric name
+ # into the metric definition. This is required so we have the name
+ # available when doing the metric validation with its own MetricSchema,
+ # and so we can do things such as applying archive policy rules.
+ if isinstance(data, dict):
+ for metric_name, metric_def in six.iteritems(data):
+ if isinstance(metric_def, dict):
+ metric_def['name'] = metric_name
+ return _MetricsSchema(data)
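+# An illustrative payload (metric names and policy assumed): a value may
+# be the UUID of an existing metric or a full definition; the dict key is
+# copied into the definition as the metric name before validation, so
+# archive policy rules can match on it:
+#   {"cpu_util": "00000000-0000-0000-0000-000000000000",
+#    "temperature": {"archive_policy_name": "low"}}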
+
+
+class NamedMetricController(rest.RestController):
+ def __init__(self, resource_id, resource_type):
+ self.resource_id = resource_id
+ self.resource_type = resource_type
+
+ @pecan.expose()
+ def _lookup(self, name, *remainder):
+ details = pecan.request.method == 'GET'
+ m = pecan.request.indexer.list_metrics(details=details,
+ name=name,
+ resource_id=self.resource_id)
+ if m:
+ return MetricController(m[0]), remainder
+
+ resource = pecan.request.indexer.get_resource(self.resource_type,
+ self.resource_id)
+ if resource:
+ abort(404, indexer.NoSuchMetric(name))
+ else:
+ abort(404, indexer.NoSuchResource(self.resource_id))
+
+ @pecan.expose()
+ def post(self):
+ resource = pecan.request.indexer.get_resource(
+ self.resource_type, self.resource_id)
+ if not resource:
+ abort(404, indexer.NoSuchResource(self.resource_id))
+ enforce("update resource", resource)
+ metrics = deserialize_and_validate(MetricsSchema)
+ try:
+ pecan.request.indexer.update_resource(
+ self.resource_type, self.resource_id, metrics=metrics,
+ append_metrics=True,
+ create_revision=False)
+ except (indexer.NoSuchMetric,
+ indexer.NoSuchArchivePolicy,
+ ValueError) as e:
+ abort(400, e)
+ except indexer.NamedMetricAlreadyExists as e:
+ abort(409, e)
+ except indexer.NoSuchResource as e:
+ abort(404, e)
+
+ @pecan.expose('json')
+ def get_all(self):
+ resource = pecan.request.indexer.get_resource(
+ self.resource_type, self.resource_id)
+ if not resource:
+ abort(404, indexer.NoSuchResource(self.resource_id))
+ enforce("get resource", resource)
+ return pecan.request.indexer.list_metrics(resource_id=self.resource_id)
+
+
+class ResourceHistoryController(rest.RestController):
+ def __init__(self, resource_id, resource_type):
+ self.resource_id = resource_id
+ self.resource_type = resource_type
+
+ @pecan.expose('json')
+ def get(self, **kwargs):
+ details = get_details(kwargs)
+ pagination_opts = get_pagination_options(
+ kwargs, RESOURCE_DEFAULT_PAGINATION)
+
+ resource = pecan.request.indexer.get_resource(
+ self.resource_type, self.resource_id)
+ if not resource:
+ abort(404, indexer.NoSuchResource(self.resource_id))
+
+ enforce("get resource", resource)
+
+ try:
+ # FIXME(sileht): the next API version should return
+ # {'resources': [...], 'links': [ ... pagination rel ...]}
+ return pecan.request.indexer.list_resources(
+ self.resource_type,
+ attribute_filter={"=": {"id": self.resource_id}},
+ details=details,
+ history=True,
+ **pagination_opts
+ )
+ except indexer.IndexerException as e:
+ abort(400, e)
+
+
+def etag_precondition_check(obj):
+ etag, lastmodified = obj.etag, obj.lastmodified
+ # NOTE(sileht): The checks and their order come from RFC 7232.
+ # In WebOb, '*' and the absence of the header are both handled by
+ # if_match.__contains__() and if_none_match.__contains__(), which
+ # behave identically...
+ if etag not in pecan.request.if_match:
+ abort(412)
+ elif (not pecan.request.environ.get("HTTP_IF_MATCH")
+ and pecan.request.if_unmodified_since
+ and pecan.request.if_unmodified_since < lastmodified):
+ abort(412)
+
+ if etag in pecan.request.if_none_match:
+ if pecan.request.method in ['GET', 'HEAD']:
+ abort(304)
+ else:
+ abort(412)
+ elif (not pecan.request.environ.get("HTTP_IF_NONE_MATCH")
+ and pecan.request.if_modified_since
+ and (pecan.request.if_modified_since >=
+ lastmodified)
+ and pecan.request.method in ['GET', 'HEAD']):
+ abort(304)
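+# Illustrative outcomes of the RFC 7232 ordering above: a GET carrying
+# 'If-None-Match: "<current etag>"' returns 304 Not Modified, the same
+# header on a DELETE returns 412 Precondition Failed, and an If-Match
+# header that does not list the current etag always returns 412.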
+
+
+def etag_set_headers(obj):
+ pecan.response.etag = obj.etag
+ pecan.response.last_modified = obj.lastmodified
+
+
+def AttributesPath(value):
+ if value.startswith("/attributes"):
+ return value
+ raise ValueError("Only attributes can be modified")
+
+
+ResourceTypeJsonPatchSchema = voluptuous.Schema([{
+ "op": voluptuous.Any("add", "remove"),
+ "path": AttributesPath,
+ voluptuous.Optional("value"): dict,
+}])
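+# An illustrative JSON Patch body (attribute name and options assumed)
+# accepted by the schema above – only paths under /attributes pass
+# AttributesPath:
+#   [{"op": "add", "path": "/attributes/ip",
+#     "value": {"type": "string", "required": false}},
+#    {"op": "remove", "path": "/attributes/host"}]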
+
+
+class ResourceTypeController(rest.RestController):
+ def __init__(self, name):
+ self._name = name
+
+ @pecan.expose('json')
+ def get(self):
+ try:
+ rt = pecan.request.indexer.get_resource_type(self._name)
+ except indexer.NoSuchResourceType as e:
+ abort(404, e)
+ enforce("get resource type", rt)
+ return rt
+
+ @pecan.expose('json')
+ def patch(self):
+ # NOTE(sileht): should we check for an "application/json-patch+json"
+ # Content-Type?
+
+ try:
+ rt = pecan.request.indexer.get_resource_type(self._name)
+ except indexer.NoSuchResourceType as e:
+ abort(404, e)
+ enforce("update resource type", rt)
+
+ # Ensure this is a valid JSON Patch document
+ patch = deserialize_and_validate(
+ ResourceTypeJsonPatchSchema,
+ expected_content_types=["application/json-patch+json"])
+
+ # Add new attributes to the resource type
+ rt_json_current = rt.jsonify()
+ try:
+ rt_json_next = jsonpatch.apply_patch(rt_json_current, patch)
+ except jsonpatch.JsonPatchException as e:
+ abort(400, e)
+ del rt_json_next['state']
+
+ # Validate that the whole new resource_type is valid
+ schema = pecan.request.indexer.get_resource_type_schema()
+ try:
+ rt_json_next = voluptuous.Schema(schema.for_update, required=True)(
+ rt_json_next)
+ except voluptuous.Error as e:
+ abort(400, "Invalid input: %s" % e)
+
+ # Get only the newly added and the deleted attributes
+ add_attrs = {k: v for k, v in rt_json_next["attributes"].items()
+ if k not in rt_json_current["attributes"]}
+ del_attrs = [k for k in rt_json_current["attributes"]
+ if k not in rt_json_next["attributes"]]
+
+ if not add_attrs and not del_attrs:
+ # NOTE(sileht): just return the resource, the requested changes
+ # are a no-op
+ return rt
+
+ try:
+ add_attrs = schema.attributes_from_dict(add_attrs)
+ except resource_type.InvalidResourceAttribute as e:
+ abort(400, "Invalid input: %s" % e)
+
+ try:
+ return pecan.request.indexer.update_resource_type(
+ self._name, add_attributes=add_attrs,
+ del_attributes=del_attrs)
+ except indexer.NoSuchResourceType as e:
+ abort(400, e)
+
+ @pecan.expose()
+ def delete(self):
+ try:
+ rt = pecan.request.indexer.get_resource_type(self._name)
+ except indexer.NoSuchResourceType as e:
+ abort(404, e)
+ enforce("delete resource type", rt)
+ try:
+ pecan.request.indexer.delete_resource_type(self._name)
+ except (indexer.NoSuchResourceType,
+ indexer.ResourceTypeInUse) as e:
+ abort(400, e)
+
+
+class ResourceTypesController(rest.RestController):
+
+ @pecan.expose()
+ def _lookup(self, name, *remainder):
+ return ResourceTypeController(name), remainder
+
+ @pecan.expose('json')
+ def post(self):
+ schema = pecan.request.indexer.get_resource_type_schema()
+ body = deserialize_and_validate(schema)
+ body["state"] = "creating"
+
+ try:
+ rt = schema.resource_type_from_dict(**body)
+ except resource_type.InvalidResourceAttribute as e:
+ abort(400, "Invalid input: %s" % e)
+
+ enforce("create resource type", body)
+ try:
+ rt = pecan.request.indexer.create_resource_type(rt)
+ except indexer.ResourceTypeAlreadyExists as e:
+ abort(409, e)
+ set_resp_location_hdr("/resource_type/" + rt.name)
+ pecan.response.status = 201
+ return rt
+
+ @pecan.expose('json')
+ def get_all(self, **kwargs):
+ enforce("list resource type", {})
+ try:
+ return pecan.request.indexer.list_resource_types()
+ except indexer.IndexerException as e:
+ abort(400, e)
+
+
+def ResourceSchema(schema):
+ base_schema = {
+ voluptuous.Optional('started_at'): utils.to_datetime,
+ voluptuous.Optional('ended_at'): utils.to_datetime,
+ voluptuous.Optional('user_id'): voluptuous.Any(None, six.text_type),
+ voluptuous.Optional('project_id'): voluptuous.Any(None, six.text_type),
+ voluptuous.Optional('metrics'): MetricsSchema,
+ }
+ base_schema.update(schema)
+ return base_schema
+
+
+class ResourceController(rest.RestController):
+
+ def __init__(self, resource_type, id):
+ self._resource_type = resource_type
+ creator = pecan.request.auth_helper.get_current_user(
+ pecan.request.headers)
+ try:
+ self.id = utils.ResourceUUID(id, creator)
+ except ValueError:
+ abort(404, indexer.NoSuchResource(id))
+ self.metric = NamedMetricController(str(self.id), self._resource_type)
+ self.history = ResourceHistoryController(str(self.id),
+ self._resource_type)
+
+ @pecan.expose('json')
+ def get(self):
+ resource = pecan.request.indexer.get_resource(
+ self._resource_type, self.id, with_metrics=True)
+ if resource:
+ enforce("get resource", resource)
+ etag_precondition_check(resource)
+ etag_set_headers(resource)
+ return resource
+ abort(404, indexer.NoSuchResource(self.id))
+
+ @pecan.expose('json')
+ def patch(self):
+ resource = pecan.request.indexer.get_resource(
+ self._resource_type, self.id, with_metrics=True)
+ if not resource:
+ abort(404, indexer.NoSuchResource(self.id))
+ enforce("update resource", resource)
+ etag_precondition_check(resource)
+
+ body = deserialize_and_validate(
+ schema_for(self._resource_type),
+ required=False)
+
+ if len(body) == 0:
+ etag_set_headers(resource)
+ return resource
+
+ for k, v in six.iteritems(body):
+ if k != 'metrics' and getattr(resource, k) != v:
+ create_revision = True
+ break
+ else:
+ if 'metrics' not in body:
+ # No need to go further, we assume the db resource
+ # doesn't change between the get and update
+ return resource
+ create_revision = False
+
+ try:
+ resource = pecan.request.indexer.update_resource(
+ self._resource_type,
+ self.id,
+ create_revision=create_revision,
+ **body)
+ except (indexer.NoSuchMetric,
+ indexer.NoSuchArchivePolicy,
+ ValueError) as e:
+ abort(400, e)
+ except indexer.NoSuchResource as e:
+ abort(404, e)
+ etag_set_headers(resource)
+ return resource
+
+ @pecan.expose()
+ def delete(self):
+ resource = pecan.request.indexer.get_resource(
+ self._resource_type, self.id)
+ if not resource:
+ abort(404, indexer.NoSuchResource(self.id))
+ enforce("delete resource", resource)
+ etag_precondition_check(resource)
+ try:
+ pecan.request.indexer.delete_resource(self.id)
+ except indexer.NoSuchResource as e:
+ abort(404, e)
+
+
+def schema_for(resource_type):
+ resource_type = pecan.request.indexer.get_resource_type(resource_type)
+ return ResourceSchema(resource_type.schema)
+
+
+def ResourceUUID(value, creator):
+ try:
+ return utils.ResourceUUID(value, creator)
+ except ValueError as e:
+ raise voluptuous.Invalid(e)
+
+
+def ResourceID(value, creator):
+ return (six.text_type(value), ResourceUUID(value, creator))
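+# Illustrative behaviour (non-UUID id assumed): ResourceID keeps the
+# caller-supplied id alongside its normalized form, e.g.
+#   ResourceID("vm1", creator) == (u"vm1", <UUID derived deterministically
+#   from "vm1" and the creator by utils.ResourceUUID>)
+# so both the original and the canonical id can be stored.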
+
+
+class ResourcesController(rest.RestController):
+ def __init__(self, resource_type):
+ self._resource_type = resource_type
+
+ @pecan.expose()
+ def _lookup(self, id, *remainder):
+ return ResourceController(self._resource_type, id), remainder
+
+ @pecan.expose('json')
+ def post(self):
+ # NOTE(sileht): we need to copy the dict because we modify it,
+ # and we don't want the next call to see the injected "id" key
+ schema = dict(schema_for(self._resource_type))
+ creator = pecan.request.auth_helper.get_current_user(
+ pecan.request.headers)
+ schema["id"] = functools.partial(ResourceID, creator=creator)
+
+ body = deserialize_and_validate(schema)
+ body["original_resource_id"], body["id"] = body["id"]
+
+ target = {
+ "resource_type": self._resource_type,
+ }
+ target.update(body)
+ enforce("create resource", target)
+ rid = body['id']
+ del body['id']
+ try:
+ resource = pecan.request.indexer.create_resource(
+ self._resource_type, rid, creator,
+ **body)
+ except (ValueError,
+ indexer.NoSuchMetric,
+ indexer.NoSuchArchivePolicy) as e:
+ abort(400, e)
+ except indexer.ResourceAlreadyExists as e:
+ abort(409, e)
+ set_resp_location_hdr("/resource/"
+ + self._resource_type + "/"
+ + six.text_type(resource.id))
+ etag_set_headers(resource)
+ pecan.response.status = 201
+ return resource
+
+ @pecan.expose('json')
+ def get_all(self, **kwargs):
+ details = get_details(kwargs)
+ history = get_history(kwargs)
+ pagination_opts = get_pagination_options(
+ kwargs, RESOURCE_DEFAULT_PAGINATION)
+ policy_filter = pecan.request.auth_helper.get_resource_policy_filter(
+ pecan.request.headers, "list resource", self._resource_type)
+
+ try:
+ # FIXME(sileht): the next API version should return
+ # {'resources': [...], 'links': [ ... pagination rel ...]}
+ return pecan.request.indexer.list_resources(
+ self._resource_type,
+ attribute_filter=policy_filter,
+ details=details,
+ history=history,
+ **pagination_opts
+ )
+ except indexer.IndexerException as e:
+ abort(400, e)
+
+ @pecan.expose('json')
+ def delete(self, **kwargs):
+ # NOTE(sileht): Don't allow an empty filter; it would delete
+ # the entire database.
+ attr_filter = deserialize_and_validate(ResourceSearchSchema)
+
+ # voluptuous checks everything, but it is better to be
+ # explicit here as well.
+ if not attr_filter:
+ abort(400, "caution: the query cannot be empty, or it will "
+ "delete the entire database")
+
+ policy_filter = pecan.request.auth_helper.get_resource_policy_filter(
+ pecan.request.headers,
+ "delete resources", self._resource_type)
+
+ if policy_filter:
+ attr_filter = {"and": [policy_filter, attr_filter]}
+
+ try:
+ delete_num = pecan.request.indexer.delete_resources(
+ self._resource_type, attribute_filter=attr_filter)
+ except indexer.IndexerException as e:
+ abort(400, e)
+
+ return {"deleted": delete_num}
+
+
+class ResourcesByTypeController(rest.RestController):
+ @pecan.expose('json')
+ def get_all(self):
+ return dict(
+ (rt.name,
+ pecan.request.application_url + '/resource/' + rt.name)
+ for rt in pecan.request.indexer.list_resource_types())
+
+ @pecan.expose()
+ def _lookup(self, resource_type, *remainder):
+ try:
+ pecan.request.indexer.get_resource_type(resource_type)
+ except indexer.NoSuchResourceType as e:
+ abort(404, e)
+ return ResourcesController(resource_type), remainder
+
+
+class InvalidQueryStringSearchAttrFilter(Exception):
+ def __init__(self, reason):
+ super(InvalidQueryStringSearchAttrFilter, self).__init__(
+ "Invalid filter: %s" % reason)
+
+
+class QueryStringSearchAttrFilter(object):
+ uninary_operators = ("not", )
+ binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq",
+ u"ne", u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠",
+ u"≥", u"≤")
+ multiple_operators = (u"and", u"or", u"∧", u"∨")
+
+ operator = pyparsing.Regex(u"|".join(binary_operator))
+ null = pyparsing.Regex("None|none|null").setParseAction(
+ pyparsing.replaceWith(None))
+ boolean = "False|True|false|true"
+ boolean = pyparsing.Regex(boolean).setParseAction(
+ lambda t: t[0].lower() == "true")
+ hex_string = lambda n: pyparsing.Word(pyparsing.hexnums, exact=n)
+ uuid_string = pyparsing.Combine(
+ hex_string(8) + (pyparsing.Optional("-") + hex_string(4)) * 3 +
+ pyparsing.Optional("-") + hex_string(12))
+ number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?"
+ number = pyparsing.Regex(number).setParseAction(lambda t: float(t[0]))
+ identifier = pyparsing.Word(pyparsing.alphas, pyparsing.alphanums + "_")
+ quoted_string = pyparsing.QuotedString('"') | pyparsing.QuotedString("'")
+ comparison_term = pyparsing.Forward()
+ in_list = pyparsing.Group(
+ pyparsing.Suppress('[') +
+ pyparsing.Optional(pyparsing.delimitedList(comparison_term)) +
+ pyparsing.Suppress(']'))("list")
+ comparison_term << (null | boolean | uuid_string | identifier | number |
+ quoted_string | in_list)
+ condition = pyparsing.Group(comparison_term + operator + comparison_term)
+
+ expr = pyparsing.infixNotation(condition, [
+ ("not", 1, pyparsing.opAssoc.RIGHT, ),
+ ("and", 2, pyparsing.opAssoc.LEFT, ),
+ ("∧", 2, pyparsing.opAssoc.LEFT, ),
+ ("or", 2, pyparsing.opAssoc.LEFT, ),
+ ("∨", 2, pyparsing.opAssoc.LEFT, ),
+ ])
+
+ @classmethod
+ def _parsed_query2dict(cls, parsed_query):
+ result = None
+ while parsed_query:
+ part = parsed_query.pop()
+ if part in cls.binary_operator:
+ result = {part: {parsed_query.pop(): result}}
+
+ elif part in cls.multiple_operators:
+ if result.get(part):
+ result[part].append(
+ cls._parsed_query2dict(parsed_query.pop()))
+ else:
+ result = {part: [result]}
+
+ elif part in cls.unary_operators:
+ result = {part: result}
+ elif isinstance(part, pyparsing.ParseResults):
+ kind = part.getName()
+ if kind == "list":
+ res = part.asList()
+ else:
+ res = cls._parsed_query2dict(part)
+ if result is None:
+ result = res
+ elif isinstance(result, dict):
+ list(result.values())[0].append(res)
+ else:
+ result = part
+ return result
+
+ @classmethod
+ def parse(cls, query):
+ try:
+ parsed_query = cls.expr.parseString(query, parseAll=True)[0]
+ except pyparsing.ParseException as e:
+ raise InvalidQueryStringSearchAttrFilter(six.text_type(e))
+ return cls._parsed_query2dict(parsed_query)
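+# An illustrative query string (attribute names assumed) and the filter
+# dict parse() builds from it:
+#   id="foo" and flavor_id=1
+# becomes, operand order aside,
+#   {"and": [{"=": {"id": "foo"}}, {"=": {"flavor_id": 1.0}}]}
+# – note that bare numbers are parsed as floats.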
+
+
+def ResourceSearchSchema(v):
+ return _ResourceSearchSchema()(v)
+
+
+def _ResourceSearchSchema():
+ user = pecan.request.auth_helper.get_current_user(
+ pecan.request.headers)
+ _ResourceUUID = functools.partial(ResourceUUID, creator=user)
+
+ return voluptuous.Schema(
+ voluptuous.All(
+ voluptuous.Length(min=0, max=1),
+ {
+ voluptuous.Any(
+ u"=", u"==", u"eq",
+ u"<", u"lt",
+ u">", u"gt",
+ u"<=", u"≤", u"le",
+ u">=", u"≥", u"ge",
+ u"!=", u"≠", u"ne",
+ u"in",
+ u"like",
+ ): voluptuous.All(
+ voluptuous.Length(min=1, max=1),
+ voluptuous.Any(
+ {"id": voluptuous.Any(
+ [_ResourceUUID], _ResourceUUID),
+ voluptuous.Extra: voluptuous.Extra})),
+ voluptuous.Any(
+ u"and", u"∨",
+ u"or", u"∧",
+ u"not",
+ ): voluptuous.All(
+ [ResourceSearchSchema], voluptuous.Length(min=1)
+ )
+ }
+ )
+ )
+
+
+class SearchResourceTypeController(rest.RestController):
+ def __init__(self, resource_type):
+ self._resource_type = resource_type
+
+ @staticmethod
+ def parse_and_validate_qs_filter(query):
+ try:
+ attr_filter = QueryStringSearchAttrFilter.parse(query)
+ except InvalidQueryStringSearchAttrFilter as e:
+ abort(400, e)
+ return voluptuous.Schema(ResourceSearchSchema,
+ required=True)(attr_filter)
+
+ def _search(self, **kwargs):
+ if pecan.request.body:
+ attr_filter = deserialize_and_validate(ResourceSearchSchema)
+ elif kwargs.get("filter"):
+ attr_filter = self.parse_and_validate_qs_filter(kwargs["filter"])
+ else:
+ attr_filter = None
+
+ details = get_details(kwargs)
+ history = get_history(kwargs)
+ pagination_opts = get_pagination_options(
+ kwargs, RESOURCE_DEFAULT_PAGINATION)
+
+ policy_filter = pecan.request.auth_helper.get_resource_policy_filter(
+ pecan.request.headers, "search resource", self._resource_type)
+ if policy_filter:
+ if attr_filter:
+ attr_filter = {"and": [
+ policy_filter,
+ attr_filter
+ ]}
+ else:
+ attr_filter = policy_filter
+
+ return pecan.request.indexer.list_resources(
+ self._resource_type,
+ attribute_filter=attr_filter,
+ details=details,
+ history=history,
+ **pagination_opts)
+
+ @pecan.expose('json')
+ def post(self, **kwargs):
+ try:
+ return self._search(**kwargs)
+ except indexer.IndexerException as e:
+ abort(400, e)
+
+
+class SearchResourceController(rest.RestController):
+ @pecan.expose()
+ def _lookup(self, resource_type, *remainder):
+ try:
+ pecan.request.indexer.get_resource_type(resource_type)
+ except indexer.NoSuchResourceType as e:
+ abort(404, e)
+ return SearchResourceTypeController(resource_type), remainder
+
+
+def _MetricSearchSchema(v):
+ """Helper method to indirect the recursivity of the search schema"""
+ return SearchMetricController.MetricSearchSchema(v)
+
+
+def _MetricSearchOperationSchema(v):
+ """Helper method to indirect the recursivity of the search schema"""
+ return SearchMetricController.MetricSearchOperationSchema(v)
+
+
+class SearchMetricController(rest.RestController):
+
+ MetricSearchOperationSchema = voluptuous.Schema(
+ voluptuous.All(
+ voluptuous.Length(min=1, max=1),
+ {
+ voluptuous.Any(
+ u"=", u"==", u"eq",
+ u"<", u"lt",
+ u">", u"gt",
+ u"<=", u"≤", u"le",
+ u">=", u"≥", u"ge",
+ u"!=", u"≠", u"ne",
+ u"%", u"mod",
+ u"+", u"add",
+ u"-", u"sub",
+ u"*", u"×", u"mul",
+ u"/", u"÷", u"div",
+ u"**", u"^", u"pow",
+ ): voluptuous.Any(
+ float, int,
+ voluptuous.All(
+ [float, int,
+ voluptuous.Any(_MetricSearchOperationSchema)],
+ voluptuous.Length(min=2, max=2),
+ ),
+ ),
+ },
+ )
+ )
+
+ MetricSearchSchema = voluptuous.Schema(
+ voluptuous.Any(
+ MetricSearchOperationSchema,
+ voluptuous.All(
+ voluptuous.Length(min=1, max=1),
+ {
+ voluptuous.Any(
+ u"and", u"∨",
+ u"or", u"∧",
+ u"not",
+ ): [_MetricSearchSchema],
+ }
+ )
+ )
+ )
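+ # An illustrative search query accepted by the schema above – find
+ # values between 10 and 50; operands may themselves be nested
+ # operations:
+ #   {"and": [{"≥": 10}, {"≤": 50}]}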
+
+ @pecan.expose('json')
+ def post(self, metric_id, start=None, stop=None, aggregation='mean',
+ granularity=None):
+ granularity = [Timespan(g)
+ for g in arg_to_list(granularity or [])]
+ metrics = pecan.request.indexer.list_metrics(
+ ids=arg_to_list(metric_id))
+
+ for metric in metrics:
+ enforce("search metric", metric)
+
+ if not pecan.request.body:
+ abort(400, "No query specified in body")
+
+ query = deserialize_and_validate(self.MetricSearchSchema)
+
+ if start is not None:
+ try:
+ start = utils.to_datetime(start)
+ except Exception:
+ abort(400, "Invalid value for start")
+
+ if stop is not None:
+ try:
+ stop = utils.to_datetime(stop)
+ except Exception:
+ abort(400, "Invalid value for stop")
+
+ try:
+ return {
+ str(metric.id): values
+ for metric, values in six.iteritems(
+ pecan.request.storage.search_value(
+ metrics, query, start, stop, aggregation,
+ granularity
+ )
+ )
+ }
+ except storage.InvalidQuery as e:
+ abort(400, e)
+ except storage.GranularityDoesNotExist as e:
+ abort(400, e)
+
+
+class ResourcesMetricsMeasuresBatchController(rest.RestController):
+ @pecan.expose('json')
+ def post(self, create_metrics=False):
+ creator = pecan.request.auth_helper.get_current_user(
+ pecan.request.headers)
+ MeasuresBatchSchema = voluptuous.Schema(
+ {functools.partial(ResourceID, creator=creator):
+ {six.text_type: MeasuresListSchema}}
+ )
+
+ body = deserialize_and_validate(MeasuresBatchSchema)
+
+ known_metrics = []
+ unknown_metrics = []
+ unknown_resources = []
+ body_by_rid = {}
+ for original_resource_id, resource_id in body:
+ body_by_rid[resource_id] = body[(original_resource_id,
+ resource_id)]
+ names = body[(original_resource_id, resource_id)].keys()
+ metrics = pecan.request.indexer.list_metrics(
+ names=names, resource_id=resource_id)
+
+ known_names = [m.name for m in metrics]
+ if strtobool("create_metrics", create_metrics):
+ already_exists_names = []
+ for name in names:
+ if name not in known_names:
+ metric = MetricsController.MetricSchema({
+ "name": name
+ })
+ try:
+ m = pecan.request.indexer.create_metric(
+ uuid.uuid4(),
+ creator=creator,
+ resource_id=resource_id,
+ name=metric.get('name'),
+ unit=metric.get('unit'),
+ archive_policy_name=metric[
+ 'archive_policy_name'])
+ except indexer.NamedMetricAlreadyExists as e:
+ already_exists_names.append(e.metric)
+ except indexer.NoSuchResource:
+ unknown_resources.append({
+ 'resource_id': six.text_type(resource_id),
+ 'original_resource_id': original_resource_id})
+ break
+ except indexer.IndexerException as e:
+ # This catches NoSuchArchivePolicy, which is unlikely
+ # but still possible
+ abort(400, e)
+ else:
+ known_metrics.append(m)
+
+ if already_exists_names:
+ # Add metrics created in the meantime
+ known_names.extend(already_exists_names)
+ known_metrics.extend(
+ pecan.request.indexer.list_metrics(
+ names=already_exists_names,
+ resource_id=resource_id)
+ )
+
+ elif len(names) != len(metrics):
+ unknown_metrics.extend(
+ ["%s/%s" % (six.text_type(resource_id), m)
+ for m in names if m not in known_names])
+
+ known_metrics.extend(metrics)
+
+ if unknown_resources:
+ abort(400, {"cause": "Unknown resources",
+ "detail": unknown_resources})
+
+ if unknown_metrics:
+ abort(400, "Unknown metrics: %s" % ", ".join(
+ sorted(unknown_metrics)))
+
+ for metric in known_metrics:
+ enforce("post measures", metric)
+
+ pecan.request.storage.incoming.add_measures_batch(
+ dict((metric,
+ body_by_rid[metric.resource_id][metric.name])
+ for metric in known_metrics))
+
+ pecan.response.status = 202
+
+
+class MetricsMeasuresBatchController(rest.RestController):
+ # NOTE(sileht): we don't allow mixing both formats, to avoid
+ # having to deal with the id collisions that can occur between
+ # a metric_id and a resource_id. While JSON allows duplicate
+ # keys in a dict payload, the Python json module only retains
+ # the last one when building the dict.
+ MeasuresBatchSchema = voluptuous.Schema(
+ {utils.UUID: MeasuresListSchema}
+ )
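+ # An illustrative batch payload (metric id assumed), keyed by metric
+ # UUID only – per-resource batches go through the resource endpoint:
+ #   {"00000000-0000-0000-0000-000000000000":
+ #       [{"timestamp": "2017-01-01T12:00:00", "value": 4.2}]}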
+
+ @pecan.expose()
+ def post(self):
+ body = deserialize_and_validate(self.MeasuresBatchSchema)
+ metrics = pecan.request.indexer.list_metrics(ids=body.keys())
+
+ if len(metrics) != len(body):
+ missing_metrics = sorted(set(body) - set(m.id for m in metrics))
+ abort(400, "Unknown metrics: %s" % ", ".join(
+ six.moves.map(str, missing_metrics)))
+
+ for metric in metrics:
+ enforce("post measures", metric)
+
+ pecan.request.storage.incoming.add_measures_batch(
+ dict((metric, body[metric.id]) for metric in
+ metrics))
+
+ pecan.response.status = 202
+
+
+class SearchController(object):
+ resource = SearchResourceController()
+ metric = SearchMetricController()
+
+
+class AggregationResourceController(rest.RestController):
+ def __init__(self, resource_type, metric_name):
+ self.resource_type = resource_type
+ self.metric_name = metric_name
+
+ @pecan.expose('json')
+ def post(self, start=None, stop=None, aggregation='mean',
+ reaggregation=None, granularity=None, needed_overlap=100.0,
+ groupby=None, fill=None, refresh=False, resample=None):
+ # First, set groupby in the right format: a sorted list of unique
+ # strings.
+ groupby = sorted(set(arg_to_list(groupby)))
+
+ # NOTE(jd) Sort by groupby so we are sure we do not return multiple
+ # groups when using itertools.groupby later.
+ try:
+ resources = SearchResourceTypeController(
+ self.resource_type)._search(sort=groupby)
+ except indexer.InvalidPagination:
+ abort(400, "Invalid groupby attribute")
+ except indexer.IndexerException as e:
+ abort(400, e)
+
+ if resources is None:
+ return []
+
+ if not groupby:
+ metrics = list(filter(None,
+ (r.get_metric(self.metric_name)
+ for r in resources)))
+ return AggregationController.get_cross_metric_measures_from_objs(
+ metrics, start, stop, aggregation, reaggregation,
+ granularity, needed_overlap, fill, refresh, resample)
+
+ def grouper(r):
+ return tuple((attr, r[attr]) for attr in groupby)
+
+ results = []
+ for key, resources in itertools.groupby(resources, grouper):
+ metrics = list(filter(None,
+ (r.get_metric(self.metric_name)
+ for r in resources)))
+ results.append({
+ "group": dict(key),
+ "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa
+ metrics, start, stop, aggregation, reaggregation,
+ granularity, needed_overlap, fill, refresh, resample)
+ })
+
+ return results
+
+
+class AggregationController(rest.RestController):
+ _custom_actions = {
+ 'metric': ['GET'],
+ }
+
+ @pecan.expose()
+ def _lookup(self, object_type, resource_type, key, metric_name,
+ *remainder):
+ if object_type != "resource" or key != "metric":
+ # NOTE(sileht): we want the raw 404 message here,
+ # so use pecan directly
+ pecan.abort(404)
+ try:
+ pecan.request.indexer.get_resource_type(resource_type)
+ except indexer.NoSuchResourceType as e:
+ abort(404, e)
+ return AggregationResourceController(resource_type,
+ metric_name), remainder
+
+ @staticmethod
+ def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
+ aggregation='mean',
+ reaggregation=None,
+ granularity=None,
+ needed_overlap=100.0, fill=None,
+ refresh=False, resample=None):
+ try:
+ needed_overlap = float(needed_overlap)
+ except ValueError:
+ abort(400, 'needed_overlap must be a number')
+
+ if start is not None:
+ try:
+ start = utils.to_datetime(start)
+ except Exception:
+ abort(400, "Invalid value for start")
+
+ if stop is not None:
+ try:
+ stop = utils.to_datetime(stop)
+ except Exception:
+ abort(400, "Invalid value for stop")
+
+ if (aggregation
+ not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS):
+ abort(
+ 400,
+ 'Invalid aggregation value %s, must be one of %s'
+ % (aggregation,
+ archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS))
+
+ for metric in metrics:
+ enforce("get metric", metric)
+
+ number_of_metrics = len(metrics)
+ if number_of_metrics == 0:
+ return []
+ if granularity is not None:
+ try:
+ granularity = Timespan(granularity)
+ except ValueError as e:
+ abort(400, e)
+
+ if resample:
+ if not granularity:
+ abort(400, 'A granularity must be specified to resample')
+ try:
+ resample = Timespan(resample)
+ except ValueError as e:
+ abort(400, e)
+
+ if fill is not None:
+ if granularity is None:
+ abort(400, "Unable to fill without a granularity")
+ try:
+ fill = float(fill)
+ except ValueError as e:
+ if fill != 'null':
+ abort(400, "fill must be a float or \'null\': %s" % e)
+
+ try:
+ if strtobool("refresh", refresh):
+ store = pecan.request.storage
+ metrics_to_update = [
+ m for m in metrics if store.incoming.has_unprocessed(m)]
+ for m in metrics_to_update:
+ try:
+ pecan.request.storage.refresh_metric(
+ pecan.request.indexer, m,
+ pecan.request.conf.api.refresh_timeout)
+ except storage.SackLockTimeoutError as e:
+ abort(503, e)
+ if number_of_metrics == 1:
+ # NOTE(sileht): don't do the aggregation if we only have one
+ # metric
+ measures = pecan.request.storage.get_measures(
+ metrics[0], start, stop, aggregation,
+ granularity, resample)
+ else:
+ measures = pecan.request.storage.get_cross_metric_measures(
+ metrics, start, stop, aggregation,
+ reaggregation, resample, granularity, needed_overlap, fill)
+ # Replace timestamp keys by their string versions
+ return [(timestamp.isoformat(), offset, v)
+ for timestamp, offset, v in measures]
+ except storage.MetricUnaggregatable as e:
+ abort(400, ("One of the metrics being aggregated doesn't have "
+ "matching granularity: %s") % str(e))
+ except storage.MetricDoesNotExist as e:
+ abort(404, e)
+ except storage.AggregationDoesNotExist as e:
+ abort(404, e)
+
+ @pecan.expose('json')
+ def get_metric(self, metric=None, start=None, stop=None,
+ aggregation='mean', reaggregation=None, granularity=None,
+ needed_overlap=100.0, fill=None,
+ refresh=False, resample=None):
+ # Check RBAC policy
+ metric_ids = arg_to_list(metric)
+ metrics = pecan.request.indexer.list_metrics(ids=metric_ids)
+ missing_metric_ids = (set(metric_ids)
+ - set(six.text_type(m.id) for m in metrics))
+ if missing_metric_ids:
+ # Return one of the missing one in the error
+ abort(404, storage.MetricDoesNotExist(
+ missing_metric_ids.pop()))
+ return self.get_cross_metric_measures_from_objs(
+ metrics, start, stop, aggregation, reaggregation,
+ granularity, needed_overlap, fill, refresh, resample)
+
+
+class CapabilityController(rest.RestController):
+ @staticmethod
+ @pecan.expose('json')
+ def get():
+ aggregation_methods = set(
+ archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)
+ return dict(aggregation_methods=aggregation_methods,
+ dynamic_aggregation_methods=[
+ ext.name for ext in extension.ExtensionManager(
+ namespace='gnocchi.aggregates')
+ ])
+
+
+class StatusController(rest.RestController):
+ @staticmethod
+ @pecan.expose('json')
+ def get(details=True):
+ enforce("get status", {})
+ try:
+ report = pecan.request.storage.incoming.measures_report(
+ strtobool("details", details))
+ except incoming.ReportGenerationError:
+ abort(503, 'Unable to generate status. Please retry.')
+ report_dict = {"storage": {"summary": report['summary']}}
+ if 'details' in report:
+ report_dict["storage"]["measures_to_process"] = report['details']
+ return report_dict
+
+
+class MetricsBatchController(object):
+ measures = MetricsMeasuresBatchController()
+
+
+class ResourcesMetricsBatchController(object):
+ measures = ResourcesMetricsMeasuresBatchController()
+
+
+class ResourcesBatchController(object):
+ metrics = ResourcesMetricsBatchController()
+
+
+class BatchController(object):
+ metrics = MetricsBatchController()
+ resources = ResourcesBatchController()
+
+
+class V1Controller(object):
+
+ def __init__(self):
+ self.sub_controllers = {
+ "search": SearchController(),
+ "archive_policy": ArchivePoliciesController(),
+ "archive_policy_rule": ArchivePolicyRulesController(),
+ "metric": MetricsController(),
+ "batch": BatchController(),
+ "resource": ResourcesByTypeController(),
+ "resource_type": ResourceTypesController(),
+ "aggregation": AggregationController(),
+ "capabilities": CapabilityController(),
+ "status": StatusController(),
+ }
+ for name, ctrl in self.sub_controllers.items():
+ setattr(self, name, ctrl)
+
+ @pecan.expose('json')
+ def index(self):
+ return {
+ "version": "1.0",
+ "links": [
+ {"rel": "self",
+ "href": pecan.request.application_url}
+ ] + [
+ {"rel": name,
+ "href": pecan.request.application_url + "/" + name}
+ for name in sorted(self.sub_controllers)
+ ]
+ }
+
+
+class VersionsController(object):
+ @staticmethod
+ @pecan.expose('json')
+ def index():
+ return {
+ "versions": [
+ {
+ "status": "CURRENT",
+ "links": [
+ {
+ "rel": "self",
+ "href": pecan.request.application_url + "/v1/"
+ }
+ ],
+ "id": "v1.0",
+ "updated": "2015-03-19"
+ }
+ ]
+ }
diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini
new file mode 100644
index 00000000..47bb3c32
--- /dev/null
+++ b/gnocchi/rest/api-paste.ini
@@ -0,0 +1,46 @@
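+# NOTE: gnocchi.rest.app.load_app() loads the composite named
+# "gnocchi+<api.auth_mode>", so every supported auth mode needs a
+# matching composite here. "gnocchi+basic" reuses the noauth pipeline
+# because HTTP Basic credentials are parsed in-app by
+# gnocchi.rest.auth_helper.BasicAuthHelper, not by a middleware.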
+[composite:gnocchi+noauth]
+use = egg:Paste#urlmap
+/ = gnocchiversions_pipeline
+/v1 = gnocchiv1+noauth
+/healthcheck = healthcheck
+
+[composite:gnocchi+basic]
+use = egg:Paste#urlmap
+/ = gnocchiversions_pipeline
+/v1 = gnocchiv1+noauth
+/healthcheck = healthcheck
+
+[composite:gnocchi+keystone]
+use = egg:Paste#urlmap
+/ = gnocchiversions_pipeline
+/v1 = gnocchiv1+keystone
+/healthcheck = healthcheck
+
+[pipeline:gnocchiv1+noauth]
+pipeline = http_proxy_to_wsgi gnocchiv1
+
+[pipeline:gnocchiv1+keystone]
+pipeline = http_proxy_to_wsgi keystone_authtoken gnocchiv1
+
+[pipeline:gnocchiversions_pipeline]
+pipeline = http_proxy_to_wsgi gnocchiversions
+
+[app:gnocchiversions]
+paste.app_factory = gnocchi.rest.app:app_factory
+root = gnocchi.rest.VersionsController
+
+[app:gnocchiv1]
+paste.app_factory = gnocchi.rest.app:app_factory
+root = gnocchi.rest.V1Controller
+
+[filter:keystone_authtoken]
+use = egg:keystonemiddleware#auth_token
+oslo_config_project = gnocchi
+
+[filter:http_proxy_to_wsgi]
+use = egg:oslo.middleware#http_proxy_to_wsgi
+oslo_config_project = gnocchi
+
+[app:healthcheck]
+use = egg:oslo.middleware#healthcheck
+oslo_config_project = gnocchi
diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py
new file mode 100644
index 00000000..02022bd9
--- /dev/null
+++ b/gnocchi/rest/app.py
@@ -0,0 +1,143 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2016 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+import pkg_resources
+import uuid
+import warnings
+
+from oslo_log import log
+from oslo_middleware import cors
+from oslo_policy import policy
+from paste import deploy
+import pecan
+from pecan import jsonify
+from stevedore import driver
+import webob.exc
+
+from gnocchi import exceptions
+from gnocchi import indexer as gnocchi_indexer
+from gnocchi import json
+from gnocchi import service
+from gnocchi import storage as gnocchi_storage
+
+
+LOG = log.getLogger(__name__)
+
+
+# Register our encoder by default for everything
+jsonify.jsonify.register(object)(json.to_primitive)
+
+
+class GnocchiHook(pecan.hooks.PecanHook):
+
+ def __init__(self, storage, indexer, conf):
+ self.storage = storage
+ self.indexer = indexer
+ self.conf = conf
+ self.policy_enforcer = policy.Enforcer(conf)
+ self.auth_helper = driver.DriverManager("gnocchi.rest.auth_helper",
+ conf.api.auth_mode,
+ invoke_on_load=True).driver
+
+ def on_route(self, state):
+ state.request.storage = self.storage
+ state.request.indexer = self.indexer
+ state.request.conf = self.conf
+ state.request.policy_enforcer = self.policy_enforcer
+ state.request.auth_helper = self.auth_helper
+
+
+class NotImplementedMiddleware(object):
+ def __init__(self, app):
+ self.app = app
+
+ def __call__(self, environ, start_response):
+ try:
+ return self.app(environ, start_response)
+ except exceptions.NotImplementedError:
+ raise webob.exc.HTTPNotImplemented(
+ "Sorry, this Gnocchi server does "
+ "not implement this feature 😞")
+
+# NOTE(sileht): pastedeploy uses ConfigParser to handle global_conf.
+# Since Python 3, ConfigParser doesn't allow storing objects as config
+# values – only strings are permitted – so, to be able to pass objects
+# created before paste loads the app, we store them in a global
+# variable. Each loaded app then stores its configuration under a
+# unique key to be concurrency safe.
+APPCONFIGS = {}
+
+
+def load_app(conf, indexer=None, storage=None,
+ not_implemented_middleware=True):
+ global APPCONFIGS
+
+ # NOTE(sileht): Load the storage and indexer drivers here if they
+ # were not provided, so everything is ready before the WSGI app is
+ # built.
+ if not storage:
+ storage = gnocchi_storage.get_driver(conf)
+ if not indexer:
+ indexer = gnocchi_indexer.get_driver(conf)
+ indexer.connect()
+
+ # Build the WSGI app
+ cfg_path = conf.api.paste_config
+ if not os.path.isabs(cfg_path):
+ cfg_path = conf.find_file(cfg_path)
+
+ if cfg_path is None or not os.path.exists(cfg_path):
+ LOG.debug("No api-paste configuration file found! Using default.")
+ cfg_path = pkg_resources.resource_filename(__name__, "api-paste.ini")
+
+ config = dict(conf=conf, indexer=indexer, storage=storage,
+ not_implemented_middleware=not_implemented_middleware)
+ configkey = str(uuid.uuid4())
+ APPCONFIGS[configkey] = config
+
+ LOG.info("WSGI config used: %s", cfg_path)
+
+ if conf.api.auth_mode == "noauth":
+ warnings.warn("The `noauth' authentication mode is deprecated",
+ category=DeprecationWarning)
+
+ appname = "gnocchi+" + conf.api.auth_mode
+ app = deploy.loadapp("config:" + cfg_path, name=appname,
+ global_conf={'configkey': configkey})
+ return cors.CORS(app, conf=conf)
+
+
+def _setup_app(root, conf, indexer, storage, not_implemented_middleware):
+ app = pecan.make_app(
+ root,
+ hooks=(GnocchiHook(storage, indexer, conf),),
+ guess_content_type_from_ext=False,
+ )
+
+ if not_implemented_middleware:
+ app = webob.exc.HTTPExceptionMiddleware(NotImplementedMiddleware(app))
+
+ return app
+
+
+def app_factory(global_config, **local_conf):
+ global APPCONFIGS
+ appconfig = APPCONFIGS.get(global_config.get('configkey'))
+ return _setup_app(root=local_conf.get('root'), **appconfig)
+
+
+def build_wsgi_app():
+ return load_app(service.prepare_service())
diff --git a/gnocchi/rest/app.wsgi b/gnocchi/rest/app.wsgi
new file mode 100644
index 00000000..475d9acb
--- /dev/null
+++ b/gnocchi/rest/app.wsgi
@@ -0,0 +1,29 @@
+#
+# Copyright 2014 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Use this file for deploying the API under mod_wsgi.
+
+See http://pecan.readthedocs.org/en/latest/deployment.html for details.
+"""
+
+import debtcollector
+
+from gnocchi.rest import app
+
+application = app.build_wsgi_app()
+debtcollector.deprecate(prefix="The wsgi script gnocchi/rest/app.wsgi is deprecated",
+ postfix=", please use gnocchi-api binary as wsgi script instead",
+ version="4.0", removal_version="4.1",
+ category=RuntimeWarning)
diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py
new file mode 100644
index 00000000..46c0893c
--- /dev/null
+++ b/gnocchi/rest/auth_helper.py
@@ -0,0 +1,125 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import webob
+import werkzeug.http
+
+from gnocchi import rest
+
+
+class KeystoneAuthHelper(object):
+ @staticmethod
+ def get_current_user(headers):
+ # FIXME(jd) should have domain but should not break existing :(
+ user_id = headers.get("X-User-Id", "")
+ project_id = headers.get("X-Project-Id", "")
+ return user_id + ":" + project_id
+
+ @staticmethod
+ def get_auth_info(headers):
+ user_id = headers.get("X-User-Id")
+ project_id = headers.get("X-Project-Id")
+ return {
+ "user": (user_id or "") + ":" + (project_id or ""),
+ "user_id": user_id,
+ "project_id": project_id,
+ 'domain_id': headers.get("X-Domain-Id"),
+ 'roles': headers.get("X-Roles", "").split(","),
+ }
+
+ @staticmethod
+ def get_resource_policy_filter(headers, rule, resource_type):
+ try:
+ # Check if the policy allows the user to list any resource
+ rest.enforce(rule, {
+ "resource_type": resource_type,
+ })
+ except webob.exc.HTTPForbidden:
+ policy_filter = []
+ project_id = headers.get("X-Project-Id")
+
+ try:
+ # Check if the policy allows the user to list resources linked
+ # to their project
+ rest.enforce(rule, {
+ "resource_type": resource_type,
+ "project_id": project_id,
+ })
+ except webob.exc.HTTPForbidden:
+ pass
+ else:
+ policy_filter.append({"=": {"project_id": project_id}})
+
+ try:
+ # Check if the policy allows the user to list resources linked
+ # to their created_by_project
+ rest.enforce(rule, {
+ "resource_type": resource_type,
+ "created_by_project_id": project_id,
+ })
+ except webob.exc.HTTPForbidden:
+ pass
+ else:
+ if project_id:
+ policy_filter.append(
+ {"like": {"creator": "%:" + project_id}})
+ else:
+ policy_filter.append({"=": {"creator": None}})
+
+ if not policy_filter:
+ # We need to have at least one policy filter in place
+ rest.abort(403, "Insufficient privileges")
+
+ return {"or": policy_filter}
+
+
+class NoAuthHelper(KeystoneAuthHelper):
+ @staticmethod
+ def get_current_user(headers):
+ # FIXME(jd) Should be a single header
+ user_id = headers.get("X-User-Id")
+ project_id = headers.get("X-Project-Id")
+ if user_id:
+ if project_id:
+ return user_id + ":" + project_id
+ return user_id
+ if project_id:
+ return project_id
+ rest.abort(401, "Unable to determine current user")
+
+
+class BasicAuthHelper(object):
+ @staticmethod
+ def get_current_user(headers):
+ auth = werkzeug.http.parse_authorization_header(
+ headers.get("Authorization"))
+ if auth is None:
+ rest.abort(401)
+ return auth.username
+
+ def get_auth_info(self, headers):
+ user = self.get_current_user(headers)
+ roles = []
+ if user == "admin":
+ roles.append("admin")
+ return {
+ "user": user,
+ "roles": roles
+ }
+
+ @staticmethod
+ def get_resource_policy_filter(headers, rule, resource_type):
+ return None
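+# An illustrative exchange (credentials assumed): a request carrying
+#   Authorization: Basic YWRtaW46c2VjcmV0    (base64 of "admin:secret")
+# resolves get_current_user() to "admin" and get_auth_info() to
+# {"user": "admin", "roles": ["admin"]}; any other username gets no
+# roles, and get_resource_policy_filter() returns None since basic mode
+# does no per-project filtering.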
diff --git a/gnocchi/rest/policy.json b/gnocchi/rest/policy.json
new file mode 100644
index 00000000..51d39674
--- /dev/null
+++ b/gnocchi/rest/policy.json
@@ -0,0 +1,42 @@
+{
+ "admin_or_creator": "role:admin or user:%(creator)s or project_id:%(created_by_project_id)s",
+ "resource_owner": "project_id:%(project_id)s",
+ "metric_owner": "project_id:%(resource.project_id)s",
+
+ "get status": "role:admin",
+
+ "create resource": "",
+ "get resource": "rule:admin_or_creator or rule:resource_owner",
+ "update resource": "rule:admin_or_creator",
+ "delete resource": "rule:admin_or_creator",
+ "delete resources": "rule:admin_or_creator",
+ "list resource": "rule:admin_or_creator or rule:resource_owner",
+ "search resource": "rule:admin_or_creator or rule:resource_owner",
+
+ "create resource type": "role:admin",
+ "delete resource type": "role:admin",
+ "update resource type": "role:admin",
+ "list resource type": "",
+ "get resource type": "",
+
+ "get archive policy": "",
+ "list archive policy": "",
+ "create archive policy": "role:admin",
+ "update archive policy": "role:admin",
+ "delete archive policy": "role:admin",
+
+ "create archive policy rule": "role:admin",
+ "get archive policy rule": "",
+ "list archive policy rule": "",
+ "delete archive policy rule": "role:admin",
+
+ "create metric": "",
+ "delete metric": "rule:admin_or_creator",
+ "get metric": "rule:admin_or_creator or rule:metric_owner",
+ "search metric": "rule:admin_or_creator or rule:metric_owner",
+ "list metric": "",
+ "list all metric": "role:admin",
+
+ "get measures": "rule:admin_or_creator or rule:metric_owner",
+ "post measures": "rule:admin_or_creator"
+}
diff --git a/gnocchi/service.py b/gnocchi/service.py
new file mode 100644
index 00000000..26b8e7dd
--- /dev/null
+++ b/gnocchi/service.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2016-2017 Red Hat, Inc.
+# Copyright (c) 2015 eNovance
+# Copyright (c) 2013 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+
+from oslo_config import cfg
+from oslo_db import options as db_options
+from oslo_log import log
+from oslo_policy import opts as policy_opts
+import pbr.version
+from six.moves.urllib import parse as urlparse
+
+from gnocchi import archive_policy
+from gnocchi import opts
+from gnocchi import utils
+
+LOG = log.getLogger(__name__)
+
+
+def prepare_service(args=None, conf=None,
+ default_config_files=None):
+ if conf is None:
+ conf = cfg.ConfigOpts()
+ opts.set_defaults()
+ # FIXME(jd) Use the pkg_entry info to register the options of these libs
+ log.register_options(conf)
+ db_options.set_defaults(conf)
+ policy_opts.set_defaults(conf)
+
+ # Register our own Gnocchi options
+ for group, options in opts.list_opts():
+ conf.register_opts(list(options),
+ group=None if group == "DEFAULT" else group)
+
+ conf.set_default("workers", utils.get_default_workers(), group="metricd")
+
+ conf(args, project='gnocchi', validate_default_values=True,
+ default_config_files=default_config_files,
+ version=pbr.version.VersionInfo('gnocchi').version_string())
+
+ # HACK(jd) I'm not happy about that, fix AP class to handle a conf object?
+ archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = (
+ conf.archive_policy.default_aggregation_methods
+ )
+
+ # If no coordination URL is provided, default to using the indexer as
+ # coordinator
+ if conf.storage.coordination_url is None:
+ if conf.storage.driver == "redis":
+ conf.set_default("coordination_url",
+ conf.storage.redis_url,
+ "storage")
+ elif conf.incoming.driver == "redis":
+ conf.set_default("coordination_url",
+ conf.incoming.redis_url,
+ "storage")
+ else:
+ parsed = urlparse.urlparse(conf.indexer.url)
+ proto, _, _ = parsed.scheme.partition("+")
+ parsed = list(parsed)
+ # Set proto without the + part
+ parsed[0] = proto
+ conf.set_default("coordination_url",
+ urlparse.urlunparse(parsed),
+ "storage")
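+            # For example (illustrative URL), an indexer URL of
+            # "mysql+pymysql://gnocchi@localhost/gnocchi" yields
+            # "mysql://gnocchi@localhost/gnocchi" as the default
+            # coordination URL.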
+
+ cfg_path = conf.oslo_policy.policy_file
+ if not os.path.isabs(cfg_path):
+ cfg_path = conf.find_file(cfg_path)
+ if cfg_path is None or not os.path.exists(cfg_path):
+ cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ 'rest', 'policy.json'))
+ conf.set_default('policy_file', cfg_path, group='oslo_policy')
+
+ log.set_defaults(default_log_levels=log.get_default_log_levels() +
+ ["passlib.utils.compat=INFO"])
+ log.setup(conf, 'gnocchi')
+ conf.log_opt_values(LOG, log.DEBUG)
+
+ return conf
diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py
new file mode 100644
index 00000000..267df497
--- /dev/null
+++ b/gnocchi/statsd.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import itertools
+import uuid
+
+try:
+ import asyncio
+except ImportError:
+ import trollius as asyncio
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from gnocchi import indexer
+from gnocchi import service
+from gnocchi import storage
+from gnocchi import utils
+
+
+LOG = log.getLogger(__name__)
+
+
+class Stats(object):
+ def __init__(self, conf):
+ self.conf = conf
+ self.storage = storage.get_driver(self.conf)
+ self.indexer = indexer.get_driver(self.conf)
+ self.indexer.connect()
+ try:
+ self.indexer.create_resource('generic',
+ self.conf.statsd.resource_id,
+ self.conf.statsd.creator)
+ except indexer.ResourceAlreadyExists:
+ LOG.debug("Resource %s already exists",
+ self.conf.statsd.resource_id)
+ else:
+ LOG.info("Created resource %s", self.conf.statsd.resource_id)
+ self.gauges = {}
+ self.counters = {}
+ self.times = {}
+
+ def reset(self):
+ self.gauges.clear()
+ self.counters.clear()
+ self.times.clear()
+
+ def treat_metric(self, metric_name, metric_type, value, sampling):
+ metric_name += "|" + metric_type
+ if metric_type == "ms":
+ if sampling is not None:
+ raise ValueError(
+ "Invalid sampling for ms: `%d`, should be none"
+ % sampling)
+ self.times[metric_name] = storage.Measure(
+ utils.dt_in_unix_ns(utils.utcnow()), value)
+ elif metric_type == "g":
+ if sampling is not None:
+ raise ValueError(
+ "Invalid sampling for g: `%d`, should be none"
+ % sampling)
+ self.gauges[metric_name] = storage.Measure(
+ utils.dt_in_unix_ns(utils.utcnow()), value)
+ elif metric_type == "c":
+ sampling = 1 if sampling is None else sampling
+ if metric_name in self.counters:
+ current_value = self.counters[metric_name].value
+ else:
+ current_value = 0
+ self.counters[metric_name] = storage.Measure(
+ utils.dt_in_unix_ns(utils.utcnow()),
+ current_value + (value * (1 / sampling)))
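+            # Illustration (values assumed, per the statsd protocol): a
+            # counter received as "hits:1|c|@0.1" was sampled at 10%, so it
+            # adds 1 * (1 / 0.1) == 10 to the stored counter value.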
+ # TODO(jd) Support "set" type
+ # elif metric_type == "s":
+ # pass
+ else:
+ raise ValueError("Unknown metric type `%s'" % metric_type)
+
+ def flush(self):
+ resource = self.indexer.get_resource('generic',
+ self.conf.statsd.resource_id,
+ with_metrics=True)
+
+ for metric_name, measure in itertools.chain(
+ six.iteritems(self.gauges),
+ six.iteritems(self.counters),
+ six.iteritems(self.times)):
+ try:
+ # NOTE(jd) We avoid considering any concurrency here as statsd
+ # is not designed to run in parallel and we do not envision
+ # operators manipulating the resource/metrics using the Gnocchi
+ # API at the same time.
+ metric = resource.get_metric(metric_name)
+ if not metric:
+ ap_name = self._get_archive_policy_name(metric_name)
+ metric = self.indexer.create_metric(
+ uuid.uuid4(),
+ self.conf.statsd.creator,
+ archive_policy_name=ap_name,
+ name=metric_name,
+ resource_id=self.conf.statsd.resource_id)
+ self.storage.incoming.add_measures(metric, (measure,))
+ except Exception as e:
+ LOG.error("Unable to add measure %s: %s",
+ metric_name, e)
+
+ self.reset()
+
+ def _get_archive_policy_name(self, metric_name):
+ if self.conf.statsd.archive_policy_name:
+ return self.conf.statsd.archive_policy_name
+ # NOTE(sileht): We didn't catch NoArchivePolicyRuleMatch to log it
+ ap = self.indexer.get_archive_policy_for_metric(metric_name)
+ return ap.name
+
+
+class StatsdServer(object):
+ def __init__(self, stats):
+ self.stats = stats
+
+ @staticmethod
+ def connection_made(transport):
+ pass
+
+ def datagram_received(self, data, addr):
+ LOG.debug("Received data `%r' from %s", data, addr)
+ try:
+ messages = [m for m in data.decode().split("\n") if m]
+ except Exception as e:
+ LOG.error("Unable to decode datagram: %s", e)
+ return
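+        # Example payload (illustrative): b"cpu:42|g\nhits:1|c|@0.5" decodes
+        # into one gauge message and one sampled counter message.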
+ for message in messages:
+ metric = message.split("|")
+ if len(metric) == 2:
+ metric_name, metric_type = metric
+ sampling = None
+ elif len(metric) == 3:
+ metric_name, metric_type, sampling = metric
+ else:
+ LOG.error("Invalid number of | in `%s'", message)
+ continue
+ sampling = float(sampling[1:]) if sampling is not None else None
+ metric_name, metric_str_val = metric_name.split(':')
+            # NOTE(jd): We do not support +/- gauges, and we delete gauges
+            # on each flush.
+ value = float(metric_str_val)
+ try:
+ self.stats.treat_metric(metric_name, metric_type,
+ value, sampling)
+ except Exception as e:
+ LOG.error("Unable to treat metric %s: %s", message, str(e))
+
+
+def start():
+ conf = service.prepare_service()
+
+ if conf.statsd.resource_id is None:
+ raise cfg.RequiredOptError("resource_id", cfg.OptGroup("statsd"))
+
+ stats = Stats(conf)
+
+ loop = asyncio.get_event_loop()
+ # TODO(jd) Add TCP support
+ listen = loop.create_datagram_endpoint(
+ lambda: StatsdServer(stats),
+ local_addr=(conf.statsd.host, conf.statsd.port))
+
+ def _flush():
+ loop.call_later(conf.statsd.flush_delay, _flush)
+ stats.flush()
+
+ loop.call_later(conf.statsd.flush_delay, _flush)
+ transport, protocol = loop.run_until_complete(listen)
+
+ LOG.info("Started on %s:%d", conf.statsd.host, conf.statsd.port)
+ LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay)
+
+ try:
+ loop.run_forever()
+ except KeyboardInterrupt:
+ pass
+
+ transport.close()
+ loop.close()
diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py
new file mode 100644
index 00000000..d06a47cf
--- /dev/null
+++ b/gnocchi/storage/__init__.py
@@ -0,0 +1,372 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import operator
+from oslo_config import cfg
+from oslo_log import log
+from stevedore import driver
+
+from gnocchi import exceptions
+from gnocchi import indexer
+
+
+OPTS = [
+ cfg.StrOpt('driver',
+ default='file',
+ help='Storage driver to use'),
+]
+
+LOG = log.getLogger(__name__)
+
+
+class Measure(object):
+ def __init__(self, timestamp, value):
+ self.timestamp = timestamp
+ self.value = value
+
+ def __iter__(self):
+ """Allow to transform measure to tuple."""
+ yield self.timestamp
+ yield self.value
+
+
+class Metric(object):
+ def __init__(self, id, archive_policy,
+ creator=None,
+ name=None,
+ resource_id=None):
+ self.id = id
+ self.archive_policy = archive_policy
+ self.creator = creator
+ self.name = name
+ self.resource_id = resource_id
+
+ def __repr__(self):
+ return '<%s %s>' % (self.__class__.__name__, self.id)
+
+ def __str__(self):
+ return str(self.id)
+
+ def __eq__(self, other):
+ return (isinstance(other, Metric)
+ and self.id == other.id
+ and self.archive_policy == other.archive_policy
+ and self.creator == other.creator
+ and self.name == other.name
+ and self.resource_id == other.resource_id)
+
+ __hash__ = object.__hash__
+
+
+class StorageError(Exception):
+ pass
+
+
+class InvalidQuery(StorageError):
+ pass
+
+
+class MetricDoesNotExist(StorageError):
+ """Error raised when this metric does not exist."""
+
+ def __init__(self, metric):
+ self.metric = metric
+ super(MetricDoesNotExist, self).__init__(
+ "Metric %s does not exist" % metric)
+
+
+class AggregationDoesNotExist(StorageError):
+ """Error raised when the aggregation method doesn't exists for a metric."""
+
+ def __init__(self, metric, method):
+ self.metric = metric
+ self.method = method
+ super(AggregationDoesNotExist, self).__init__(
+ "Aggregation method '%s' for metric %s does not exist" %
+ (method, metric))
+
+
+class GranularityDoesNotExist(StorageError):
+ """Error raised when the granularity doesn't exist for a metric."""
+
+ def __init__(self, metric, granularity):
+ self.metric = metric
+ self.granularity = granularity
+ super(GranularityDoesNotExist, self).__init__(
+ "Granularity '%s' for metric %s does not exist" %
+ (granularity, metric))
+
+
+class MetricAlreadyExists(StorageError):
+ """Error raised when this metric already exists."""
+
+ def __init__(self, metric):
+ self.metric = metric
+ super(MetricAlreadyExists, self).__init__(
+ "Metric %s already exists" % metric)
+
+
+class MetricUnaggregatable(StorageError):
+ """Error raised when metrics can't be aggregated."""
+
+ def __init__(self, metrics, reason):
+ self.metrics = metrics
+ self.reason = reason
+ super(MetricUnaggregatable, self).__init__(
+ "Metrics %s can't be aggregated: %s"
+ % (", ".join((str(m.id) for m in metrics)), reason))
+
+
+class LockedMetric(StorageError):
+ """Error raised when this metric is already being handled by another."""
+
+ def __init__(self, metric):
+ self.metric = metric
+ super(LockedMetric, self).__init__("Metric %s is locked" % metric)
+
+
+def get_driver_class(namespace, conf):
+ """Return the storage driver class.
+
+ :param conf: The conf to use to determine the driver.
+ """
+ return driver.DriverManager(namespace,
+ conf.driver).driver
+
+
+def get_driver(conf):
+ """Return the configured driver."""
+ incoming = get_driver_class('gnocchi.incoming', conf.incoming)(
+ conf.incoming)
+ return get_driver_class('gnocchi.storage', conf.storage)(
+ conf.storage, incoming)
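+
+
+# Illustrative wiring (assumed from the stevedore namespaces above): with
+# incoming.driver = "file" and storage.driver = "file", get_driver() loads
+# the classes registered under the "gnocchi.incoming" and "gnocchi.storage"
+# entry points and hands the incoming driver to the storage driver.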
+
+
+class StorageDriver(object):
+ def __init__(self, conf, incoming):
+ self.incoming = incoming
+
+ @staticmethod
+ def stop():
+ pass
+
+ def upgrade(self, index, num_sacks):
+ self.incoming.upgrade(index, num_sacks)
+
+ def process_background_tasks(self, index, metrics, sync=False):
+ """Process background tasks for this storage.
+
+        This calls :func:`process_new_measures` to process new measures.
+
+ :param index: An indexer to be used for querying metrics
+ :param metrics: The list of metrics waiting for processing
+ :param sync: If True, then process everything synchronously and raise
+ on error
+ :type sync: bool
+ """
+ LOG.debug("Processing new measures")
+ try:
+ self.process_new_measures(index, metrics, sync)
+ except Exception:
+ if sync:
+ raise
+ LOG.error("Unexpected error during measures processing",
+ exc_info=True)
+
+ def expunge_metrics(self, index, sync=False):
+ """Remove deleted metrics
+
+ :param index: An indexer to be used for querying metrics
+ :param sync: If True, then delete everything synchronously and raise
+ on error
+ :type sync: bool
+ """
+
+ metrics_to_expunge = index.list_metrics(status='delete')
+ for m in metrics_to_expunge:
+ try:
+ self.delete_metric(m, sync)
+ index.expunge_metric(m.id)
+ except (indexer.NoSuchMetric, LockedMetric):
+ # It's possible another process deleted or is deleting the
+ # metric, not a big deal
+ pass
+ except Exception:
+ if sync:
+ raise
+ LOG.error("Unable to expunge metric %s from storage", m,
+ exc_info=True)
+
+ @staticmethod
+ def process_new_measures(indexer, metrics, sync=False):
+ """Process added measures in background.
+
+        Some drivers might need a background task that processes the
+        measures sent to metrics. This is used for that.
+ """
+
+ @staticmethod
+ def get_measures(metric, from_timestamp=None, to_timestamp=None,
+ aggregation='mean', granularity=None, resample=None):
+ """Get a measure to a metric.
+
+ :param metric: The metric measured.
+ :param from timestamp: The timestamp to get the measure from.
+ :param to timestamp: The timestamp to get the measure to.
+ :param aggregation: The type of aggregation to retrieve.
+ :param granularity: The granularity to retrieve.
+ :param resample: The granularity to resample to.
+ """
+ if aggregation not in metric.archive_policy.aggregation_methods:
+ raise AggregationDoesNotExist(metric, aggregation)
+
+ @staticmethod
+ def delete_metric(metric, sync=False):
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def get_cross_metric_measures(metrics, from_timestamp=None,
+ to_timestamp=None, aggregation='mean',
+ reaggregation=None, resample=None,
+ granularity=None, needed_overlap=None,
+ fill=None):
+ """Get aggregated measures of multiple entities.
+
+ :param entities: The entities measured to aggregate.
+ :param from timestamp: The timestamp to get the measure from.
+ :param to timestamp: The timestamp to get the measure to.
+ :param granularity: The granularity to retrieve.
+ :param aggregation: The type of aggregation to retrieve.
+ :param reaggregation: The type of aggregation to compute
+ on the retrieved measures.
+ :param resample: The granularity to resample to.
+ :param fill: The value to use to fill in missing data in series.
+ """
+ for metric in metrics:
+ if aggregation not in metric.archive_policy.aggregation_methods:
+ raise AggregationDoesNotExist(metric, aggregation)
+ if (granularity is not None and granularity
+ not in set(d.granularity
+ for d in metric.archive_policy.definition)):
+ raise GranularityDoesNotExist(metric, granularity)
+
+ @staticmethod
+ def search_value(metrics, query, from_timestamp=None,
+ to_timestamp=None,
+ aggregation='mean',
+ granularity=None):
+ """Search for an aggregated value that realizes a predicate.
+
+ :param metrics: The list of metrics to look into.
+ :param query: The query being sent.
+ :param from_timestamp: The timestamp to get the measure from.
+ :param to_timestamp: The timestamp to get the measure to.
+ :param aggregation: The type of aggregation to retrieve.
+ :param granularity: The granularity to retrieve.
+ """
+ raise exceptions.NotImplementedError
+
+
+class MeasureQuery(object):
+ binary_operators = {
+ u"=": operator.eq,
+ u"==": operator.eq,
+ u"eq": operator.eq,
+
+ u"<": operator.lt,
+ u"lt": operator.lt,
+
+ u">": operator.gt,
+ u"gt": operator.gt,
+
+ u"<=": operator.le,
+ u"≤": operator.le,
+ u"le": operator.le,
+
+ u">=": operator.ge,
+ u"≥": operator.ge,
+ u"ge": operator.ge,
+
+ u"!=": operator.ne,
+ u"≠": operator.ne,
+ u"ne": operator.ne,
+
+ u"%": operator.mod,
+ u"mod": operator.mod,
+
+ u"+": operator.add,
+ u"add": operator.add,
+
+ u"-": operator.sub,
+ u"sub": operator.sub,
+
+ u"*": operator.mul,
+ u"×": operator.mul,
+ u"mul": operator.mul,
+
+ u"/": operator.truediv,
+ u"÷": operator.truediv,
+ u"div": operator.truediv,
+
+ u"**": operator.pow,
+ u"^": operator.pow,
+ u"pow": operator.pow,
+ }
+
+ multiple_operators = {
+ u"or": any,
+ u"∨": any,
+ u"and": all,
+ u"∧": all,
+ }
+
+ def __init__(self, tree):
+ self._eval = self.build_evaluator(tree)
+
+ def __call__(self, value):
+ return self._eval(value)
+
+ def build_evaluator(self, tree):
+ try:
+ operator, nodes = list(tree.items())[0]
+ except Exception:
+ return lambda value: tree
+ try:
+ op = self.multiple_operators[operator]
+ except KeyError:
+ try:
+ op = self.binary_operators[operator]
+ except KeyError:
+ raise InvalidQuery("Unknown operator %s" % operator)
+ return self._handle_binary_op(op, nodes)
+ return self._handle_multiple_op(op, nodes)
+
+ def _handle_multiple_op(self, op, nodes):
+ elements = [self.build_evaluator(node) for node in nodes]
+ return lambda value: op((e(value) for e in elements))
+
+ def _handle_binary_op(self, op, node):
+ try:
+ iterator = iter(node)
+ except Exception:
+ return lambda value: op(value, node)
+ nodes = list(iterator)
+ if len(nodes) != 2:
+ raise InvalidQuery(
+ "Binary operator %s needs 2 arguments, %d given" %
+ (op, len(nodes)))
+ node0 = self.build_evaluator(node[0])
+ node1 = self.build_evaluator(node[1])
+ return lambda value: op(node0(value), node1(value))
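+
+
+# Illustrative usage (not part of the original module): a query is a nested
+# dict of operators evaluated against a single value, e.g.
+#   MeasureQuery({u"and": [{u">": 10}, {u"<": 20}]})(15) returns True,
+#   MeasureQuery({u">": 10})(5) returns False.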
diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py
new file mode 100644
index 00000000..65983ad1
--- /dev/null
+++ b/gnocchi/storage/_carbonara.py
@@ -0,0 +1,571 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import collections
+import datetime
+import itertools
+import operator
+
+from concurrent import futures
+import iso8601
+from oslo_config import cfg
+from oslo_log import log
+import six
+import six.moves
+
+from gnocchi import carbonara
+from gnocchi import storage
+from gnocchi import utils
+
+
+OPTS = [
+ cfg.IntOpt('aggregation_workers_number',
+ default=1, min=1,
+ help='Number of threads to process and store aggregates. '
+ 'Set value roughly equal to number of aggregates to be '
+ 'computed per metric'),
+ cfg.StrOpt('coordination_url',
+ secret=True,
+ help='Coordination driver URL'),
+
+]
+
+LOG = log.getLogger(__name__)
+
+
+class CorruptionError(ValueError):
+ """Data corrupted, damn it."""
+
+ def __init__(self, message):
+ super(CorruptionError, self).__init__(message)
+
+
+class SackLockTimeoutError(Exception):
+ pass
+
+
+class CarbonaraBasedStorage(storage.StorageDriver):
+
+ def __init__(self, conf, incoming):
+ super(CarbonaraBasedStorage, self).__init__(conf, incoming)
+ self.aggregation_workers_number = conf.aggregation_workers_number
+ if self.aggregation_workers_number == 1:
+ # NOTE(jd) Avoid using futures at all if we don't want any threads.
+ self._map_in_thread = self._map_no_thread
+ else:
+ self._map_in_thread = self._map_in_futures_threads
+ self.coord, my_id = utils.get_coordinator_and_start(
+ conf.coordination_url)
+
+ def stop(self):
+ self.coord.stop()
+
+ @staticmethod
+ def _get_measures(metric, timestamp_key, aggregation, granularity,
+ version=3):
+ raise NotImplementedError
+
+ @staticmethod
+ def _get_unaggregated_timeserie(metric, version=3):
+ raise NotImplementedError
+
+ def _get_unaggregated_timeserie_and_unserialize(
+ self, metric, block_size, back_window):
+ """Retrieve unaggregated timeserie for a metric and unserialize it.
+
+ Returns a gnocchi.carbonara.BoundTimeSerie object. If the data cannot
+ be retrieved, returns None.
+
+ """
+ with utils.StopWatch() as sw:
+ raw_measures = (
+ self._get_unaggregated_timeserie(
+ metric)
+ )
+ if not raw_measures:
+ return
+ LOG.debug(
+ "Retrieve unaggregated measures "
+ "for %s in %.2fs",
+ metric.id, sw.elapsed())
+ try:
+ return carbonara.BoundTimeSerie.unserialize(
+ raw_measures, block_size, back_window)
+ except ValueError:
+ raise CorruptionError(
+ "Data corruption detected for %s "
+ "unaggregated timeserie" % metric.id)
+
+ @staticmethod
+ def _store_unaggregated_timeserie(metric, data, version=3):
+ raise NotImplementedError
+
+ @staticmethod
+ def _store_metric_measures(metric, timestamp_key, aggregation,
+ granularity, data, offset=None, version=3):
+ raise NotImplementedError
+
+ @staticmethod
+ def _list_split_keys_for_metric(metric, aggregation, granularity,
+ version=3):
+ raise NotImplementedError
+
+ @staticmethod
+ def _version_check(name, v):
+ """Validate object matches expected version.
+
+ Version should be last attribute and start with 'v'
+ """
+ return name.split("_")[-1] == 'v%s' % v
+
+ def get_measures(self, metric, from_timestamp=None, to_timestamp=None,
+ aggregation='mean', granularity=None, resample=None):
+ super(CarbonaraBasedStorage, self).get_measures(
+ metric, from_timestamp, to_timestamp, aggregation)
+ if granularity is None:
+ agg_timeseries = self._map_in_thread(
+ self._get_measures_timeserie,
+ ((metric, aggregation, ap.granularity,
+ from_timestamp, to_timestamp)
+ for ap in reversed(metric.archive_policy.definition)))
+ else:
+ agg_timeseries = self._get_measures_timeserie(
+ metric, aggregation, granularity,
+ from_timestamp, to_timestamp)
+ if resample:
+ agg_timeseries = agg_timeseries.resample(resample)
+ agg_timeseries = [agg_timeseries]
+
+ return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v)
+ for ts in agg_timeseries
+ for timestamp, r, v in ts.fetch(from_timestamp, to_timestamp)]
+
+ def _get_measures_and_unserialize(self, metric, key,
+ aggregation, granularity):
+ data = self._get_measures(metric, key, aggregation, granularity)
+ try:
+ return carbonara.AggregatedTimeSerie.unserialize(
+ data, key, aggregation, granularity)
+ except carbonara.InvalidData:
+ LOG.error("Data corruption detected for %s "
+ "aggregated `%s' timeserie, granularity `%s' "
+ "around time `%s', ignoring.",
+ metric.id, aggregation, granularity, key)
+
+ def _get_measures_timeserie(self, metric,
+ aggregation, granularity,
+ from_timestamp=None, to_timestamp=None):
+
+        # Find the number of points
+ for d in metric.archive_policy.definition:
+ if d.granularity == granularity:
+ points = d.points
+ break
+ else:
+ raise storage.GranularityDoesNotExist(metric, granularity)
+
+ all_keys = None
+ try:
+ all_keys = self._list_split_keys_for_metric(
+ metric, aggregation, granularity)
+ except storage.MetricDoesNotExist:
+ for d in metric.archive_policy.definition:
+ if d.granularity == granularity:
+ return carbonara.AggregatedTimeSerie(
+ sampling=granularity,
+ aggregation_method=aggregation,
+ max_size=d.points)
+ raise storage.GranularityDoesNotExist(metric, granularity)
+
+ if from_timestamp:
+ from_timestamp = str(
+ carbonara.SplitKey.from_timestamp_and_sampling(
+ from_timestamp, granularity))
+
+ if to_timestamp:
+ to_timestamp = str(
+ carbonara.SplitKey.from_timestamp_and_sampling(
+ to_timestamp, granularity))
+
+ timeseries = filter(
+ lambda x: x is not None,
+ self._map_in_thread(
+ self._get_measures_and_unserialize,
+ ((metric, key, aggregation, granularity)
+ for key in all_keys
+ if ((not from_timestamp or key >= from_timestamp)
+ and (not to_timestamp or key <= to_timestamp))))
+ )
+
+ return carbonara.AggregatedTimeSerie.from_timeseries(
+ sampling=granularity,
+ aggregation_method=aggregation,
+ timeseries=timeseries,
+ max_size=points)
+
+ def _store_timeserie_split(self, metric, key, split,
+ aggregation, archive_policy_def,
+ oldest_mutable_timestamp):
+ # NOTE(jd) We write the full split only if the driver works that way
+ # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range.
+ write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp
+ key_as_str = str(key)
+ if write_full:
+ try:
+ existing = self._get_measures_and_unserialize(
+ metric, key_as_str, aggregation,
+ archive_policy_def.granularity)
+ except storage.AggregationDoesNotExist:
+ pass
+ else:
+ if existing is not None:
+ if split is None:
+ split = existing
+ else:
+ split.merge(existing)
+
+ if split is None:
+            # `split' can be None if `existing' is None and no split was
+            # passed in to rewrite and compress the data; in that case, the
+            # split key is present and listed, but some aggregation method
+            # or granularity is missing. That means data is corrupted, but
+            # it does not mean we have to fail: we can just do nothing and
+            # log a warning.
+ LOG.warning("No data found for metric %s, granularity %f "
+ "and aggregation method %s (split key %s): "
+ "possible data corruption",
+ metric, archive_policy_def.granularity,
+ aggregation, key)
+ return
+
+ offset, data = split.serialize(key, compressed=write_full)
+
+ return self._store_metric_measures(
+ metric, key_as_str, aggregation, archive_policy_def.granularity,
+ data, offset=offset)
+
+ def _add_measures(self, aggregation, archive_policy_def,
+ metric, grouped_serie,
+ previous_oldest_mutable_timestamp,
+ oldest_mutable_timestamp):
+ ts = carbonara.AggregatedTimeSerie.from_grouped_serie(
+ grouped_serie, archive_policy_def.granularity,
+ aggregation, max_size=archive_policy_def.points)
+
+ # Don't do anything if the timeserie is empty
+ if not ts:
+ return
+
+ # We only need to check for rewrite if driver is not in WRITE_FULL mode
+ # and if we already stored splits once
+ need_rewrite = (
+ not self.WRITE_FULL
+ and previous_oldest_mutable_timestamp is not None
+ )
+
+ if archive_policy_def.timespan or need_rewrite:
+ existing_keys = self._list_split_keys_for_metric(
+ metric, aggregation, archive_policy_def.granularity)
+
+ # First delete old splits
+ if archive_policy_def.timespan:
+ oldest_point_to_keep = ts.last - datetime.timedelta(
+ seconds=archive_policy_def.timespan)
+ oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep)
+ oldest_key_to_keep_s = str(oldest_key_to_keep)
+ for key in list(existing_keys):
+                    # NOTE(jd) Only delete if the key is strictly less than
+                    # the timestamp; we don't delete any timeserie split that
+                    # contains our timestamp, so we prefer to keep a bit more
+                    # data rather than delete too much.
+ if key < oldest_key_to_keep_s:
+ self._delete_metric_measures(
+ metric, key, aggregation,
+ archive_policy_def.granularity)
+ existing_keys.remove(key)
+ else:
+ oldest_key_to_keep = carbonara.SplitKey(0, 0)
+
+ # Rewrite all read-only splits just for fun (and compression). This
+ # only happens if `previous_oldest_mutable_timestamp' exists, which
+ # means we already wrote some splits at some point – so this is not the
+ # first time we treat this timeserie.
+ if need_rewrite:
+ previous_oldest_mutable_key = str(ts.get_split_key(
+ previous_oldest_mutable_timestamp))
+ oldest_mutable_key = str(ts.get_split_key(
+ oldest_mutable_timestamp))
+
+ if previous_oldest_mutable_key != oldest_mutable_key:
+ for key in existing_keys:
+ if previous_oldest_mutable_key <= key < oldest_mutable_key:
+ LOG.debug(
+ "Compressing previous split %s (%s) for metric %s",
+ key, aggregation, metric)
+ # NOTE(jd) Rewrite it entirely for fun (and later for
+ # compression). For that, we just pass None as split.
+ self._store_timeserie_split(
+ metric, carbonara.SplitKey(
+ float(key), archive_policy_def.granularity),
+ None, aggregation, archive_policy_def,
+ oldest_mutable_timestamp)
+
+ for key, split in ts.split():
+ if key >= oldest_key_to_keep:
+ LOG.debug(
+ "Storing split %s (%s) for metric %s",
+ key, aggregation, metric)
+ self._store_timeserie_split(
+ metric, key, split, aggregation, archive_policy_def,
+ oldest_mutable_timestamp)
+
+ @staticmethod
+ def _delete_metric(metric):
+ raise NotImplementedError
+
+ def delete_metric(self, metric, sync=False):
+ LOG.debug("Deleting metric %s", metric)
+ lock = self.incoming.get_sack_lock(
+ self.coord, self.incoming.sack_for_metric(metric.id))
+ if not lock.acquire(blocking=sync):
+ raise storage.LockedMetric(metric)
+ # NOTE(gordc): no need to hold lock because the metric has been already
+ # marked as "deleted" in the indexer so no measure worker
+ # is going to process it anymore.
+ lock.release()
+ self._delete_metric(metric)
+ self.incoming.delete_unprocessed_measures_for_metric_id(metric.id)
+
+ @staticmethod
+ def _delete_metric_measures(metric, timestamp_key,
+ aggregation, granularity, version=3):
+ raise NotImplementedError
+
+ def refresh_metric(self, indexer, metric, timeout):
+ s = self.incoming.sack_for_metric(metric.id)
+ lock = self.incoming.get_sack_lock(self.coord, s)
+ if not lock.acquire(blocking=timeout):
+ raise SackLockTimeoutError(
+ 'Unable to refresh metric: %s. Metric is locked. '
+ 'Please try again.' % metric.id)
+ try:
+ self.process_new_measures(indexer, [six.text_type(metric.id)])
+ finally:
+ lock.release()
+
+ def process_new_measures(self, indexer, metrics_to_process,
+ sync=False):
+ # process only active metrics. deleted metrics with unprocessed
+ # measures will be skipped until cleaned by janitor.
+ metrics = indexer.list_metrics(ids=metrics_to_process)
+ for metric in metrics:
+ # NOTE(gordc): must lock at sack level
+ try:
+ LOG.debug("Processing measures for %s", metric)
+ with self.incoming.process_measure_for_metric(metric) \
+ as measures:
+ self._compute_and_store_timeseries(metric, measures)
+ LOG.debug("Measures for metric %s processed", metric)
+ except Exception:
+ if sync:
+ raise
+ LOG.error("Error processing new measures", exc_info=True)
+
+ def _compute_and_store_timeseries(self, metric, measures):
+ # NOTE(mnaser): The metric could have been handled by
+ # another worker, ignore if no measures.
+ if len(measures) == 0:
+ LOG.debug("Skipping %s (already processed)", metric)
+ return
+
+ measures = sorted(measures, key=operator.itemgetter(0))
+
+ agg_methods = list(metric.archive_policy.aggregation_methods)
+ block_size = metric.archive_policy.max_block_size
+ back_window = metric.archive_policy.back_window
+ definition = metric.archive_policy.definition
+
+ try:
+ ts = self._get_unaggregated_timeserie_and_unserialize(
+ metric, block_size=block_size, back_window=back_window)
+ except storage.MetricDoesNotExist:
+ try:
+ self._create_metric(metric)
+ except storage.MetricAlreadyExists:
+                # Created in the meantime, do not worry.
+ pass
+ ts = None
+ except CorruptionError as e:
+ LOG.error(e)
+ ts = None
+
+ if ts is None:
+ # This is the first time we treat measures for this
+ # metric, or data are corrupted, create a new one
+ ts = carbonara.BoundTimeSerie(block_size=block_size,
+ back_window=back_window)
+ current_first_block_timestamp = None
+ else:
+ current_first_block_timestamp = ts.first_block_timestamp()
+
+        # NOTE(jd) This is Python, where you need such a hack
+        # to pass a mutable variable into a closure, sorry.
+ computed_points = {"number": 0}
+
+ def _map_add_measures(bound_timeserie):
+ # NOTE (gordc): bound_timeserie is entire set of
+ # unaggregated measures matching largest
+ # granularity. the following takes only the points
+ # affected by new measures for specific granularity
+ tstamp = max(bound_timeserie.first, measures[0][0])
+ new_first_block_timestamp = bound_timeserie.first_block_timestamp()
+ computed_points['number'] = len(bound_timeserie)
+ for d in definition:
+ ts = bound_timeserie.group_serie(
+ d.granularity, carbonara.round_timestamp(
+ tstamp, d.granularity * 10e8))
+
+ self._map_in_thread(
+ self._add_measures,
+ ((aggregation, d, metric, ts,
+ current_first_block_timestamp,
+ new_first_block_timestamp)
+ for aggregation in agg_methods))
+
+ with utils.StopWatch() as sw:
+ ts.set_values(measures,
+ before_truncate_callback=_map_add_measures,
+ ignore_too_old_timestamps=True)
+
+ number_of_operations = (len(agg_methods) * len(definition))
+ perf = ""
+ elapsed = sw.elapsed()
+ if elapsed > 0:
+ perf = " (%d points/s, %d measures/s)" % (
+ ((number_of_operations * computed_points['number']) /
+ elapsed),
+ ((number_of_operations * len(measures)) / elapsed)
+ )
+ LOG.debug("Computed new metric %s with %d new measures "
+ "in %.2f seconds%s",
+ metric.id, len(measures), elapsed, perf)
+
+ self._store_unaggregated_timeserie(metric, ts.serialize())
+
+ def get_cross_metric_measures(self, metrics, from_timestamp=None,
+ to_timestamp=None, aggregation='mean',
+ reaggregation=None, resample=None,
+ granularity=None, needed_overlap=100.0,
+ fill=None):
+ super(CarbonaraBasedStorage, self).get_cross_metric_measures(
+ metrics, from_timestamp, to_timestamp,
+ aggregation, reaggregation, resample, granularity, needed_overlap)
+
+ if reaggregation is None:
+ reaggregation = aggregation
+
+ if granularity is None:
+ granularities = (
+ definition.granularity
+ for metric in metrics
+ for definition in metric.archive_policy.definition
+ )
+ granularities_in_common = [
+ g
+ for g, occurrence in six.iteritems(
+ collections.Counter(granularities))
+ if occurrence == len(metrics)
+ ]
+
+ if not granularities_in_common:
+ raise storage.MetricUnaggregatable(
+ metrics, 'No granularity match')
+ else:
+ granularities_in_common = [granularity]
+
+ if resample and granularity:
+ tss = self._map_in_thread(self._get_measures_timeserie,
+ [(metric, aggregation, granularity,
+ from_timestamp, to_timestamp)
+ for metric in metrics])
+ for i, ts in enumerate(tss):
+ tss[i] = ts.resample(resample)
+ else:
+ tss = self._map_in_thread(self._get_measures_timeserie,
+ [(metric, aggregation, g,
+ from_timestamp, to_timestamp)
+ for metric in metrics
+ for g in granularities_in_common])
+
+ try:
+ return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v)
+ for timestamp, r, v
+ in carbonara.AggregatedTimeSerie.aggregated(
+ tss, reaggregation, from_timestamp, to_timestamp,
+ needed_overlap, fill)]
+ except carbonara.UnAggregableTimeseries as e:
+ raise storage.MetricUnaggregatable(metrics, e.reason)
+
+ def _find_measure(self, metric, aggregation, granularity, predicate,
+ from_timestamp, to_timestamp):
+ timeserie = self._get_measures_timeserie(
+ metric, aggregation, granularity,
+ from_timestamp, to_timestamp)
+ values = timeserie.fetch(from_timestamp, to_timestamp)
+ return {metric:
+ [(timestamp.replace(tzinfo=iso8601.iso8601.UTC),
+ g, value)
+ for timestamp, g, value in values
+ if predicate(value)]}
+
+ def search_value(self, metrics, query, from_timestamp=None,
+ to_timestamp=None, aggregation='mean',
+ granularity=None):
+ granularity = granularity or []
+ predicate = storage.MeasureQuery(query)
+
+ results = self._map_in_thread(
+ self._find_measure,
+ [(metric, aggregation,
+ gran, predicate,
+ from_timestamp, to_timestamp)
+ for metric in metrics
+ for gran in granularity or
+ (defin.granularity
+ for defin in metric.archive_policy.definition)])
+ result = collections.defaultdict(list)
+ for r in results:
+ for metric, metric_result in six.iteritems(r):
+ result[metric].extend(metric_result)
+
+ # Sort the result
+ for metric, r in six.iteritems(result):
+ # Sort by timestamp asc, granularity desc
+ r.sort(key=lambda t: (t[0], - t[1]))
+
+ return result
+
+ @staticmethod
+ def _map_no_thread(method, list_of_args):
+ return list(itertools.starmap(method, list_of_args))
+
+ def _map_in_futures_threads(self, method, list_of_args):
+ with futures.ThreadPoolExecutor(
+ max_workers=self.aggregation_workers_number) as executor:
+ # We use 'list' to iterate all threads here to raise the first
+ # exception now, not much choice
+ return list(executor.map(lambda args: method(*args), list_of_args))
diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py
new file mode 100644
index 00000000..4de4d1b5
--- /dev/null
+++ b/gnocchi/storage/ceph.py
@@ -0,0 +1,203 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from gnocchi import storage
+from gnocchi.storage import _carbonara
+from gnocchi.storage.common import ceph
+
+
+OPTS = [
+ cfg.StrOpt('ceph_pool',
+ default='gnocchi',
+ help='Ceph pool name to use.'),
+ cfg.StrOpt('ceph_username',
+ help='Ceph username (ie: admin without "client." prefix).'),
+ cfg.StrOpt('ceph_secret', help='Ceph key', secret=True),
+ cfg.StrOpt('ceph_keyring', help='Ceph keyring path.'),
+ cfg.IntOpt('ceph_timeout', help='Ceph connection timeout'),
+ cfg.StrOpt('ceph_conffile',
+ default='/etc/ceph/ceph.conf',
+ help='Ceph configuration file.'),
+]
+
+rados = ceph.rados
+
+
+class CephStorage(_carbonara.CarbonaraBasedStorage):
+ WRITE_FULL = False
+
+ def __init__(self, conf, incoming):
+ super(CephStorage, self).__init__(conf, incoming)
+ self.rados, self.ioctx = ceph.create_rados_connection(conf)
+
+ def stop(self):
+ ceph.close_rados_connection(self.rados, self.ioctx)
+ super(CephStorage, self).stop()
+
+ @staticmethod
+ def _get_object_name(metric, timestamp_key, aggregation, granularity,
+ version=3):
+ name = str("gnocchi_%s_%s_%s_%s" % (
+ metric.id, timestamp_key, aggregation, granularity))
+ return name + '_v%s' % version if version else name
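+    # An aggregate object name thus looks like (illustrative values):
+    # "gnocchi_<metric uuid>_1420070400.0_mean_300.0_v3".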
+
+ def _object_exists(self, name):
+ try:
+ self.ioctx.stat(name)
+ return True
+ except rados.ObjectNotFound:
+ return False
+
+ def _create_metric(self, metric):
+ name = self._build_unaggregated_timeserie_path(metric, 3)
+ if self._object_exists(name):
+ raise storage.MetricAlreadyExists(metric)
+ else:
+ self.ioctx.write_full(name, b"")
+
+ def _store_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, data, offset=None, version=3):
+ name = self._get_object_name(metric, timestamp_key,
+ aggregation, granularity, version)
+ if offset is None:
+ self.ioctx.write_full(name, data)
+ else:
+ self.ioctx.write(name, data, offset=offset)
+ with rados.WriteOpCtx() as op:
+ self.ioctx.set_omap(op, (name,), (b"",))
+ self.ioctx.operate_write_op(
+ op, self._build_unaggregated_timeserie_path(metric, 3))
+
+ def _delete_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, version=3):
+ name = self._get_object_name(metric, timestamp_key,
+ aggregation, granularity, version)
+
+ try:
+ self.ioctx.remove_object(name)
+ except rados.ObjectNotFound:
+            # It's possible that we already removed that object and then
+            # crashed before removing it from the OMAP key list; no big
+            # deal anyway.
+ pass
+
+ with rados.WriteOpCtx() as op:
+ self.ioctx.remove_omap_keys(op, (name,))
+ self.ioctx.operate_write_op(
+ op, self._build_unaggregated_timeserie_path(metric, 3))
+
+ def _delete_metric(self, metric):
+ with rados.ReadOpCtx() as op:
+ omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1)
+ try:
+ self.ioctx.operate_read_op(
+ op, self._build_unaggregated_timeserie_path(metric, 3))
+ except rados.ObjectNotFound:
+ return
+
+        # NOTE(sileht): after reading libradospy, I'm not sure that ret
+        # will have the correct value: get_omap_vals transforms the C int
+        # into a Python int before operate_read_op is called, and it's
+        # unclear whether the int content is copied during this
+        # transformation or whether this is a pointer to the C int; it's
+        # probably copied.
+ try:
+ ceph.errno_to_exception(ret)
+ except rados.ObjectNotFound:
+ return
+
+ ops = [self.ioctx.aio_remove(name) for name, _ in omaps]
+
+ for op in ops:
+ op.wait_for_complete_and_cb()
+
+ try:
+ self.ioctx.remove_object(
+ self._build_unaggregated_timeserie_path(metric, 3))
+ except rados.ObjectNotFound:
+ # It's possible that the object does not exists
+ pass
+
+ def _get_measures(self, metric, timestamp_key, aggregation, granularity,
+ version=3):
+ try:
+ name = self._get_object_name(metric, timestamp_key,
+ aggregation, granularity, version)
+ return self._get_object_content(name)
+ except rados.ObjectNotFound:
+ if self._object_exists(
+ self._build_unaggregated_timeserie_path(metric, 3)):
+ raise storage.AggregationDoesNotExist(metric, aggregation)
+ else:
+ raise storage.MetricDoesNotExist(metric)
+
+ def _list_split_keys_for_metric(self, metric, aggregation, granularity,
+ version=3):
+ with rados.ReadOpCtx() as op:
+ omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1)
+ try:
+ self.ioctx.operate_read_op(
+ op, self._build_unaggregated_timeserie_path(metric, 3))
+ except rados.ObjectNotFound:
+ raise storage.MetricDoesNotExist(metric)
+
+        # NOTE(sileht): after reading libradospy, I'm not sure that ret
+        # will have the correct value: get_omap_vals transforms the C int
+        # into a Python int before operate_read_op is called, and it's
+        # unclear whether the int content is copied during this
+        # transformation or whether this is a pointer to the C int; it's
+        # probably copied.
+ try:
+ ceph.errno_to_exception(ret)
+ except rados.ObjectNotFound:
+ raise storage.MetricDoesNotExist(metric)
+
+ keys = set()
+ for name, value in omaps:
+ meta = name.split('_')
+ if (aggregation == meta[3] and granularity == float(meta[4])
+ and self._version_check(name, version)):
+ keys.add(meta[2])
+ return keys
+
+ @staticmethod
+ def _build_unaggregated_timeserie_path(metric, version):
+ return (('gnocchi_%s_none' % metric.id)
+ + ("_v%s" % version if version else ""))
+
+ def _get_unaggregated_timeserie(self, metric, version=3):
+ try:
+ return self._get_object_content(
+ self._build_unaggregated_timeserie_path(metric, version))
+ except rados.ObjectNotFound:
+ raise storage.MetricDoesNotExist(metric)
+
+ def _store_unaggregated_timeserie(self, metric, data, version=3):
+ self.ioctx.write_full(
+ self._build_unaggregated_timeserie_path(metric, version), data)
+
+ def _get_object_content(self, name):
+ offset = 0
+ content = b''
+ while True:
+ data = self.ioctx.read(name, offset=offset)
+ if not data:
+ break
+ content += data
+ offset += len(data)
+ return content
diff --git a/gnocchi/storage/common/__init__.py b/gnocchi/storage/common/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py
new file mode 100644
index 00000000..b1c9b673
--- /dev/null
+++ b/gnocchi/storage/common/ceph.py
@@ -0,0 +1,100 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+
+from oslo_log import log
+
+LOG = log.getLogger(__name__)
+
+
+for RADOS_MODULE_NAME in ('cradox', 'rados'):
+ try:
+ rados = __import__(RADOS_MODULE_NAME)
+ except ImportError:
+ pass
+ else:
+ break
+else:
+ RADOS_MODULE_NAME = None
+ rados = None
+
+if rados is not None and hasattr(rados, 'run_in_thread'):
+ rados.run_in_thread = lambda target, args, timeout=None: target(*args)
+ LOG.info("rados.run_in_thread is monkeypatched.")
+
+
+def create_rados_connection(conf):
+ options = {}
+ if conf.ceph_keyring:
+ options['keyring'] = conf.ceph_keyring
+ if conf.ceph_secret:
+ options['key'] = conf.ceph_secret
+ if conf.ceph_timeout:
+ options['rados_osd_op_timeout'] = conf.ceph_timeout
+ options['rados_mon_op_timeout'] = conf.ceph_timeout
+ options['client_mount_timeout'] = conf.ceph_timeout
+
+ if not rados:
+ raise ImportError("No module named 'rados' nor 'cradox'")
+
+ if not hasattr(rados, 'OmapIterator'):
+ raise ImportError("Your rados python module does not support "
+ "omap feature. Install 'cradox' (recommended) "
+ "or upgrade 'python-rados' >= 9.1.0 ")
+
+ LOG.info("Ceph storage backend use '%s' python library",
+ RADOS_MODULE_NAME)
+
+    # NOTE(sileht): librados handles reconnection itself: by default, if a
+    # call times out (30s), it raises a rados.Timeout exception, and
+    # librados still tries to reconnect on the next call.
+ conn = rados.Rados(conffile=conf.ceph_conffile,
+ rados_id=conf.ceph_username,
+ conf=options)
+ conn.connect()
+ ioctx = conn.open_ioctx(conf.ceph_pool)
+ return conn, ioctx
+
+
+def close_rados_connection(conn, ioctx):
+ ioctx.aio_flush()
+ ioctx.close()
+ conn.shutdown()
+
+
+# NOTE(sileht): The mapping is not part of the rados public API, so we copy
+# it here.
+EXCEPTION_NAMES = {
+ errno.EPERM: 'PermissionError',
+ errno.ENOENT: 'ObjectNotFound',
+ errno.EIO: 'IOError',
+ errno.ENOSPC: 'NoSpace',
+ errno.EEXIST: 'ObjectExists',
+ errno.EBUSY: 'ObjectBusy',
+ errno.ENODATA: 'NoData',
+ errno.EINTR: 'InterruptedOrTimeoutError',
+ errno.ETIMEDOUT: 'TimedOut',
+ errno.EACCES: 'PermissionDeniedError'
+}
+
+
+def errno_to_exception(ret):
+ if ret < 0:
+ name = EXCEPTION_NAMES.get(abs(ret))
+ if name is None:
+ raise rados.Error("Unhandled error '%s'" % ret)
+ else:
+ raise getattr(rados, name)
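+
+
+# For instance (illustrative), errno_to_exception(-errno.ENOENT) raises
+# rados.ObjectNotFound, while a non-negative ret is a no-op.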
diff --git a/gnocchi/storage/common/redis.py b/gnocchi/storage/common/redis.py
new file mode 100644
index 00000000..8491c369
--- /dev/null
+++ b/gnocchi/storage/common/redis.py
@@ -0,0 +1,129 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2017 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+
+from six.moves.urllib import parse
+
+try:
+ import redis
+ from redis import sentinel
+except ImportError:
+ redis = None
+ sentinel = None
+
+from gnocchi import utils
+
+
+SEP = ':'
+
+CLIENT_ARGS = frozenset([
+ 'db',
+ 'encoding',
+ 'retry_on_timeout',
+ 'socket_keepalive',
+ 'socket_timeout',
+ 'ssl',
+ 'ssl_certfile',
+ 'ssl_keyfile',
+ 'sentinel',
+ 'sentinel_fallback',
+])
+"""
+Keys that we allow to proxy from the coordinator configuration into the
+redis client (used to configure the redis client internals so that
+it works as you expect/want it to).
+
+See: http://redis-py.readthedocs.org/en/latest/#redis.Redis
+
+See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py
+"""
+
+#: Client arguments that are expected/allowed to be lists.
+CLIENT_LIST_ARGS = frozenset([
+ 'sentinel_fallback',
+])
+
+#: Client arguments that are expected to be boolean convertible.
+CLIENT_BOOL_ARGS = frozenset([
+ 'retry_on_timeout',
+ 'ssl',
+])
+
+#: Client arguments that are expected to be int convertible.
+CLIENT_INT_ARGS = frozenset([
+ 'db',
+ 'socket_keepalive',
+ 'socket_timeout',
+])
+
+#: Default socket timeout to use when none is provided.
+CLIENT_DEFAULT_SOCKET_TO = 30
+
+
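+# Illustrative URL forms (assumed, matching the parsing below):
+#   redis://[:password@]localhost:6379?db=2&socket_timeout=5   (TCP)
+#   redis:///var/run/redis.sock                                (UNIX socket)
+# Sentinel deployments may add "?sentinel=mymaster" plus one or more
+# "sentinel_fallback=host:port" entries.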
+def get_client(conf):
+ if redis is None:
+ raise RuntimeError("python-redis unavailable")
+ parsed_url = parse.urlparse(conf.redis_url)
+ options = parse.parse_qs(parsed_url.query)
+
+ kwargs = {}
+ if parsed_url.hostname:
+ kwargs['host'] = parsed_url.hostname
+ if parsed_url.port:
+ kwargs['port'] = parsed_url.port
+ else:
+ if not parsed_url.path:
+ raise ValueError("Expected socket path in parsed urls path")
+ kwargs['unix_socket_path'] = parsed_url.path
+ if parsed_url.password:
+ kwargs['password'] = parsed_url.password
+
+ for a in CLIENT_ARGS:
+ if a not in options:
+ continue
+ if a in CLIENT_BOOL_ARGS:
+ v = utils.strtobool(options[a][-1])
+ elif a in CLIENT_LIST_ARGS:
+ v = options[a]
+ elif a in CLIENT_INT_ARGS:
+ v = int(options[a][-1])
+ else:
+ v = options[a][-1]
+ kwargs[a] = v
+ if 'socket_timeout' not in kwargs:
+ kwargs['socket_timeout'] = CLIENT_DEFAULT_SOCKET_TO
+
+ # Ask the sentinel for the current master if there is a
+ # sentinel arg.
+ if 'sentinel' in kwargs:
+ sentinel_hosts = [
+ tuple(fallback.split(':'))
+ for fallback in kwargs.get('sentinel_fallback', [])
+ ]
+ sentinel_hosts.insert(0, (kwargs['host'], kwargs['port']))
+ sentinel_server = sentinel.Sentinel(
+ sentinel_hosts,
+ socket_timeout=kwargs['socket_timeout'])
+ sentinel_name = kwargs['sentinel']
+ del kwargs['sentinel']
+ if 'sentinel_fallback' in kwargs:
+ del kwargs['sentinel_fallback']
+ master_client = sentinel_server.master_for(sentinel_name, **kwargs)
+ # The master_client is a redis.StrictRedis using a
+ # Sentinel managed connection pool.
+ return master_client
+ return redis.StrictRedis(**kwargs)
diff --git a/gnocchi/storage/common/s3.py b/gnocchi/storage/common/s3.py
new file mode 100644
index 00000000..eb6c0660
--- /dev/null
+++ b/gnocchi/storage/common/s3.py
@@ -0,0 +1,81 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+import tenacity
+try:
+ import boto3
+ import botocore.exceptions
+except ImportError:
+ boto3 = None
+ botocore = None
+
+from gnocchi import utils
+
+LOG = log.getLogger(__name__)
+
+
+def retry_if_operationaborted(exception):
+ return (isinstance(exception, botocore.exceptions.ClientError)
+ and exception.response['Error'].get('Code') == "OperationAborted")
+
+
+def get_connection(conf):
+ if boto3 is None:
+ raise RuntimeError("boto3 unavailable")
+ conn = boto3.client(
+ 's3',
+ endpoint_url=conf.s3_endpoint_url,
+ region_name=conf.s3_region_name,
+ aws_access_key_id=conf.s3_access_key_id,
+ aws_secret_access_key=conf.s3_secret_access_key)
+ return conn, conf.s3_region_name, conf.s3_bucket_prefix
+
+
+# NOTE(jd) OperationAborted might be raised if several clients try to create
+# the bucket for the first time at the same time.
+@tenacity.retry(
+ stop=tenacity.stop_after_attempt(10),
+ wait=tenacity.wait_fixed(0.5),
+ retry=tenacity.retry_if_exception(retry_if_operationaborted)
+)
+def create_bucket(conn, name, region_name):
+ if region_name:
+ kwargs = dict(CreateBucketConfiguration={
+ "LocationConstraint": region_name,
+ })
+ else:
+ kwargs = {}
+ return conn.create_bucket(Bucket=name, **kwargs)
+
+
+def bulk_delete(conn, bucket, objects):
+    # NOTE(jd) The maximum number of objects to delete at once is 1000.
+ # TODO(jd) Parallelize?
+ deleted = 0
+ for obj_slice in utils.grouper(objects, 1000):
+ d = {
+ 'Objects': [{'Key': o} for o in obj_slice],
+ # FIXME(jd) Use Quiet mode, but s3rver does not seem to
+ # support it
+ # 'Quiet': True,
+ }
+ response = conn.delete_objects(
+ Bucket=bucket,
+ Delete=d)
+ deleted += len(response['Deleted'])
+ LOG.debug('%s objects deleted, %s objects skipped',
+ deleted, len(objects) - deleted)
diff --git a/gnocchi/storage/common/swift.py b/gnocchi/storage/common/swift.py
new file mode 100644
index 00000000..5d4ff47e
--- /dev/null
+++ b/gnocchi/storage/common/swift.py
@@ -0,0 +1,70 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo_log import log
+from six.moves.urllib.parse import quote
+
+try:
+ from swiftclient import client as swclient
+ from swiftclient import utils as swift_utils
+except ImportError:
+ swclient = None
+ swift_utils = None
+
+from gnocchi import storage
+from gnocchi import utils
+
+LOG = log.getLogger(__name__)
+
+
+@utils.retry
+def _get_connection(conf):
+ return swclient.Connection(
+ auth_version=conf.swift_auth_version,
+ authurl=conf.swift_authurl,
+ preauthtoken=conf.swift_preauthtoken,
+ user=conf.swift_user,
+ key=conf.swift_key,
+ tenant_name=conf.swift_project_name,
+ timeout=conf.swift_timeout,
+ os_options={'endpoint_type': conf.swift_endpoint_type,
+ 'user_domain_name': conf.swift_user_domain_name},
+ retries=0)
+
+
+def get_connection(conf):
+ if swclient is None:
+ raise RuntimeError("python-swiftclient unavailable")
+
+ return _get_connection(conf)
+
+
+POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'}
+
+
+def bulk_delete(conn, container, objects):
+ objects = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8'))
+ for obj in objects]
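+    # The bulk-delete request body is a newline-separated list of quoted
+    # "/container/object" paths, e.g. (illustrative)
+    # b"/backups/obj1\n/backups/obj2\n".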
+ resp = {}
+ headers, body = conn.post_account(
+ headers=POST_HEADERS, query_string='bulk-delete',
+ data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects),
+ response_dict=resp)
+ if resp['status'] != 200:
+ raise storage.StorageError(
+ "Unable to bulk-delete, is bulk-delete enabled in Swift?")
+ resp = swift_utils.parse_api_response(headers, body)
+ LOG.debug('# of objects deleted: %s, # of objects skipped: %s',
+ resp['Number Deleted'], resp['Number Not Found'])
diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py
new file mode 100644
index 00000000..3c067bef
--- /dev/null
+++ b/gnocchi/storage/file.py
@@ -0,0 +1,151 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014 Objectif Libre
+# Copyright © 2015 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import errno
+import os
+import shutil
+import tempfile
+
+from oslo_config import cfg
+
+from gnocchi import storage
+from gnocchi.storage import _carbonara
+from gnocchi import utils
+
+
+OPTS = [
+ cfg.StrOpt('file_basepath',
+ default='/var/lib/gnocchi',
+ help='Path used to store gnocchi data files.'),
+]
+
+
+class FileStorage(_carbonara.CarbonaraBasedStorage):
+ WRITE_FULL = True
+
+ def __init__(self, conf, incoming):
+ super(FileStorage, self).__init__(conf, incoming)
+ self.basepath = conf.file_basepath
+ self.basepath_tmp = os.path.join(self.basepath, 'tmp')
+ utils.ensure_paths([self.basepath_tmp])
+
+ def _atomic_file_store(self, dest, data):
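+        # Write into a temporary file on the same filesystem, then rename it
+        # over the destination: os.rename() is atomic on POSIX, so readers
+        # never observe a partially written file.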
+ tmpfile = tempfile.NamedTemporaryFile(
+ prefix='gnocchi', dir=self.basepath_tmp,
+ delete=False)
+ tmpfile.write(data)
+ tmpfile.close()
+ os.rename(tmpfile.name, dest)
+
+ def _build_metric_dir(self, metric):
+ return os.path.join(self.basepath, str(metric.id))
+
+ def _build_unaggregated_timeserie_path(self, metric, version=3):
+ return os.path.join(
+ self._build_metric_dir(metric),
+ 'none' + ("_v%s" % version if version else ""))
+
+ def _build_metric_path(self, metric, aggregation):
+ return os.path.join(self._build_metric_dir(metric),
+ "agg_" + aggregation)
+
+ def _build_metric_path_for_split(self, metric, aggregation,
+ timestamp_key, granularity, version=3):
+ path = os.path.join(self._build_metric_path(metric, aggregation),
+ timestamp_key + "_" + str(granularity))
+ return path + '_v%s' % version if version else path
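+    # With the default file_basepath, a split path looks like (illustrative
+    # values): /var/lib/gnocchi/<metric uuid>/agg_mean/1420070400.0_300.0_v3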
+
+ def _create_metric(self, metric):
+ path = self._build_metric_dir(metric)
+ try:
+ os.mkdir(path, 0o750)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ raise storage.MetricAlreadyExists(metric)
+ raise
+ for agg in metric.archive_policy.aggregation_methods:
+ try:
+ os.mkdir(self._build_metric_path(metric, agg), 0o750)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ def _store_unaggregated_timeserie(self, metric, data, version=3):
+ self._atomic_file_store(
+ self._build_unaggregated_timeserie_path(metric, version),
+ data)
+
+ def _get_unaggregated_timeserie(self, metric, version=3):
+ path = self._build_unaggregated_timeserie_path(metric, version)
+ try:
+ with open(path, 'rb') as f:
+ return f.read()
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ raise storage.MetricDoesNotExist(metric)
+ raise
+
+ def _list_split_keys_for_metric(self, metric, aggregation, granularity,
+ version=3):
+ try:
+ files = os.listdir(self._build_metric_path(metric, aggregation))
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise storage.MetricDoesNotExist(metric)
+ raise
+ keys = set()
+ for f in files:
+ meta = f.split("_")
+ if meta[1] == str(granularity) and self._version_check(f, version):
+ keys.add(meta[0])
+ return keys
+
+ def _delete_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, version=3):
+ os.unlink(self._build_metric_path_for_split(
+ metric, aggregation, timestamp_key, granularity, version))
+
+ def _store_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, data, offset=None, version=3):
+ self._atomic_file_store(
+ self._build_metric_path_for_split(metric, aggregation,
+ timestamp_key, granularity,
+ version),
+ data)
+
+ def _delete_metric(self, metric):
+ path = self._build_metric_dir(metric)
+ try:
+ shutil.rmtree(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ # NOTE(jd) Maybe the metric has never been created (no
+ # measures)
+ raise
+
+ def _get_measures(self, metric, timestamp_key, aggregation, granularity,
+ version=3):
+ path = self._build_metric_path_for_split(
+ metric, aggregation, timestamp_key, granularity, version)
+ try:
+ with open(path, 'rb') as aggregation_file:
+ return aggregation_file.read()
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ if os.path.exists(self._build_metric_dir(metric)):
+ raise storage.AggregationDoesNotExist(metric, aggregation)
+ raise storage.MetricDoesNotExist(metric)
+ raise
diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py
new file mode 100644
index 00000000..eb99ae4d
--- /dev/null
+++ b/gnocchi/storage/incoming/__init__.py
@@ -0,0 +1,64 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2017 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from gnocchi import exceptions
+
+
+class ReportGenerationError(Exception):
+ pass
+
+
+class StorageDriver(object):
+
+ @staticmethod
+ def __init__(conf):
+ pass
+
+ @staticmethod
+ def upgrade(indexer):
+ pass
+
+ def add_measures(self, metric, measures):
+ """Add a measure to a metric.
+
+ :param metric: The metric measured.
+ :param measures: The actual measures.
+ """
+ self.add_measures_batch({metric: measures})
+
+ @staticmethod
+ def add_measures_batch(metrics_and_measures):
+ """Add a batch of measures for some metrics.
+
+ :param metrics_and_measures: A dict whose keys are metrics and
+ whose values are measures.
+ """
+ raise exceptions.NotImplementedError
+
+ def measures_report(self, details=True):
+ """Return a report of measures that are pending processing.
+
+ Only useful for drivers that process measures in the background.
+
+ :return: {'summary': {'metrics': count, 'measures': count},
+ 'details': {metric_id: pending_measures_count}}
+ """
+ raise exceptions.NotImplementedError
+
+ @staticmethod
+ def list_metric_with_measures_to_process(sack):
+ raise NotImplementedError
diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py
new file mode 100644
index 00000000..e20720d6
--- /dev/null
+++ b/gnocchi/storage/incoming/_carbonara.py
@@ -0,0 +1,138 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from concurrent import futures
+import itertools
+import struct
+
+from oslo_log import log
+import pandas
+import six
+
+from gnocchi.storage import incoming
+from gnocchi import utils
+
+LOG = log.getLogger(__name__)
+
+_NUM_WORKERS = utils.get_default_workers()
+
+
+class CarbonaraBasedStorage(incoming.StorageDriver):
+ MEASURE_PREFIX = "measure"
+ SACK_PREFIX = "incoming"
+ CFG_PREFIX = 'gnocchi-config'
+ CFG_SACKS = 'sacks'
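+ # Measures are serialized as little-endian ("<") pairs of an unsigned
+ # 64-bit integer (the timestamp in nanoseconds) followed by a double
+ # (the value), i.e. 16 bytes per measure.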
+ _MEASURE_SERIAL_FORMAT = "Qd"
+ _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT)
+
+ @property
+ def NUM_SACKS(self):
+ if not hasattr(self, '_num_sacks'):
+ try:
+ self._num_sacks = int(self.get_storage_sacks())
+ except Exception as e:
+ LOG.error('Unable to detect the number of storage sacks. '
+ 'Ensure gnocchi-upgrade has been executed: %s', e)
+ raise
+ return self._num_sacks
+
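+ # The number of sacks is embedded in the prefix (e.g. "incoming128-%s")
+ # so that changing the sack count yields a distinct namespace.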
+ def get_sack_prefix(self, num_sacks=None):
+ sacks = num_sacks if num_sacks else self.NUM_SACKS
+ return self.SACK_PREFIX + str(sacks) + '-%s'
+
+ def upgrade(self, index, num_sacks):
+ super(CarbonaraBasedStorage, self).upgrade(index)
+ if not self.get_storage_sacks():
+ self.set_storage_settings(num_sacks)
+
+ @staticmethod
+ def get_storage_sacks():
+ """Return the number of sacks in storage. None if not set."""
+ raise NotImplementedError
+
+ @staticmethod
+ def set_storage_settings(num_sacks):
+ raise NotImplementedError
+
+ @staticmethod
+ def remove_sack_group(num_sacks):
+ raise NotImplementedError
+
+ @staticmethod
+ def get_sack_lock(coord, sack):
+ lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii')
+ return coord.get_lock(lock_name)
+
+ def _unserialize_measures(self, measure_id, data):
+ nb_measures = len(data) // self._MEASURE_SERIAL_LEN
+ try:
+ measures = struct.unpack(
+ "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data)
+ except struct.error:
+ LOG.error(
+ "Unable to decode measure %s, possible data corruption",
+ measure_id)
+ raise
+ return six.moves.zip(
+ pandas.to_datetime(measures[::2], unit='ns'),
+ itertools.islice(measures, 1, len(measures), 2))
+
+ def _encode_measures(self, measures):
+ measures = list(measures)
+ return struct.pack(
+ "<" + self._MEASURE_SERIAL_FORMAT * len(measures),
+ *list(itertools.chain.from_iterable(measures)))
+
+ def add_measures_batch(self, metrics_and_measures):
+ with futures.ThreadPoolExecutor(max_workers=_NUM_WORKERS) as executor:
+ list(executor.map(
+ lambda args: self._store_new_measures(*args),
+ ((metric, self._encode_measures(measures))
+ for metric, measures
+ in six.iteritems(metrics_and_measures))))
+
+ @staticmethod
+ def _store_new_measures(metric, data):
+ raise NotImplementedError
+
+ def measures_report(self, details=True):
+ metrics, measures, full_details = self._build_report(details)
+ report = {'summary': {'metrics': metrics, 'measures': measures}}
+ if full_details is not None:
+ report['details'] = full_details
+ return report
+
+ @staticmethod
+ def _build_report(details):
+ raise NotImplementedError
+
+ @staticmethod
+ def delete_unprocessed_measures_for_metric_id(metric_id):
+ raise NotImplementedError
+
+ @staticmethod
+ def process_measure_for_metric(metric):
+ raise NotImplementedError
+
+ @staticmethod
+ def has_unprocessed(metric):
+ raise NotImplementedError
+
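+ # A metric is deterministically assigned to a sack by taking the
+ # 128-bit integer value of its UUID modulo the number of sacks.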
+ def sack_for_metric(self, metric_id):
+ return metric_id.int % self.NUM_SACKS
+
+ def get_sack_name(self, sack):
+ return self.get_sack_prefix() % sack
diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py
new file mode 100644
index 00000000..15777a52
--- /dev/null
+++ b/gnocchi/storage/incoming/ceph.py
@@ -0,0 +1,225 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from collections import defaultdict
+import contextlib
+import datetime
+import json
+import uuid
+
+import six
+
+from gnocchi.storage.common import ceph
+from gnocchi.storage.incoming import _carbonara
+
+rados = ceph.rados
+
+
+class CephStorage(_carbonara.CarbonaraBasedStorage):
+
+ Q_LIMIT = 1000
+
+ def __init__(self, conf):
+ super(CephStorage, self).__init__(conf)
+ self.rados, self.ioctx = ceph.create_rados_connection(conf)
+ # NOTE(sileht): these constants can't be class attributes because
+ # they rely on the presence of the rados module
+
+ # NOTE(sileht): We allow reading the measure objects from outdated
+ # replicas; that is safe for us, as we will pick up any new data on
+ # the next metricd pass.
+ self.OMAP_READ_FLAGS = (rados.LIBRADOS_OPERATION_BALANCE_READS |
+ rados.LIBRADOS_OPERATION_SKIPRWLOCKS)
+
+ # NOTE(sileht): It should be safe to manipulate the omap keys from
+ # several OSDs at the same time: each OSD replicates a new key to
+ # the others, and the same goes for deletion.
+ # It is unclear how Ceph handles rm_omap and set_omap running at the
+ # same time on the same key; the operations are presumably
+ # timestamped, so the outcome should be consistent. Even if not, the
+ # remaining race is acceptable: a rm_omap can finish before all
+ # replicas of a set_omap are done, but we don't care. If that
+ # happens, the next metricd run will simply remove the key again;
+ # the object holding the measures was already deleted by the
+ # previous run, so we are safe.
+ self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS
+
+ def stop(self):
+ ceph.close_rados_connection(self.rados, self.ioctx)
+ super(CephStorage, self).stop()
+
+ def get_storage_sacks(self):
+ try:
+ return json.loads(
+ self.ioctx.read(self.CFG_PREFIX).decode())[self.CFG_SACKS]
+ except rados.ObjectNotFound:
+ return
+
+ def set_storage_settings(self, num_sacks):
+ self.ioctx.write_full(self.CFG_PREFIX,
+ json.dumps({self.CFG_SACKS: num_sacks}).encode())
+
+ def remove_sack_group(self, num_sacks):
+ prefix = self.get_sack_prefix(num_sacks)
+ for i in six.moves.xrange(num_sacks):
+ try:
+ self.ioctx.remove_object(prefix % i)
+ except rados.ObjectNotFound:
+ pass
+
+ def add_measures_batch(self, metrics_and_measures):
+ data_by_sack = defaultdict(lambda: defaultdict(list))
+ for metric, measures in six.iteritems(metrics_and_measures):
+ name = "_".join((
+ self.MEASURE_PREFIX,
+ str(metric.id),
+ str(uuid.uuid4()),
+ datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")))
+ sack = self.get_sack_name(self.sack_for_metric(metric.id))
+ data_by_sack[sack]['names'].append(name)
+ data_by_sack[sack]['measures'].append(
+ self._encode_measures(measures))
+
+ ops = []
+ for sack, data in data_by_sack.items():
+ with rados.WriteOpCtx() as op:
+ # NOTE(sileht): listing all the objects in a pool is too slow
+ # with many objects (2 minutes for 20000 objects on a 50-OSD
+ # cluster) and forces us to iterate over every object. So we
+ # keep one object per sack whose omap holds the list of objects
+ # to process (omap rather than xattr, because xattr does not
+ # allow configuring the locking behavior).
+ self.ioctx.set_omap(op, tuple(data['names']),
+ tuple(data['measures']))
+ ops.append(self.ioctx.operate_aio_write_op(
+ op, sack, flags=self.OMAP_WRITE_FLAGS))
+ while ops:
+ op = ops.pop()
+ op.wait_for_complete()
+
+ def _build_report(self, details):
+ metrics = set()
+ count = 0
+ metric_details = defaultdict(int)
+ for i in six.moves.range(self.NUM_SACKS):
+ marker = ""
+ while True:
+ names = list(self._list_keys_to_process(
+ i, marker=marker, limit=self.Q_LIMIT))
+ if names and names[0] < marker:
+ raise _carbonara.ReportGenerationError("Unable to cleanly "
+ "compute backlog.")
+ for name in names:
+ count += 1
+ metric = name.split("_")[1]
+ metrics.add(metric)
+ if details:
+ metric_details[metric] += 1
+ if len(names) < self.Q_LIMIT:
+ break
+ else:
+ marker = name
+
+ return len(metrics), count, metric_details if details else None
+
+ def _list_keys_to_process(self, sack, prefix="", marker="", limit=-1):
+ with rados.ReadOpCtx() as op:
+ omaps, ret = self.ioctx.get_omap_vals(op, marker, prefix, limit)
+ try:
+ self.ioctx.operate_read_op(
+ op, self.get_sack_name(sack), flag=self.OMAP_READ_FLAGS)
+ except rados.ObjectNotFound:
+ # The API has not written anything yet
+ return ()
+ # NOTE(sileht): after reading librados' Python bindings, it is not
+ # certain that ret has the correct value: get_omap_vals converts
+ # the C int to a Python int before operate_read_op is called, and
+ # it is unclear whether the int's content is copied during that
+ # conversion or whether it still points to the C int. It is most
+ # likely copied.
+ try:
+ ceph.errno_to_exception(ret)
+ except rados.ObjectNotFound:
+ return ()
+
+ return (k for k, v in omaps)
+
+ def list_metric_with_measures_to_process(self, sack):
+ names = set()
+ marker = ""
+ while True:
+ obj_names = list(self._list_keys_to_process(
+ sack, marker=marker, limit=self.Q_LIMIT))
+ names.update(name.split("_")[1] for name in obj_names)
+ if len(obj_names) < self.Q_LIMIT:
+ break
+ else:
+ marker = obj_names[-1]
+ return names
+
+ def delete_unprocessed_measures_for_metric_id(self, metric_id):
+ sack = self.sack_for_metric(metric_id)
+ key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id)
+ keys = tuple(self._list_keys_to_process(sack, key_prefix))
+
+ if not keys:
+ return
+
+ # Now clean objects and omap
+ with rados.WriteOpCtx() as op:
+ # NOTE(sileht): come on Ceph, no return code
+ # for this operation ?!!
+ self.ioctx.remove_omap_keys(op, keys)
+ self.ioctx.operate_write_op(op, self.get_sack_name(sack),
+ flags=self.OMAP_WRITE_FLAGS)
+
+ def has_unprocessed(self, metric):
+ sack = self.sack_for_metric(metric.id)
+ object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id)
+ return bool(self._list_keys_to_process(sack, object_prefix))
+
+ @contextlib.contextmanager
+ def process_measure_for_metric(self, metric):
+ sack = self.sack_for_metric(metric.id)
+ key_prefix = self.MEASURE_PREFIX + "_" + str(metric.id)
+
+ measures = []
+ processed_keys = []
+ with rados.ReadOpCtx() as op:
+ omaps, ret = self.ioctx.get_omap_vals(op, "", key_prefix, -1)
+ self.ioctx.operate_read_op(op, self.get_sack_name(sack),
+ flag=self.OMAP_READ_FLAGS)
+ # NOTE(sileht): after reading the libradospy, I'm
+ # not sure that ret will have the correct value
+ # get_omap_vals transforms the C int to python int
+ # before operate_read_op is called, I dunno if the int
+ # content is copied during this transformation or if
+ # this is a pointer to the C int, I think it's copied...
+ try:
+ ceph.errno_to_exception(ret)
+ except rados.ObjectNotFound:
+ # Object has been deleted, so this is just a stale entry
+ # in the OMAP listing; ignore it
+ return
+ for k, v in omaps:
+ measures.extend(self._unserialize_measures(k, v))
+ processed_keys.append(k)
+
+ yield measures
+
+ # Now clean omap
+ with rados.WriteOpCtx() as op:
+ # NOTE(sileht): come on Ceph, no return code
+ # for this operation ?!!
+ self.ioctx.remove_omap_keys(op, tuple(processed_keys))
+ self.ioctx.operate_write_op(op, self.get_sack_name(sack),
+ flags=self.OMAP_WRITE_FLAGS)
diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py
new file mode 100644
index 00000000..781d3ec5
--- /dev/null
+++ b/gnocchi/storage/incoming/file.py
@@ -0,0 +1,165 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import contextlib
+import datetime
+import errno
+import json
+import os
+import shutil
+import tempfile
+import uuid
+
+import six
+
+from gnocchi.storage.incoming import _carbonara
+from gnocchi import utils
+
+
+class FileStorage(_carbonara.CarbonaraBasedStorage):
+ def __init__(self, conf):
+ super(FileStorage, self).__init__(conf)
+ self.basepath = conf.file_basepath
+ self.basepath_tmp = os.path.join(self.basepath, 'tmp')
+
+ def upgrade(self, index, num_sacks):
+ super(FileStorage, self).upgrade(index, num_sacks)
+ utils.ensure_paths([self.basepath_tmp])
+
+ def get_storage_sacks(self):
+ try:
+ with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX),
+ 'r') as f:
+ return json.load(f)[self.CFG_SACKS]
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ return
+ raise
+
+ def set_storage_settings(self, num_sacks):
+ data = {self.CFG_SACKS: num_sacks}
+ with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), 'w') as f:
+ json.dump(data, f)
+ utils.ensure_paths([self._sack_path(i)
+ for i in six.moves.range(self.NUM_SACKS)])
+
+ def remove_sack_group(self, num_sacks):
+ prefix = self.get_sack_prefix(num_sacks)
+ for i in six.moves.xrange(num_sacks):
+ shutil.rmtree(os.path.join(self.basepath, prefix % i))
+
+ def _sack_path(self, sack):
+ return os.path.join(self.basepath, self.get_sack_name(sack))
+
+ def _measure_path(self, sack, metric_id):
+ return os.path.join(self._sack_path(sack), six.text_type(metric_id))
+
+ def _build_measure_path(self, metric_id, random_id=None):
+ sack = self.sack_for_metric(metric_id)
+ path = self._measure_path(sack, metric_id)
+ if random_id:
+ if random_id is True:
+ now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S")
+ random_id = six.text_type(uuid.uuid4()) + now
+ return os.path.join(path, random_id)
+ return path
+
+ def _store_new_measures(self, metric, data):
+ tmpfile = tempfile.NamedTemporaryFile(
+ prefix='gnocchi', dir=self.basepath_tmp,
+ delete=False)
+ tmpfile.write(data)
+ tmpfile.close()
+ path = self._build_measure_path(metric.id, True)
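+ # The per-metric directory may not exist yet, so retry the rename,
+ # creating the directory on ENOENT.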
+ while True:
+ try:
+ os.rename(tmpfile.name, path)
+ break
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ os.mkdir(self._build_measure_path(metric.id))
+ except OSError as e:
+ # NOTE(jd) It's possible that another process created the
+ # path just before us! In this case, good for us, let's do
+ # nothing then! (see bug #1475684)
+ if e.errno != errno.EEXIST:
+ raise
+
+ def _build_report(self, details):
+ metric_details = {}
+ for i in six.moves.range(self.NUM_SACKS):
+ for metric in self.list_metric_with_measures_to_process(i):
+ metric_details[metric] = len(
+ self._list_measures_container_for_metric_id_str(i, metric))
+ return (len(metric_details.keys()), sum(metric_details.values()),
+ metric_details if details else None)
+
+ def list_metric_with_measures_to_process(self, sack):
+ return set(self._list_target(self._sack_path(sack)))
+
+ def _list_measures_container_for_metric_id_str(self, sack, metric_id):
+ return self._list_target(self._measure_path(sack, metric_id))
+
+ def _list_measures_container_for_metric_id(self, metric_id):
+ return self._list_target(self._build_measure_path(metric_id))
+
+ @staticmethod
+ def _list_target(target):
+ try:
+ return os.listdir(target)
+ except OSError as e:
+ # Some other process handled this one, so do nothing
+ if e.errno == errno.ENOENT:
+ return []
+ raise
+
+ def _delete_measures_files_for_metric_id(self, metric_id, files):
+ for f in files:
+ try:
+ os.unlink(self._build_measure_path(metric_id, f))
+ except OSError as e:
+ # Another process deleted it in the meantime, not a problem
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ os.rmdir(self._build_measure_path(metric_id))
+ except OSError as e:
+ # ENOENT: ok, it has been removed at almost the same time
+ # by another process
+ # ENOTEMPTY: ok, someone pushed measure in the meantime,
+ # we'll delete the measures and directory later
+ # EEXIST: some systems use this instead of ENOTEMPTY
+ if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST):
+ raise
+
+ def delete_unprocessed_measures_for_metric_id(self, metric_id):
+ files = self._list_measures_container_for_metric_id(metric_id)
+ self._delete_measures_files_for_metric_id(metric_id, files)
+
+ def has_unprocessed(self, metric):
+ return os.path.isdir(self._build_measure_path(metric.id))
+
+ @contextlib.contextmanager
+ def process_measure_for_metric(self, metric):
+ files = self._list_measures_container_for_metric_id(metric.id)
+ measures = []
+ for f in files:
+ abspath = self._build_measure_path(metric.id, f)
+ with open(abspath, "rb") as e:
+ measures.extend(self._unserialize_measures(f, e.read()))
+
+ yield measures
+
+ self._delete_measures_files_for_metric_id(metric.id, files)
diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py
new file mode 100644
index 00000000..9e81327c
--- /dev/null
+++ b/gnocchi/storage/incoming/redis.py
@@ -0,0 +1,85 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2017 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import collections
+import contextlib
+
+import six
+
+from gnocchi.storage.common import redis
+from gnocchi.storage.incoming import _carbonara
+
+
+class RedisStorage(_carbonara.CarbonaraBasedStorage):
+
+ def __init__(self, conf):
+ super(RedisStorage, self).__init__(conf)
+ self._client = redis.get_client(conf)
+
+ def get_storage_sacks(self):
+ return self._client.hget(self.CFG_PREFIX, self.CFG_SACKS)
+
+ def set_storage_settings(self, num_sacks):
+ self._client.hset(self.CFG_PREFIX, self.CFG_SACKS, num_sacks)
+
+ @staticmethod
+ def remove_sack_group(num_sacks):
+ # NOTE(gordc): redis doesn't maintain keys with empty values
+ pass
+
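+ # Pending measures for a metric are stored in a single Redis list
+ # whose key is "<sack name><SEP><metric id>".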
+ def _build_measure_path(self, metric_id):
+ return redis.SEP.join([
+ self.get_sack_name(self.sack_for_metric(metric_id)),
+ six.text_type(metric_id)])
+
+ def _store_new_measures(self, metric, data):
+ path = self._build_measure_path(metric.id)
+ self._client.rpush(path, data)
+
+ def _build_report(self, details):
+ match = redis.SEP.join([self.get_sack_name("*"), "*"])
+ metric_details = collections.defaultdict(int)
+ for key in self._client.scan_iter(match=match, count=1000):
+ metric = key.decode('utf8').split(redis.SEP)[1]
+ metric_details[metric] = self._client.llen(key)
+ return (len(metric_details.keys()), sum(metric_details.values()),
+ metric_details if details else None)
+
+ def list_metric_with_measures_to_process(self, sack):
+ match = redis.SEP.join([self.get_sack_name(sack), "*"])
+ keys = self._client.scan_iter(match=match, count=1000)
+ return set([k.decode('utf8').split(redis.SEP)[1] for k in keys])
+
+ def delete_unprocessed_measures_for_metric_id(self, metric_id):
+ self._client.delete(self._build_measure_path(metric_id))
+
+ def has_unprocessed(self, metric):
+ return bool(self._client.exists(self._build_measure_path(metric.id)))
+
+ @contextlib.contextmanager
+ def process_measure_for_metric(self, metric):
+ key = self._build_measure_path(metric.id)
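+ # Snapshot the current queue length so that measures pushed while we
+ # are processing are preserved for the next pass.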
+ item_len = self._client.llen(key)
+ # lrange is inclusive on both ends, decrease to grab exactly n items
+ item_len = item_len - 1 if item_len else item_len
+ measures = []
+ for i, data in enumerate(self._client.lrange(key, 0, item_len)):
+ measures.extend(self._unserialize_measures(
+ '%s-%s' % (metric.id, i), data))
+
+ yield measures
+
+ # ltrim is inclusive, bump 1 to remove up to and including nth item
+ self._client.ltrim(key, item_len + 1, -1)
diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py
new file mode 100644
index 00000000..89de4192
--- /dev/null
+++ b/gnocchi/storage/incoming/s3.py
@@ -0,0 +1,177 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from collections import defaultdict
+import contextlib
+import datetime
+import json
+import uuid
+
+import six
+
+from gnocchi.storage.common import s3
+from gnocchi.storage.incoming import _carbonara
+
+boto3 = s3.boto3
+botocore = s3.botocore
+
+
+class S3Storage(_carbonara.CarbonaraBasedStorage):
+
+ def __init__(self, conf):
+ super(S3Storage, self).__init__(conf)
+ self.s3, self._region_name, self._bucket_prefix = (
+ s3.get_connection(conf)
+ )
+
+ self._bucket_name_measures = (
+ self._bucket_prefix + "-" + self.MEASURE_PREFIX
+ )
+
+ def get_storage_sacks(self):
+ try:
+ response = self.s3.get_object(Bucket=self._bucket_name_measures,
+ Key=self.CFG_PREFIX)
+ return json.loads(response['Body'].read().decode())[self.CFG_SACKS]
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') == "NoSuchKey":
+ return
+
+ def set_storage_settings(self, num_sacks):
+ data = {self.CFG_SACKS: num_sacks}
+ self.s3.put_object(Bucket=self._bucket_name_measures,
+ Key=self.CFG_PREFIX,
+ Body=json.dumps(data).encode())
+
+ def get_sack_prefix(self, num_sacks=None):
+ # NOTE(gordc): override to follow s3 partitioning logic
+ return '%s-' + ('%s/' % (num_sacks if num_sacks else self.NUM_SACKS))
+
+ @staticmethod
+ def remove_sack_group(num_sacks):
+ # nothing to clean up since sacks are part of the path
+ pass
+
+ def upgrade(self, indexer, num_sacks):
+ try:
+ s3.create_bucket(self.s3, self._bucket_name_measures,
+ self._region_name)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') not in (
+ "BucketAlreadyExists", "BucketAlreadyOwnedByYou"
+ ):
+ raise
+ # need to create bucket first to store storage settings object
+ super(S3Storage, self).upgrade(indexer, num_sacks)
+
+ def _store_new_measures(self, metric, data):
+ now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S")
+ self.s3.put_object(
+ Bucket=self._bucket_name_measures,
+ Key=(self.get_sack_name(self.sack_for_metric(metric.id))
+ + six.text_type(metric.id) + "/"
+ + six.text_type(uuid.uuid4()) + now),
+ Body=data)
+
+ def _build_report(self, details):
+ metric_details = defaultdict(int)
+ response = {}
+ while response.get('IsTruncated', True):
+ if 'NextContinuationToken' in response:
+ kwargs = {
+ 'ContinuationToken': response['NextContinuationToken']
+ }
+ else:
+ kwargs = {}
+ response = self.s3.list_objects_v2(
+ Bucket=self._bucket_name_measures,
+ **kwargs)
+ # FIXME(gordc): this can be streamlined if not details
+ for c in response.get('Contents', ()):
+ if c['Key'] != self.CFG_PREFIX:
+ __, metric, metric_file = c['Key'].split("/", 2)
+ metric_details[metric] += 1
+ return (len(metric_details), sum(metric_details.values()),
+ metric_details if details else None)
+
+ def list_metric_with_measures_to_process(self, sack):
+ limit = 1000 # 1000 is the default anyway
+ metrics = set()
+ response = {}
+ # Handle pagination
+ while response.get('IsTruncated', True):
+ if 'NextContinuationToken' in response:
+ kwargs = {
+ 'ContinuationToken': response['NextContinuationToken']
+ }
+ else:
+ kwargs = {}
+ response = self.s3.list_objects_v2(
+ Bucket=self._bucket_name_measures,
+ Prefix=self.get_sack_name(sack),
+ Delimiter="/",
+ MaxKeys=limit,
+ **kwargs)
+ for p in response.get('CommonPrefixes', ()):
+ metrics.add(p['Prefix'].split('/', 2)[1])
+ return metrics
+
+ def _list_measure_files_for_metric_id(self, sack, metric_id):
+ files = set()
+ response = {}
+ while response.get('IsTruncated', True):
+ if 'NextContinuationToken' in response:
+ kwargs = {
+ 'ContinuationToken': response['NextContinuationToken']
+ }
+ else:
+ kwargs = {}
+ response = self.s3.list_objects_v2(
+ Bucket=self._bucket_name_measures,
+ Prefix=(self.get_sack_name(sack)
+ + six.text_type(metric_id) + "/"),
+ **kwargs)
+
+ for c in response.get('Contents', ()):
+ files.add(c['Key'])
+
+ return files
+
+ def delete_unprocessed_measures_for_metric_id(self, metric_id):
+ sack = self.sack_for_metric(metric_id)
+ files = self._list_measure_files_for_metric_id(sack, metric_id)
+ s3.bulk_delete(self.s3, self._bucket_name_measures, files)
+
+ def has_unprocessed(self, metric):
+ sack = self.sack_for_metric(metric.id)
+ return bool(self._list_measure_files_for_metric_id(sack, metric.id))
+
+ @contextlib.contextmanager
+ def process_measure_for_metric(self, metric):
+ sack = self.sack_for_metric(metric.id)
+ files = self._list_measure_files_for_metric_id(sack, metric.id)
+
+ measures = []
+ for f in files:
+ response = self.s3.get_object(
+ Bucket=self._bucket_name_measures,
+ Key=f)
+ measures.extend(
+ self._unserialize_measures(f, response['Body'].read()))
+
+ yield measures
+
+ # Now clean objects
+ s3.bulk_delete(self.s3, self._bucket_name_measures, files)
diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py
new file mode 100644
index 00000000..304126f9
--- /dev/null
+++ b/gnocchi/storage/incoming/swift.py
@@ -0,0 +1,114 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from collections import defaultdict
+import contextlib
+import datetime
+import json
+import uuid
+
+import six
+
+from gnocchi.storage.common import swift
+from gnocchi.storage.incoming import _carbonara
+
+swclient = swift.swclient
+swift_utils = swift.swift_utils
+
+
+class SwiftStorage(_carbonara.CarbonaraBasedStorage):
+ def __init__(self, conf):
+ super(SwiftStorage, self).__init__(conf)
+ self.swift = swift.get_connection(conf)
+
+ def get_storage_sacks(self):
+ try:
+ __, data = self.swift.get_object(self.CFG_PREFIX, self.CFG_PREFIX)
+ return json.loads(data)[self.CFG_SACKS]
+ except swclient.ClientException as e:
+ if e.http_status == 404:
+ return
+
+ def set_storage_settings(self, num_sacks):
+ self.swift.put_container(self.CFG_PREFIX)
+ self.swift.put_object(self.CFG_PREFIX, self.CFG_PREFIX,
+ json.dumps({self.CFG_SACKS: num_sacks}))
+ for i in six.moves.range(self.NUM_SACKS):
+ self.swift.put_container(self.get_sack_name(i))
+
+ def remove_sack_group(self, num_sacks):
+ prefix = self.get_sack_prefix(num_sacks)
+ for i in six.moves.xrange(num_sacks):
+ self.swift.delete_container(prefix % i)
+
+ def _store_new_measures(self, metric, data):
+ now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S")
+ self.swift.put_object(
+ self.get_sack_name(self.sack_for_metric(metric.id)),
+ six.text_type(metric.id) + "/" + six.text_type(uuid.uuid4()) + now,
+ data)
+
+ def _build_report(self, details):
+ metric_details = defaultdict(int)
+ nb_metrics = 0
+ measures = 0
+ for i in six.moves.range(self.NUM_SACKS):
+ if details:
+ headers, files = self.swift.get_container(
+ self.get_sack_name(i), full_listing=True)
+ for f in files:
+ metric, __ = f['name'].split("/", 1)
+ metric_details[metric] += 1
+ else:
+ headers, files = self.swift.get_container(
+ self.get_sack_name(i), delimiter='/', full_listing=True)
+ nb_metrics += len(files)
+ measures += int(headers.get('x-container-object-count'))
+ return (nb_metrics or len(metric_details), measures,
+ metric_details if details else None)
+
+ def list_metric_with_measures_to_process(self, sack):
+ headers, files = self.swift.get_container(
+ self.get_sack_name(sack), delimiter='/', full_listing=True)
+ return set(f['subdir'][:-1] for f in files if 'subdir' in f)
+
+ def _list_measure_files_for_metric_id(self, sack, metric_id):
+ headers, files = self.swift.get_container(
+ self.get_sack_name(sack), path=six.text_type(metric_id),
+ full_listing=True)
+ return files
+
+ def delete_unprocessed_measures_for_metric_id(self, metric_id):
+ sack = self.sack_for_metric(metric_id)
+ files = self._list_measure_files_for_metric_id(sack, metric_id)
+ swift.bulk_delete(self.swift, self.get_sack_name(sack), files)
+
+ def has_unprocessed(self, metric):
+ sack = self.sack_for_metric(metric.id)
+ return bool(self._list_measure_files_for_metric_id(sack, metric.id))
+
+ @contextlib.contextmanager
+ def process_measure_for_metric(self, metric):
+ sack = self.sack_for_metric(metric.id)
+ sack_name = self.get_sack_name(sack)
+ files = self._list_measure_files_for_metric_id(sack, metric.id)
+
+ measures = []
+ for f in files:
+ headers, data = self.swift.get_object(sack_name, f['name'])
+ measures.extend(self._unserialize_measures(f['name'], data))
+
+ yield measures
+
+ # Now clean objects
+ swift.bulk_delete(self.swift, sack_name, files)
diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py
new file mode 100644
index 00000000..fc2c63ad
--- /dev/null
+++ b/gnocchi/storage/redis.py
@@ -0,0 +1,114 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2017 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslo_config import cfg
+
+from gnocchi import storage
+from gnocchi.storage import _carbonara
+from gnocchi.storage.common import redis
+
+
+OPTS = [
+ cfg.StrOpt('redis_url',
+ default='redis://localhost:6379/',
+ help='Redis URL'),
+]
+
+
+class RedisStorage(_carbonara.CarbonaraBasedStorage):
+ WRITE_FULL = True
+
+ STORAGE_PREFIX = "timeseries"
+ FIELD_SEP = '_'
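+ # Each metric maps to a single Redis hash; the hash fields name the
+ # stored splits, e.g. "<timestamp_key>_<aggregation>_<granularity>_v3".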
+
+ def __init__(self, conf, incoming):
+ super(RedisStorage, self).__init__(conf, incoming)
+ self._client = redis.get_client(conf)
+
+ def _metric_key(self, metric):
+ return redis.SEP.join([self.STORAGE_PREFIX, str(metric.id)])
+
+ @staticmethod
+ def _unaggregated_field(version=3):
+ return 'none' + ("_v%s" % version if version else "")
+
+ @classmethod
+ def _aggregated_field_for_split(cls, aggregation, timestamp_key,
+ granularity, version=3):
+ path = cls.FIELD_SEP.join([timestamp_key, aggregation,
+ str(granularity)])
+ return path + '_v%s' % version if version else path
+
+ def _create_metric(self, metric):
+ key = self._metric_key(metric)
+ if self._client.exists(key):
+ raise storage.MetricAlreadyExists(metric)
+ self._client.hset(key, self._unaggregated_field(), '')
+
+ def _store_unaggregated_timeserie(self, metric, data, version=3):
+ self._client.hset(self._metric_key(metric),
+ self._unaggregated_field(version), data)
+
+ def _get_unaggregated_timeserie(self, metric, version=3):
+ data = self._client.hget(self._metric_key(metric),
+ self._unaggregated_field(version))
+ if data is None:
+ raise storage.MetricDoesNotExist(metric)
+ return data
+
+ def _list_split_keys_for_metric(self, metric, aggregation, granularity,
+ version=3):
+ key = self._metric_key(metric)
+ if not self._client.exists(key):
+ raise storage.MetricDoesNotExist(metric)
+ split_keys = set()
+ hashes = self._client.hscan_iter(
+ key, match=self._aggregated_field_for_split(aggregation, '*',
+ granularity, version))
+ for f, __ in hashes:
+ meta = f.decode("utf8").split(self.FIELD_SEP, 1)
+ split_keys.add(meta[0])
+ return split_keys
+
+ def _delete_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, version=3):
+ key = self._metric_key(metric)
+ field = self._aggregated_field_for_split(
+ aggregation, timestamp_key, granularity, version)
+ self._client.hdel(key, field)
+
+ def _store_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, data, offset=None, version=3):
+ key = self._metric_key(metric)
+ field = self._aggregated_field_for_split(
+ aggregation, timestamp_key, granularity, version)
+ self._client.hset(key, field, data)
+
+ def _delete_metric(self, metric):
+ self._client.delete(self._metric_key(metric))
+
+ # Carbonara API
+
+ def _get_measures(self, metric, timestamp_key, aggregation, granularity,
+ version=3):
+ key = self._metric_key(metric)
+ field = self._aggregated_field_for_split(
+ aggregation, timestamp_key, granularity, version)
+ data = self._client.hget(key, field)
+ if data is None:
+ if not self._client.exists(key):
+ raise storage.MetricDoesNotExist(metric)
+ raise storage.AggregationDoesNotExist(metric, aggregation)
+ return data
diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py
new file mode 100644
index 00000000..59c801de
--- /dev/null
+++ b/gnocchi/storage/s3.py
@@ -0,0 +1,221 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016-2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+
+from oslo_config import cfg
+import tenacity
+
+from gnocchi import storage
+from gnocchi.storage import _carbonara
+from gnocchi.storage.common import s3
+
+boto3 = s3.boto3
+botocore = s3.botocore
+
+OPTS = [
+ cfg.StrOpt('s3_endpoint_url',
+ help='S3 endpoint URL'),
+ cfg.StrOpt('s3_region_name',
+ default=os.getenv("AWS_DEFAULT_REGION"),
+ help='S3 region name'),
+ cfg.StrOpt('s3_access_key_id',
+ default=os.getenv("AWS_ACCESS_KEY_ID"),
+ help='S3 access key id'),
+ cfg.StrOpt('s3_secret_access_key',
+ default=os.getenv("AWS_SECRET_ACCESS_KEY"),
+ help='S3 secret access key'),
+ cfg.StrOpt('s3_bucket_prefix',
+ # Max bucket length is 63 and we use "-" as separator
+ # 63 - 1 - len(uuid) = 26
+ max_length=26,
+ default='gnocchi',
+ help='Prefix to namespace metric bucket.'),
+ cfg.FloatOpt('s3_check_consistency_timeout',
+ min=0,
+ default=60,
+ help="Maximum time to wait checking data consistency when "
+ "writing to S3. Set to 0 to disable data consistency "
+ "validation."),
+]
+
+
+def retry_if_operationaborted(exception):
+ return (isinstance(exception, botocore.exceptions.ClientError)
+ and exception.response['Error'].get('Code') == "OperationAborted")
+
+
+class S3Storage(_carbonara.CarbonaraBasedStorage):
+
+ WRITE_FULL = True
+
+ _consistency_wait = tenacity.wait_exponential(multiplier=0.1)
+
+ def __init__(self, conf, incoming):
+ super(S3Storage, self).__init__(conf, incoming)
+ self.s3, self._region_name, self._bucket_prefix = (
+ s3.get_connection(conf)
+ )
+ self._bucket_name = '%s-aggregates' % self._bucket_prefix
+ if conf.s3_check_consistency_timeout > 0:
+ self._consistency_stop = tenacity.stop_after_delay(
+ conf.s3_check_consistency_timeout)
+ else:
+ self._consistency_stop = None
+
+ def upgrade(self, index, num_sacks):
+ super(S3Storage, self).upgrade(index, num_sacks)
+ try:
+ s3.create_bucket(self.s3, self._bucket_name, self._region_name)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') != "BucketAlreadyExists":
+ raise
+
+ @staticmethod
+ def _object_name(split_key, aggregation, granularity, version=3):
+ name = '%s_%s_%s' % (aggregation, granularity, split_key)
+ return name + '_v%s' % version if version else name
+
+ @staticmethod
+ def _prefix(metric):
+ return str(metric.id) + '/'
+
+ def _create_metric(self, metric):
+ pass
+
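+ # NOTE: at the time of writing, S3 only guarantees eventual
+ # consistency for overwrite PUTs, so after writing we poll
+ # head_object until the stored ETag matches the one returned by
+ # put_object, bounded by s3_check_consistency_timeout.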
+ def _put_object_safe(self, Bucket, Key, Body):
+ put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body)
+
+ if self._consistency_stop:
+
+ def _head():
+ return self.s3.head_object(Bucket=Bucket,
+ Key=Key, IfMatch=put['ETag'])
+
+ tenacity.Retrying(
+ retry=tenacity.retry_if_result(
+ lambda r: r['ETag'] != put['ETag']),
+ wait=self._consistency_wait,
+ stop=self._consistency_stop)(_head)
+
+ def _store_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, data, offset=0, version=3):
+ self._put_object_safe(
+ Bucket=self._bucket_name,
+ Key=self._prefix(metric) + self._object_name(
+ timestamp_key, aggregation, granularity, version),
+ Body=data)
+
+ def _delete_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, version=3):
+ self.s3.delete_object(
+ Bucket=self._bucket_name,
+ Key=self._prefix(metric) + self._object_name(
+ timestamp_key, aggregation, granularity, version))
+
+ def _delete_metric(self, metric):
+ bucket = self._bucket_name
+ response = {}
+ while response.get('IsTruncated', True):
+ if 'NextContinuationToken' in response:
+ kwargs = {
+ 'ContinuationToken': response['NextContinuationToken']
+ }
+ else:
+ kwargs = {}
+ try:
+ response = self.s3.list_objects_v2(
+ Bucket=bucket, Prefix=self._prefix(metric), **kwargs)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') == "NoSuchKey":
+ # Maybe it has never been created (no measures)
+ return
+ raise
+ s3.bulk_delete(self.s3, bucket,
+ [c['Key'] for c in response.get('Contents', ())])
+
+ def _get_measures(self, metric, timestamp_key, aggregation, granularity,
+ version=3):
+ try:
+ response = self.s3.get_object(
+ Bucket=self._bucket_name,
+ Key=self._prefix(metric) + self._object_name(
+ timestamp_key, aggregation, granularity, version))
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') == 'NoSuchKey':
+ try:
+ response = self.s3.list_objects_v2(
+ Bucket=self._bucket_name, Prefix=self._prefix(metric))
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') == 'NoSuchKey':
+ raise storage.MetricDoesNotExist(metric)
+ raise
+ raise storage.AggregationDoesNotExist(metric, aggregation)
+ raise
+ return response['Body'].read()
+
+ def _list_split_keys_for_metric(self, metric, aggregation, granularity,
+ version=3):
+ bucket = self._bucket_name
+ keys = set()
+ response = {}
+ while response.get('IsTruncated', True):
+ if 'NextContinuationToken' in response:
+ kwargs = {
+ 'ContinuationToken': response['NextContinuationToken']
+ }
+ else:
+ kwargs = {}
+ try:
+ response = self.s3.list_objects_v2(
+ Bucket=bucket,
+ Prefix=self._prefix(metric) + '%s_%s' % (aggregation,
+ granularity),
+ **kwargs)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') == "NoSuchKey":
+ raise storage.MetricDoesNotExist(metric)
+ raise
+ for f in response.get('Contents', ()):
+ try:
+ meta = f['Key'].split('_')
+ if (self._version_check(f['Key'], version)):
+ keys.add(meta[2])
+ except (ValueError, IndexError):
+ # Might be "none", or any other file. Be resilient.
+ continue
+ return keys
+
+ @staticmethod
+ def _build_unaggregated_timeserie_path(metric, version):
+ return S3Storage._prefix(metric) + 'none' + ("_v%s" % version
+ if version else "")
+
+ def _get_unaggregated_timeserie(self, metric, version=3):
+ try:
+ response = self.s3.get_object(
+ Bucket=self._bucket_name,
+ Key=self._build_unaggregated_timeserie_path(metric, version))
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error'].get('Code') == "NoSuchKey":
+ raise storage.MetricDoesNotExist(metric)
+ raise
+ return response['Body'].read()
+
+ def _store_unaggregated_timeserie(self, metric, data, version=3):
+ self._put_object_safe(
+ Bucket=self._bucket_name,
+ Key=self._build_unaggregated_timeserie_path(metric, version),
+ Body=data)
diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py
new file mode 100644
index 00000000..52dadbdb
--- /dev/null
+++ b/gnocchi/storage/swift.py
@@ -0,0 +1,185 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from gnocchi import storage
+from gnocchi.storage import _carbonara
+from gnocchi.storage.common import swift
+
+swclient = swift.swclient
+swift_utils = swift.swift_utils
+
+OPTS = [
+ cfg.StrOpt('swift_auth_version',
+ default='1',
+ help='Swift authentication version to use.'),
+ cfg.StrOpt('swift_preauthurl',
+ help='Swift pre-auth URL.'),
+ cfg.StrOpt('swift_authurl',
+ default="http://localhost:8080/auth/v1.0",
+ help='Swift auth URL.'),
+ cfg.StrOpt('swift_preauthtoken',
+ secret=True,
+ help='Swift token to use to authenticate.'),
+ cfg.StrOpt('swift_user',
+ default="admin:admin",
+ help='Swift user.'),
+ cfg.StrOpt('swift_user_domain_name',
+ default='Default',
+ help='Swift user domain name.'),
+ cfg.StrOpt('swift_key',
+ secret=True,
+ default="admin",
+ help='Swift key/password.'),
+ cfg.StrOpt('swift_project_name',
+ help='Swift tenant name, only used in v2/v3 auth.',
+ deprecated_name="swift_tenant_name"),
+ cfg.StrOpt('swift_project_domain_name',
+ default='Default',
+ help='Swift project domain name.'),
+ cfg.StrOpt('swift_container_prefix',
+ default='gnocchi',
+ help='Prefix to namespace metric containers.'),
+ cfg.StrOpt('swift_endpoint_type',
+ default='publicURL',
+ help='Endpoint type to connect to Swift',),
+ cfg.IntOpt('swift_timeout',
+ min=0,
+ default=300,
+ help='Connection timeout in seconds.'),
+]
+
+
+class SwiftStorage(_carbonara.CarbonaraBasedStorage):
+
+ WRITE_FULL = True
+
+ def __init__(self, conf, incoming):
+ super(SwiftStorage, self).__init__(conf, incoming)
+ self.swift = swift.get_connection(conf)
+ self._container_prefix = conf.swift_container_prefix
+
+ def _container_name(self, metric):
+ return '%s.%s' % (self._container_prefix, str(metric.id))
+
+ @staticmethod
+ def _object_name(split_key, aggregation, granularity, version=3):
+ name = '%s_%s_%s' % (split_key, aggregation, granularity)
+ return name + '_v%s' % version if version else name
+
+ def _create_metric(self, metric):
+ # TODO(jd) A container per user in their account?
+ resp = {}
+ self.swift.put_container(self._container_name(metric),
+ response_dict=resp)
+ # put_container() should return 201 Created; if it returns 204, that
+ # means the metric was already created!
+ if resp['status'] == 204:
+ raise storage.MetricAlreadyExists(metric)
+
+ def _store_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, data, offset=None, version=3):
+ self.swift.put_object(
+ self._container_name(metric),
+ self._object_name(timestamp_key, aggregation, granularity,
+ version),
+ data)
+
+ def _delete_metric_measures(self, metric, timestamp_key, aggregation,
+ granularity, version=3):
+ self.swift.delete_object(
+ self._container_name(metric),
+ self._object_name(timestamp_key, aggregation, granularity,
+ version))
+
+ def _delete_metric(self, metric):
+ container = self._container_name(metric)
+ try:
+ headers, files = self.swift.get_container(
+ container, full_listing=True)
+ except swclient.ClientException as e:
+ if e.http_status != 404:
+ # Maybe it has never been created (no measures)
+ raise
+ else:
+ swift.bulk_delete(self.swift, container, files)
+ try:
+ self.swift.delete_container(container)
+ except swclient.ClientException as e:
+ if e.http_status != 404:
+ # Deleted in the meantime? Whatever.
+ raise
+
+ def _get_measures(self, metric, timestamp_key, aggregation, granularity,
+ version=3):
+ try:
+ headers, contents = self.swift.get_object(
+ self._container_name(metric), self._object_name(
+ timestamp_key, aggregation, granularity, version))
+ except swclient.ClientException as e:
+ if e.http_status == 404:
+ try:
+ self.swift.head_container(self._container_name(metric))
+ except swclient.ClientException as e:
+ if e.http_status == 404:
+ raise storage.MetricDoesNotExist(metric)
+ raise
+ raise storage.AggregationDoesNotExist(metric, aggregation)
+ raise
+ return contents
+
+ def _list_split_keys_for_metric(self, metric, aggregation, granularity,
+ version=3):
+ container = self._container_name(metric)
+ try:
+ headers, files = self.swift.get_container(
+ container, full_listing=True)
+ except swclient.ClientException as e:
+ if e.http_status == 404:
+ raise storage.MetricDoesNotExist(metric)
+ raise
+ keys = set()
+ for f in files:
+ try:
+ meta = f['name'].split('_')
+ if (aggregation == meta[1] and granularity == float(meta[2])
+ and self._version_check(f['name'], version)):
+ keys.add(meta[0])
+ except (ValueError, IndexError):
+ # Might be "none", or any other file. Be resilient.
+ continue
+ return keys
+
+ @staticmethod
+ def _build_unaggregated_timeserie_path(version):
+ return 'none' + ("_v%s" % version if version else "")
+
+ def _get_unaggregated_timeserie(self, metric, version=3):
+ try:
+ headers, contents = self.swift.get_object(
+ self._container_name(metric),
+ self._build_unaggregated_timeserie_path(version))
+ except swclient.ClientException as e:
+ if e.http_status == 404:
+ raise storage.MetricDoesNotExist(metric)
+ raise
+ return contents
+
+ def _store_unaggregated_timeserie(self, metric, data, version=3):
+ self.swift.put_object(self._container_name(metric),
+ self._build_unaggregated_timeserie_path(version),
+ data)
diff --git a/gnocchi/tempest/__init__.py b/gnocchi/tempest/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/tempest/config.py b/gnocchi/tempest/config.py
new file mode 100644
index 00000000..74d7ef3e
--- /dev/null
+++ b/gnocchi/tempest/config.py
@@ -0,0 +1,33 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+service_option = cfg.BoolOpt('gnocchi',
+ default=True,
+ help="Whether or not Gnocchi is expected to be"
+ "available")
+
+metric_group = cfg.OptGroup(name='metric',
+ title='Metric Service Options')
+
+metric_opts = [
+ cfg.StrOpt('catalog_type',
+ default='metric',
+ help="Catalog type of the Metric service."),
+ cfg.StrOpt('endpoint_type',
+ default='publicURL',
+ choices=['public', 'admin', 'internal',
+ 'publicURL', 'adminURL', 'internalURL'],
+ help="The endpoint type to use for the metric service."),
+]
diff --git a/gnocchi/tempest/plugin.py b/gnocchi/tempest/plugin.py
new file mode 100644
index 00000000..3410471f
--- /dev/null
+++ b/gnocchi/tempest/plugin.py
@@ -0,0 +1,42 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+
+import os
+
+from tempest.test_discover import plugins
+
+import gnocchi
+from gnocchi.tempest import config as tempest_config
+
+
+class GnocchiTempestPlugin(plugins.TempestPlugin):
+ def load_tests(self):
+ base_path = os.path.split(os.path.dirname(
+ os.path.abspath(gnocchi.__file__)))[0]
+ test_dir = "gnocchi/tempest"
+ full_test_dir = os.path.join(base_path, test_dir)
+ return full_test_dir, base_path
+
+ def register_opts(self, conf):
+ conf.register_opt(tempest_config.service_option,
+ group='service_available')
+ conf.register_group(tempest_config.metric_group)
+ conf.register_opts(tempest_config.metric_opts, group='metric')
+
+ def get_opt_lists(self):
+ return [(tempest_config.metric_group.name,
+ tempest_config.metric_opts),
+ ('service_available', [tempest_config.service_option])]
diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py
new file mode 100644
index 00000000..7db0fd6f
--- /dev/null
+++ b/gnocchi/tempest/scenario/__init__.py
@@ -0,0 +1,110 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+
+import os
+import unittest
+
+from gabbi import runner
+from gabbi import suitemaker
+from gabbi import utils
+import six.moves.urllib.parse as urlparse
+from tempest import config
+import tempest.test
+
+CONF = config.CONF
+
+TEST_DIR = os.path.join(os.path.dirname(__file__), '..', '..',
+ 'tests', 'functional_live', 'gabbits')
+
+
+class GnocchiGabbiTest(tempest.test.BaseTestCase):
+ credentials = ['admin']
+
+ TIMEOUT_SCALING_FACTOR = 5
+
+ @classmethod
+ def skip_checks(cls):
+ super(GnocchiGabbiTest, cls).skip_checks()
+ if not CONF.service_available.gnocchi:
+ raise cls.skipException("Gnocchi support is required")
+
+ def _do_test(self, filename):
+ token = self.os_admin.auth_provider.get_token()
+ url = self.os_admin.auth_provider.base_url(
+ {'service': CONF.metric.catalog_type,
+ 'endpoint_type': CONF.metric.endpoint_type})
+
+ parsed_url = urlparse.urlsplit(url)
+ prefix = parsed_url.path.rstrip('/') # turn it into a prefix
+ if parsed_url.scheme == 'https':
+ port = 443
+ require_ssl = True
+ else:
+ port = 80
+ require_ssl = False
+ host = parsed_url.hostname
+ if parsed_url.port:
+ port = parsed_url.port
+
+ os.environ["GNOCCHI_SERVICE_TOKEN"] = token
+ os.environ["GNOCCHI_AUTHORIZATION"] = "not used"
+
+ with open(os.path.join(TEST_DIR, filename)) as f:
+ suite_dict = utils.load_yaml(f)
+ suite_dict.setdefault('defaults', {})['ssl'] = require_ssl
+ test_suite = suitemaker.test_suite_from_dict(
+ loader=unittest.defaultTestLoader,
+ test_base_name="gabbi",
+ suite_dict=suite_dict,
+ test_directory=TEST_DIR,
+ host=host, port=port,
+ fixture_module=None,
+ intercept=None,
+ prefix=prefix,
+ handlers=runner.initialize_handlers([]),
+ test_loader_name="tempest")
+
+        # NOTE(sileht): We hide stdout/stderr and re-raise the failure
+        # manually; tempest will print it itself.
+ with open(os.devnull, 'w') as stream:
+ result = unittest.TextTestRunner(
+ stream=stream, verbosity=0, failfast=True,
+ ).run(test_suite)
+
+ if not result.wasSuccessful():
+ failures = (result.errors + result.failures +
+ result.unexpectedSuccesses)
+ if failures:
+ test, bt = failures[0]
+ name = test.test_data.get('name', test.id())
+ msg = 'From test "%s" :\n%s' % (name, bt)
+ self.fail(msg)
+
+ self.assertTrue(result.wasSuccessful())
+
+
+def test_maker(name, filename):
+ def test(self):
+ self._do_test(filename)
+ test.__name__ = name
+ return test
+
+
+# Create one scenario per yaml file
+for filename in os.listdir(TEST_DIR):
+ if not filename.endswith('.yaml'):
+ continue
+ name = "test_%s" % filename[:-5].lower().replace("-", "_")
+ setattr(GnocchiGabbiTest, name,
+ test_maker(name, filename))
diff --git a/gnocchi/tests/__init__.py b/gnocchi/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py
new file mode 100644
index 00000000..3f35b40c
--- /dev/null
+++ b/gnocchi/tests/base.py
@@ -0,0 +1,335 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2016 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import functools
+import json
+import os
+import subprocess
+import threading
+import uuid
+
+import fixtures
+from oslotest import base
+from oslotest import log
+from oslotest import output
+import six
+from six.moves.urllib.parse import unquote
+try:
+ from swiftclient import exceptions as swexc
+except ImportError:
+ swexc = None
+from testtools import testcase
+from tooz import coordination
+
+from gnocchi import archive_policy
+from gnocchi import exceptions
+from gnocchi import indexer
+from gnocchi import service
+from gnocchi import storage
+
+
+class SkipNotImplementedMeta(type):
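+    """Metaclass that turns NotImplementedError into a test skip.
+
+    Every ``test_*`` method and ``setUp`` is wrapped so that a driver
+    raising ``gnocchi.exceptions.NotImplementedError`` marks the test
+    as skipped instead of failed.
+    """
+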
+ def __new__(cls, name, bases, local):
+ for attr in local:
+ value = local[attr]
+ if callable(value) and (
+ attr.startswith('test_') or attr == 'setUp'):
+ local[attr] = _skip_decorator(value)
+ return type.__new__(cls, name, bases, local)
+
+
+def _skip_decorator(func):
+ @functools.wraps(func)
+ def skip_if_not_implemented(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except exceptions.NotImplementedError as e:
+ raise testcase.TestSkipped(six.text_type(e))
+ return skip_if_not_implemented
+
+
+class FakeSwiftClient(object):
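+    """A minimal in-memory stand-in for swiftclient.client.Connection.
+
+    Only the calls exercised by the tests are implemented; objects live
+    in a dict of dicts keyed by container then object name.
+    """
+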
+ def __init__(self, *args, **kwargs):
+ self.kvs = {}
+
+ def put_container(self, container, response_dict=None):
+ if response_dict is not None:
+ if container in self.kvs:
+ response_dict['status'] = 204
+ else:
+ response_dict['status'] = 201
+ self.kvs[container] = {}
+
+ def get_container(self, container, delimiter=None,
+ path=None, full_listing=False, limit=None):
+ try:
+ container = self.kvs[container]
+ except KeyError:
+ raise swexc.ClientException("No such container",
+ http_status=404)
+
+ files = []
+ directories = set()
+ for k, v in six.iteritems(container.copy()):
+ if path and not k.startswith(path):
+ continue
+
+ if delimiter is not None and delimiter in k:
+ dirname = k.split(delimiter, 1)[0]
+ if dirname not in directories:
+ directories.add(dirname)
+ files.append({'subdir': dirname + delimiter})
+ else:
+ files.append({'bytes': len(v),
+ 'last_modified': None,
+ 'hash': None,
+ 'name': k,
+ 'content_type': None})
+
+ if full_listing:
+ end = None
+ elif limit:
+ end = limit
+ else:
+            # Swift's real default limit is 10000, but 1 is enough to
+            # make our tests fail if they do not handle paging.
+ end = 1
+
+        return ({'x-container-object-count': len(container)},
+ (files + list(directories))[:end])
+
+ def put_object(self, container, key, obj):
+ if hasattr(obj, "seek"):
+ obj.seek(0)
+ obj = obj.read()
+ # TODO(jd) Maybe we should reset the seek(), but well…
+ try:
+ self.kvs[container][key] = obj
+ except KeyError:
+ raise swexc.ClientException("No such container",
+ http_status=404)
+
+ def get_object(self, container, key):
+ try:
+ return {}, self.kvs[container][key]
+ except KeyError:
+ raise swexc.ClientException("No such container/object",
+ http_status=404)
+
+ def delete_object(self, container, obj):
+ try:
+ del self.kvs[container][obj]
+ except KeyError:
+ raise swexc.ClientException("No such container/object",
+ http_status=404)
+
+ def delete_container(self, container):
+ if container not in self.kvs:
+ raise swexc.ClientException("No such container",
+ http_status=404)
+ if self.kvs[container]:
+ raise swexc.ClientException("Container not empty",
+ http_status=409)
+ del self.kvs[container]
+
+ def head_container(self, container):
+ if container not in self.kvs:
+ raise swexc.ClientException("No such container",
+ http_status=404)
+
+ def post_account(self, headers, query_string=None, data=None,
+ response_dict=None):
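+        # Mimic Swift's bulk-delete middleware: data is a body of
+        # newline-separated /container/object paths, and the reply
+        # counts how many were deleted or not found.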
+ if query_string == 'bulk-delete':
+ resp = {'Response Status': '200 OK',
+ 'Response Body': '',
+ 'Number Deleted': 0,
+ 'Number Not Found': 0}
+ if response_dict is not None:
+ response_dict['status'] = 200
+ if data:
+ for path in data.splitlines():
+ try:
+ __, container, obj = (unquote(path.decode('utf8'))
+ .split('/', 2))
+ del self.kvs[container][obj]
+ resp['Number Deleted'] += 1
+ except KeyError:
+ resp['Number Not Found'] += 1
+ return {}, json.dumps(resp).encode('utf-8')
+
+ if response_dict is not None:
+ response_dict['status'] = 204
+
+ return {}, None
+
+
+@six.add_metaclass(SkipNotImplementedMeta)
+class TestCase(base.BaseTestCase):
+
+ REDIS_DB_INDEX = 0
+ REDIS_DB_LOCK = threading.Lock()
+
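+    # Archive policies shared by the test suites. Each maps a name to a
+    # list of (granularity in seconds, number of points) definitions.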
+ ARCHIVE_POLICIES = {
+ 'no_granularity_match': archive_policy.ArchivePolicy(
+ "no_granularity_match",
+ 0, [
+ # 2 second resolution for a day
+ archive_policy.ArchivePolicyItem(
+ granularity=2, points=3600 * 24),
+ ],
+ ),
+ 'low': archive_policy.ArchivePolicy(
+ "low", 0, [
+ # 5 minutes resolution for an hour
+ archive_policy.ArchivePolicyItem(
+ granularity=300, points=12),
+ # 1 hour resolution for a day
+ archive_policy.ArchivePolicyItem(
+ granularity=3600, points=24),
+ # 1 day resolution for a month
+ archive_policy.ArchivePolicyItem(
+ granularity=3600 * 24, points=30),
+ ],
+ ),
+ 'medium': archive_policy.ArchivePolicy(
+ "medium", 0, [
+                # 1 minute resolution for a day
+ archive_policy.ArchivePolicyItem(
+ granularity=60, points=60 * 24),
+ # 1 hour resolution for a week
+ archive_policy.ArchivePolicyItem(
+ granularity=3600, points=7 * 24),
+ # 1 day resolution for a year
+ archive_policy.ArchivePolicyItem(
+ granularity=3600 * 24, points=365),
+ ],
+ ),
+ 'high': archive_policy.ArchivePolicy(
+ "high", 0, [
+ # 1 second resolution for an hour
+ archive_policy.ArchivePolicyItem(
+ granularity=1, points=3600),
+ # 1 minute resolution for a week
+ archive_policy.ArchivePolicyItem(
+ granularity=60, points=60 * 24 * 7),
+ # 1 hour resolution for a year
+ archive_policy.ArchivePolicyItem(
+ granularity=3600, points=365 * 24),
+ ],
+ ),
+ }
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestCase, cls).setUpClass()
+
+        # NOTE(sileht): oslotest does this in setUp(), but we need it
+        # here, at the class level
+        cls.output = output.CaptureOutput()
+        cls.output.setUp()
+        cls.log = log.ConfigureLogging()
+        cls.log.setUp()
+
+        cls.conf = service.prepare_service([],
+                                           default_config_files=[])
+        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                               '..',))
+        cls.conf.set_override('paste_config',
+                              os.path.join(py_root, 'rest', 'api-paste.ini'),
+                              group="api")
+        cls.conf.set_override('policy_file',
+                              os.path.join(py_root, 'rest', 'policy.json'),
+                              group="oslo_policy")
+
+        # NOTE(jd) This allows testing S3 on AWS
+        if not os.getenv("AWS_ACCESS_KEY_ID"):
+            cls.conf.set_override('s3_endpoint_url',
+                                  os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
+                                  group="storage")
+            cls.conf.set_override('s3_access_key_id', "gnocchi",
+                                  group="storage")
+            cls.conf.set_override('s3_secret_access_key', "anythingworks",
+                                  group="storage")
+
+        cls.index = indexer.get_driver(cls.conf)
+        cls.index.connect()
+
+        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
+        # their tables in a single transaction even with checkfirst=True,
+        # so we force the upgrade code path to be sequential to avoid
+        # race conditions as the tests run in parallel.
+        cls.coord = coordination.get_coordinator(
+            cls.conf.storage.coordination_url,
+            str(uuid.uuid4()).encode('ascii'))
+
+        cls.coord.start(start_heart=True)
+
+        with cls.coord.get_lock(b"gnocchi-tests-db-lock"):
+            cls.index.upgrade()
+
+        cls.coord.stop()
+
+        # Create the basic archive policies
+        cls.archive_policies = cls.ARCHIVE_POLICIES.copy()
+        for ap in cls.archive_policies.values():
+            try:
+                cls.index.create_archive_policy(ap)
+            except indexer.ArchivePolicyAlreadyExists:
+                pass
+
+        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
+        cls.conf.set_override('driver', storage_driver, 'storage')
+        if storage_driver == 'ceph':
+            cls.conf.set_override('ceph_conffile',
+                                  os.getenv("CEPH_CONF"),
+                                  'storage')
+
+ def setUp(self):
+ super(TestCase, self).setUp()
+ if swexc:
+ self.useFixture(fixtures.MockPatch(
+ 'swiftclient.client.Connection',
+ FakeSwiftClient))
+
+ if self.conf.storage.driver == 'file':
+ tempdir = self.useFixture(fixtures.TempDir())
+ self.conf.set_override('file_basepath',
+ tempdir.path,
+ 'storage')
+ elif self.conf.storage.driver == 'ceph':
+ pool_name = uuid.uuid4().hex
+ subprocess.call("rados -c %s mkpool %s" % (
+ os.getenv("CEPH_CONF"), pool_name), shell=True)
+ self.conf.set_override('ceph_pool', pool_name, 'storage')
+
+ # Override the bucket prefix to be unique to avoid concurrent access
+ # with any other test
+ self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
+ "storage")
+
+ self.storage = storage.get_driver(self.conf)
+
+ if self.conf.storage.driver == 'redis':
+ # Create one prefix per test
+ self.storage.STORAGE_PREFIX = str(uuid.uuid4())
+ self.storage.incoming.SACK_PREFIX = str(uuid.uuid4())
+
+ self.storage.upgrade(self.index, 128)
+
+ def tearDown(self):
+ self.index.disconnect()
+ self.storage.stop()
+ super(TestCase, self).tearDown()
diff --git a/gnocchi/tests/functional/__init__.py b/gnocchi/tests/functional/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py
new file mode 100644
index 00000000..90004194
--- /dev/null
+++ b/gnocchi/tests/functional/fixtures.py
@@ -0,0 +1,189 @@
+#
+# Copyright 2015 Red Hat. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Fixtures for use with gabbi tests."""
+
+import os
+import shutil
+import tempfile
+import threading
+import time
+from unittest import case
+import warnings
+
+from gabbi import fixture
+from oslo_config import cfg
+from oslo_middleware import cors
+from oslotest import log
+from oslotest import output
+import sqlalchemy_utils
+
+from gnocchi import indexer
+from gnocchi.indexer import sqlalchemy
+from gnocchi.rest import app
+from gnocchi import service
+from gnocchi import storage
+from gnocchi.tests import utils
+
+
+# NOTE(chdent): Hack to restore some semblance of global configuration
+# to pass to the WSGI app used per test suite. LOAD_APP_KWARGS are the
+# oslo configuration and the pecan application configuration, of which
+# the critical part is a reference to the current indexer.
+LOAD_APP_KWARGS = None
+
+
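+# Used by the gabbi harness as the wsgi-intercept application factory,
+# so each test talks to an app wired to the fixture's indexer and
+# storage.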
+def setup_app():
+ global LOAD_APP_KWARGS
+ return app.load_app(**LOAD_APP_KWARGS)
+
+
+class ConfigFixture(fixture.GabbiFixture):
+ """Establish the relevant configuration fixture, per test file.
+
+ Each test file gets its own oslo config and its own indexer and storage
+ instance. The indexer is based on the current database url. The storage
+ uses a temporary directory.
+
+ To use this fixture in a gabbit add::
+
+ fixtures:
+ - ConfigFixture
+ """
+
+ def __init__(self):
+ self.conf = None
+ self.tmp_dir = None
+
+ def start_fixture(self):
+ """Create necessary temp files and do the config dance."""
+
+ self.output = output.CaptureOutput()
+ self.output.setUp()
+ self.log = log.ConfigureLogging()
+ self.log.setUp()
+
+ global LOAD_APP_KWARGS
+
+ data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')
+
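+        # Under GABBI_LIVE, let oslo.config search its default
+        # configuration file locations; otherwise start from an empty
+        # configuration.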
+ if os.getenv("GABBI_LIVE"):
+ dcf = None
+ else:
+ dcf = []
+ conf = service.prepare_service([],
+ default_config_files=dcf)
+ py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '..', '..',))
+ conf.set_override('paste_config',
+ os.path.join(py_root, 'rest', 'api-paste.ini'),
+ group="api")
+ conf.set_override('policy_file',
+ os.path.join(py_root, 'rest', 'policy.json'),
+ group="oslo_policy")
+
+        # NOTE(sileht): This is not concurrency safe, but only this test
+        # file deals with CORS, so we are fine. set_override doesn't work
+        # because the cors group doesn't exist yet, and when the CORS
+        # middleware is created it registers the options and directly
+        # copies the value of every configuration option, making it
+        # impossible to override them properly...
+ cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")
+
+ self.conf = conf
+ self.tmp_dir = data_tmp_dir
+
+ if conf.indexer.url is None:
+ raise case.SkipTest("No indexer configured")
+
+        # Use the absence of DEVSTACK_GATE_TEMPEST as a signal that we
+        # are not in a gate-driven functional test and thus should
+        # override conf settings.
+ if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
+ conf.set_override('driver', 'file', 'storage')
+ conf.set_override('file_basepath', data_tmp_dir, 'storage')
+
+        # NOTE(jd) All of this is still very SQL-centric, but we only
+        # support SQL for now, so let's say it's good enough.
+ conf.set_override(
+ 'url',
+ sqlalchemy.SQLAlchemyIndexer._create_new_database(
+ conf.indexer.url),
+ 'indexer')
+
+ index = indexer.get_driver(conf)
+ index.connect()
+ index.upgrade()
+
+ # Set pagination to a testable value
+ conf.set_override('max_limit', 7, 'api')
+        # These tests use noauth mode
+ # TODO(jd) Rewrite them for basic
+ conf.set_override("auth_mode", "noauth", 'api')
+
+ self.index = index
+
+ s = storage.get_driver(conf)
+ s.upgrade(index, 128)
+
+ LOAD_APP_KWARGS = {
+ 'storage': s,
+ 'indexer': index,
+ 'conf': conf,
+ }
+
+        # Start up a thread to process measures asynchronously.
+ self.metricd_thread = MetricdThread(index, s)
+ self.metricd_thread.start()
+
+ def stop_fixture(self):
+ """Clean up the config fixture and storage artifacts."""
+ if hasattr(self, 'metricd_thread'):
+ self.metricd_thread.stop()
+ self.metricd_thread.join()
+
+ if hasattr(self, 'index'):
+ self.index.disconnect()
+
+ # Swallow noise from missing tables when dropping
+ # database.
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore',
+ module='sqlalchemy.engine.default')
+ sqlalchemy_utils.drop_database(self.conf.indexer.url)
+
+ if self.tmp_dir:
+ shutil.rmtree(self.tmp_dir)
+
+ self.conf.reset()
+ self.output.cleanUp()
+ self.log.cleanUp()
+
+
+class MetricdThread(threading.Thread):
+ """Run metricd in a naive thread to process measures."""
+
+ def __init__(self, index, storer, name='metricd'):
+ super(MetricdThread, self).__init__(name=name)
+ self.index = index
+ self.storage = storer
+ self.flag = True
+
+ def run(self):
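+        # Naive polling loop: process whatever measures have arrived,
+        # then sleep briefly until stop() clears the flag.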
+ while self.flag:
+ metrics = utils.list_all_incoming_metrics(self.storage.incoming)
+ self.storage.process_background_tasks(self.index, metrics)
+ time.sleep(0.1)
+
+ def stop(self):
+ self.flag = False
diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml
new file mode 100644
index 00000000..39c31d38
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/aggregation.yaml
@@ -0,0 +1,341 @@
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: low
+ definition:
+ - granularity: 1 second
+ - granularity: 300 seconds
+ status: 201
+
+# Aggregation by metric ids
+
+ - name: create metric 1
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: low
+ status: 201
+
+ - name: create metric 2
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: low
+ status: 201
+
+ - name: get metric list
+ GET: /v1/metric
+
+ - name: push measurements to metric 1
+ POST: /v1/metric/$RESPONSE['$[0].id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
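+# $HISTORY['<test name>'].$RESPONSE['<json path>'] replays a value from a
+# named earlier response; a bare $RESPONSE refers to the previous test.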
+ - name: push measurements to metric 2
+ POST: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[1].id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 3.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 2
+ - timestamp: "2015-03-06T14:35:12"
+ value: 5
+ status: 202
+
+ - name: get measure aggregates by granularity not float
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=foobar
+ status: 400
+
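+# refresh=true forces any unprocessed measures to be aggregated before
+# computing the result, so no poll loop is needed for this query.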
+ - name: get measure aggregates by granularity with refresh
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&refresh=true
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]
+
+ - name: get measure aggregates by granularity
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]
+
+ - name: get measure aggregates by granularity with timestamps
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:30:00+00:00', 300.0, 15.05]
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+
+ - name: get measure aggregates and reaggregate
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&reaggregation=min
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:30:00+00:00', 300.0, 2.55]
+ - ['2015-03-06T14:33:57+00:00', 1.0, 3.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 2.0]
+
+ - name: get measure aggregates and resample
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&resample=60
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:00+00:00', 60.0, 23.1]
+ - ['2015-03-06T14:34:00+00:00', 60.0, 7.0]
+
+ - name: get measure aggregates with fill zero
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=0
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]
+ - ['2015-03-06T14:35:12+00:00', 1.0, 2.5]
+
+ - name: get measure aggregates with fill null
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=null
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]
+ - ['2015-03-06T14:35:12+00:00', 1.0, 5.0]
+
+ - name: get measure aggregates with fill missing granularity
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&fill=0
+ status: 400
+
+ - name: get measure aggregates with bad fill
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=asdf
+ status: 400
+
+
+# Aggregation by resource and metric_name
+
+ - name: post a resource
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: bcd3441c-b5aa-4d1b-af9a-5a72322bb269
+ metrics:
+ agg_meter:
+ archive_policy_name: low
+ status: 201
+
+ - name: post another resource
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 1b0a8345-b279-4cb8-bd7a-2cb83193624f
+ metrics:
+ agg_meter:
+ archive_policy_name: low
+ status: 201
+
+ - name: push measurements to resource 1
+ POST: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: push measurements to resource 2
+ POST: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 3.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 2
+ - timestamp: "2015-03-06T14:35:12"
+ value: 5
+ status: 202
+
+ - name: get measure aggregates by granularity from resources with refresh
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&refresh=true
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]
+
+ - name: get measure aggregates by granularity from resources
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]
+
+ - name: get measure aggregates by granularity from resources and resample
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&resample=60
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:00+00:00', 60.0, 23.1]
+ - ['2015-03-06T14:34:00+00:00', 60.0, 7.0]
+
+ - name: get measure aggregates by granularity from resources and bad resample
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=abc
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: get measure aggregates by granularity from resources and resample no granularity
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=60
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+ response_strings:
+ - A granularity must be specified to resample
+
+ - name: get measure aggregates by granularity with timestamps from resources
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:30:00+00:00', 300.0, 15.05]
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+
+ - name: get measure aggregates by granularity from resources and reaggregate
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 3.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 2.0]
+
+ - name: get measure aggregates from resources with fill zero
+ POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&fill=0
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $:
+ - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]
+ - ['2015-03-06T14:35:12+00:00', 1.0, 2.5]
+
+
+# Some negative tests
+
+ - name: get measure aggregates with wrong GET
+ GET: /v1/aggregation/resource/generic/metric/agg_meter
+ status: 405
+
+ - name: get measure aggregates with wrong metric_name
+ POST: /v1/aggregation/resource/generic/metric/notexists
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+ response_json_paths:
+ $.`len`: 0
+
+ - name: get measure aggregates with wrong resource
+ POST: /v1/aggregation/resource/notexits/metric/agg_meter
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 404
+ response_strings:
+ - Resource type notexits does not exist
+
+ - name: get measure aggregates with wrong path
+ POST: /v1/aggregation/re/generic/metric/agg_meter
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 404
+
+ - name: get measure aggregates with wrong path 2
+ POST: /v1/aggregation/resource/generic/notexists/agg_meter
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 404
+
+ - name: get measure aggregates with no resource name
+ POST: /v1/aggregation/resource/generic/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 405
diff --git a/gnocchi/tests/functional/gabbits/archive-rule.yaml b/gnocchi/tests/functional/gabbits/archive-rule.yaml
new file mode 100644
index 00000000..bc3ea60a
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/archive-rule.yaml
@@ -0,0 +1,197 @@
+#
+# Test the Archive Policy Rule API to achieve coverage of just the
+# ArchivePolicyRulesController.
+#
+fixtures:
+ - ConfigFixture
+
+tests:
+
+# create dependent policy
+ - name: create archive policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: low
+ definition:
+ - granularity: 1 hour
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/low
+
+# Attempt to create an archive policy rule
+
+ - name: create archive policy rule1
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test_rule1
+ metric_pattern: "*"
+ archive_policy_name: low
+ status: 201
+ response_json_paths:
+ $.metric_pattern: "*"
+ $.archive_policy_name: low
+ $.name: test_rule1
+
+ - name: create archive policy rule 2
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test_rule2
+ metric_pattern: "disk.foo.*"
+ archive_policy_name: low
+ status: 201
+ response_json_paths:
+ $.metric_pattern: disk.foo.*
+ $.archive_policy_name: low
+ $.name: test_rule2
+
+ - name: create archive policy rule 3
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test_rule3
+ metric_pattern: "disk.*"
+ archive_policy_name: low
+ status: 201
+ response_json_paths:
+ $.metric_pattern: disk.*
+ $.archive_policy_name: low
+ $.name: test_rule3
+
+
+# Attempt to create an invalid policy rule
+
+ - name: create invalid archive policy rule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test_rule
+ metric_pattern: "disk.foo.*"
+ status: 400
+
+ - name: missing auth archive policy rule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ data:
+ name: test_rule
+ metric_pattern: "disk.foo.*"
+ archive_policy_name: low
+ status: 403
+
+ - name: wrong content type
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: text/plain
+ x-roles: admin
+ status: 415
+ response_strings:
+ - Unsupported Media Type
+
+ - name: wrong auth create rule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: foo
+ data:
+ name: test_rule_wrong_auth
+ metric_pattern: "disk.foo.*"
+ archive_policy_name: low
+ status: 403
+
+ - name: missing auth createrule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ data:
+ name: test_rule_miss_auth
+ metric_pattern: "disk.foo.*"
+ archive_policy_name: low
+ status: 403
+
+ - name: bad request body
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ whaa: foobar
+ status: 400
+ response_strings:
+ - "Invalid input: extra keys not allowed"
+
+# get archive policy rules
+
+ - name: get archive policy rule
+ GET: /v1/archive_policy_rule
+ status: 200
+ response_json_paths:
+ $.[0].metric_pattern: disk.foo.*
+ $.[1].metric_pattern: disk.*
+ $.[2].metric_pattern: "*"
+
+ - name: get unknown archive policy rule
+ GET: /v1/archive_policy_rule/foo
+ status: 404
+
+ - name: delete used archive policy
+ DELETE: /v1/archive_policy/low
+ request_headers:
+ x-roles: admin
+ status: 400
+
+# delete rule as non admin
+
+ - name: delete archive policy rule non admin
+ DELETE: /v1/archive_policy_rule/test_rule1
+ status: 403
+
+# delete rule
+
+ - name: delete archive policy rule1
+ DELETE: /v1/archive_policy_rule/test_rule1
+ request_headers:
+ x-roles: admin
+ status: 204
+
+ - name: delete archive policy rule2
+ DELETE: /v1/archive_policy_rule/test_rule2
+ request_headers:
+ x-roles: admin
+ status: 204
+
+
+ - name: delete archive policy rule3
+ DELETE: /v1/archive_policy_rule/test_rule3
+ request_headers:
+ x-roles: admin
+ status: 204
+
+# delete again
+
+ - name: confirm delete archive policy rule
+ DELETE: /v1/archive_policy_rule/test_rule1
+ request_headers:
+ x-roles: admin
+ status: 404
+
+ - name: delete missing archive policy rule utf8
+ DELETE: /v1/archive_policy_rule/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ request_headers:
+ x-roles: admin
+ status: 404
+ response_strings:
+ - Archive policy rule ✔éñ☃ does not exist
diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml
new file mode 100644
index 00000000..42fe13c8
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/archive.yaml
@@ -0,0 +1,568 @@
+#
+# Test the Archive Policy API to achieve coverage of just the
+# ArchivePoliciesController.
+#
+
+fixtures:
+ - ConfigFixture
+
+tests:
+
+# Retrieve the empty list when there are no archive policies.
+# NOTE(chdent): This demonstrates what used to be considered a
+# security bug in JSON output:
+# http://flask.pocoo.org/docs/0.10/security/#json-security
+# The version described there is supposed to be fixed in most modern
+# browsers but there is a new version of the problem which is only
+# fixed in some:
+# http://haacked.com/archive/2009/06/25/json-hijacking.aspx/
+# The caveats point out that this is only an issue if your data is
+# sensitive, which in this case...?
+# However, the api-wg has made the recommendation that collections
+# should be returned as an object with a named key with a value of
+# a list as follows: {"archive_policies": [...]}
+# This allows for extensibility such as future support for pagination.
+# Do we care?
+
+ - name: empty archive policy list
+ GET: /v1/archive_policy
+ response_headers:
+ content-type: /application/json/
+ response_strings:
+ - "[]"
+
+ - name: empty list text
+ GET: /v1/archive_policy
+ request_headers:
+ accept: text/plain
+ status: 406
+
+ - name: empty list html
+ GET: /v1/archive_policy
+ request_headers:
+ accept: text/html
+ status: 406
+
+# Fail to create an archive policy for various reasons.
+
+ - name: wrong content type
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: text/plain
+ x-roles: admin
+ status: 415
+ response_strings:
+ - Unsupported Media Type
+
+ - name: wrong method
+ PUT: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ status: 405
+
+ - name: wrong authZ
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: clancy
+ data:
+ name: medium
+ definition:
+ - granularity: 1 second
+ status: 403
+
+ - name: missing authZ
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: medium
+ definition:
+ - granularity: 1 second
+ status: 403
+
+ - name: bad request body
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ cowsay: moo
+ status: 400
+ response_strings:
+ - "Invalid input: extra keys not allowed"
+
+ - name: missing definition
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: medium
+ status: 400
+ response_strings:
+ - "Invalid input: required key not provided"
+
+ - name: empty definition
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: medium
+ definition: []
+ status: 400
+ response_strings:
+ - "Invalid input: length of value must be at least 1"
+
+ - name: wrong value definition
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: somename
+ definition: foobar
+ status: 400
+ response_strings:
+ - "Invalid input: expected a list"
+
+ - name: useless definition
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: medium
+ definition:
+ - cowsay: moo
+ status: 400
+ response_strings:
+ - "Invalid input: extra keys not allowed"
+
+# Create a valid archive policy.
+
+ - name: create archive policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: medium
+ definition:
+ - granularity: 1 second
+ points: 20
+ - granularity: 2 second
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/medium
+ status: 201
+
+# Retrieve it correctly and then poorly
+
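+# $LOCATION expands to the location header of the previous response and
+# $LAST_URL to the URL of the previous request.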
+ - name: get archive policy
+ GET: $LOCATION
+ response_headers:
+ content-type: /application/json/
+ response_json_paths:
+ $.name: medium
+ $.definition[0].granularity: "0:00:01"
+ $.definition[0].points: 20
+ $.definition[0].timespan: "0:00:20"
+ $.definition[1].granularity: "0:00:02"
+ $.definition[1].points: null
+ $.definition[1].timespan: null
+
+ - name: get wrong accept
+ GET: $LAST_URL
+ request_headers:
+ accept: text/plain
+ status: 406
+
+# Update archive policy
+
+ - name: patch archive policy with bad definition
+ PATCH: $LAST_URL
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ definition:
+ - granularity: 1 second
+ points: 50
+ timespan: 1 hour
+ - granularity: 2 second
+ status: 400
+ response_strings:
+ - timespan ≠ granularity × points
+
+ - name: patch archive policy with missing granularity
+ PATCH: $LAST_URL
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ definition:
+ - granularity: 1 second
+ points: 50
+ status: 400
+ response_strings:
+ - "Archive policy medium does not support change: Cannot add or drop granularities"
+
+ - name: patch archive policy with non-matching granularity
+ PATCH: $LAST_URL
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ definition:
+ - granularity: 5 second
+ points: 20
+ - granularity: 2 second
+ status: 400
+ response_strings:
+ - "Archive policy medium does not support change: 1.0 granularity interval was changed"
+
+ - name: patch archive policy
+ PATCH: $LAST_URL
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ definition:
+ - granularity: 1 second
+ points: 50
+ - granularity: 2 second
+ status: 200
+ response_json_paths:
+ $.name: medium
+ $.definition[0].granularity: "0:00:01"
+ $.definition[0].points: 50
+ $.definition[0].timespan: "0:00:50"
+
+ - name: get patched archive policy
+ GET: $LAST_URL
+ response_headers:
+ content-type: /application/json/
+ response_json_paths:
+ $.name: medium
+ $.definition[0].granularity: "0:00:01"
+ $.definition[0].points: 50
+ $.definition[0].timespan: "0:00:50"
+
+# Unexpected methods
+
+ - name: post single archive
+ POST: $LAST_URL
+ status: 405
+
+ - name: put single archive
+ PUT: $LAST_URL
+ status: 405
+
+# Create another one and then test duplication
+
+ - name: create second policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: large
+ definition:
+ - granularity: 1 hour
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/large
+ status: 201
+
+ - name: create duplicate policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: large
+ definition:
+ - granularity: 1 hour
+ status: 409
+ response_strings:
+ - Archive policy large already exists
+
+# Create a unicode named policy
+
+ - name: post unicode policy name
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: ✔éñ☃
+ definition:
+ - granularity: 1 minute
+ points: 20
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ response_json_paths:
+ name: ✔éñ☃
+
+ - name: retrieve unicode policy name
+ GET: $LOCATION
+ response_json_paths:
+ name: ✔éñ☃
+
+ - name: post small unicode policy name
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: æ
+ definition:
+ - granularity: 1 minute
+ points: 20
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/%C3%A6
+ response_json_paths:
+ name: æ
+
+ - name: retrieve small unicode policy name
+ GET: $LOCATION
+ response_json_paths:
+ name: æ
+
+# List the collection
+
+ - name: get archive policy list
+ GET: /v1/archive_policy
+ response_strings:
+ - '"name": "medium"'
+ - '"name": "large"'
+ response_json_paths:
+ $[?name = "large"].definition[?granularity = "1:00:00"].points: null
+ $[?name = "medium"].definition[?granularity = "0:00:02"].points: null
+
+# Delete one as non-admin
+
+ - name: delete single archive non admin
+ DELETE: /v1/archive_policy/medium
+ status: 403
+
+# Delete one
+
+ - name: delete single archive
+ DELETE: /v1/archive_policy/medium
+ request_headers:
+ x-roles: admin
+ status: 204
+
+# It really is gone
+
+ - name: confirm delete
+ GET: $LAST_URL
+ status: 404
+
+# Fail to delete one that does not exist
+
+ - name: delete missing archive
+ DELETE: /v1/archive_policy/grandiose
+ request_headers:
+ x-roles: admin
+ status: 404
+ response_strings:
+ - Archive policy grandiose does not exist
+
+ - name: delete archive utf8
+ DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ request_headers:
+ x-roles: admin
+ status: 204
+
+ - name: delete missing archive utf8 again
+ DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ request_headers:
+ x-roles: admin
+ status: 404
+ response_strings:
+ - Archive policy ✔éñ☃ does not exist
+
+# Add metric using the policy and then be unable to delete policy
+
+ - name: create metric
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
+ x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ data:
+ archive_policy_name: large
+ status: 201
+
+ - name: delete in use policy
+ DELETE: /v1/archive_policy/large
+ request_headers:
+ x-roles: admin
+ status: 400
+ response_strings:
+ - Archive policy large is still in use
+
+# Attempt to create illogical policies
+
+ - name: create illogical policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: complex
+ definition:
+ - granularity: 1 second
+ points: 60
+ timespan: "0:01:01"
+ status: 400
+ response_strings:
+ - timespan ≠ granularity × points
+
+ - name: create invalid points policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: complex
+ definition:
+ - granularity: 0
+ points: 60
+ status: 400
+ response_strings:
+ - "Invalid input: not a valid value for dictionary value"
+
+ - name: create invalid granularity policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: complex
+ definition:
+ - granularity: 10
+ points: 0
+ status: 400
+ response_strings:
+ - "Invalid input: not a valid value for dictionary value"
+
+ - name: create identical granularities policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: complex
+ definition:
+ - granularity: 1 second
+ points: 60
+ - granularity: 1 second
+ points: 120
+ status: 400
+ response_strings:
+ - "More than one archive policy uses granularity `1.0'"
+
+ - name: policy invalid unit
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: 227d0e1f-4295-4e4b-8515-c296c47d71d3
+ definition:
+ - granularity: 1 second
+ timespan: "1 shenanigan"
+ status: 400
+
+# Non admin user attempt
+
+ - name: fail to create policy non-admin
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-user-id: b45187c5-150b-4730-bcb2-b5e04e234220
+ x-project-id: 16764ee0-bffe-4843-aa36-04b002cdbc7c
+ data:
+ name: f1d150d9-02ad-4fe7-8872-c64b2bcaaa97
+ definition:
+ - granularity: 1 minute
+ points: 20
+ status: 403
+ response_strings:
+ - Access was denied to this resource
+
+# Back windows
+
+ - name: policy with back window
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: 7720a99d-cd3b-4aa4-8a6f-935bf0d46ded
+ back_window: 1
+ definition:
+ - granularity: 10s
+ points: 20
+ status: 201
+ response_json_paths:
+ $.back_window: 1
+ $.definition[0].timespan: "0:03:20"
+
+ - name: policy no back window
+    desc: and an integer granularity defaults to seconds
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: 22f2b99f-e629-4170-adc4-09b65635e056
+ back_window: 0
+ definition:
+ - granularity: 10
+ points: 20
+ status: 201
+ response_json_paths:
+ $.back_window: 0
+ $.definition[0].points: 20
+ $.definition[0].timespan: "0:03:20"
+
+# Timespan, points, granularity input tests
+
+ - name: policy float granularity
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: 595228db-ea29-4415-9d5b-ecb5366abb1b
+ definition:
+ - timespan: 1 hour
+ points: 1000
+ status: 201
+ response_json_paths:
+ $.definition[0].points: 1000
+ $.definition[0].granularity: "0:00:04"
+ $.definition[0].timespan: "1:06:40"
+
+ - name: policy float timespan
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: 6bc72791-a27e-4417-a589-afc6d2067a38
+ definition:
+ - timespan: 1 hour
+ granularity: 7s
+ status: 201
+ response_json_paths:
+ $.definition[0].points: 514
+ $.definition[0].granularity: "0:00:07"
+ $.definition[0].timespan: "0:59:58"
diff --git a/gnocchi/tests/functional/gabbits/async.yaml b/gnocchi/tests/functional/gabbits/async.yaml
new file mode 100644
index 00000000..fd2f97ae
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/async.yaml
@@ -0,0 +1,71 @@
+#
+# Test async processing of measures.
+#
+
+fixtures:
+ - ConfigFixture
+
+tests:
+
+ - name: create archive policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: moderate
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: make a generic resource
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147
+ x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d
+ content-type: application/json
+ data:
+ id: 41937416-1644-497d-a0ed-b43d55a2b0ea
+ started_at: "2015-06-06T02:02:02.000000"
+ metrics:
+ some.counter:
+ archive_policy_name: moderate
+ status: 201
+
+  - name: confirm no measures yet
+ GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
+ request_headers:
+ x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147
+ x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d
+ content-type: application/json
+ response_json_paths:
+ $: []
+
+ - name: post some measures
+ POST: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
+ request_headers:
+ x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147
+ x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d
+ content-type: application/json
+ data:
+ - timestamp: "2015-06-06T14:33:00"
+ value: 11
+ - timestamp: "2015-06-06T14:35:00"
+ value: 12
+ status: 202
+
+# This requires a poll as the measures are not immediately
+# aggregated.
+
+ - name: get some measures
+ GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
+ request_headers:
+ x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147
+ x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d
+ poll:
+ count: 50
+ delay: .1
+ response_strings:
+ - "2015"
+ response_json_paths:
+ $[-1][-1]: 12
diff --git a/gnocchi/tests/functional/gabbits/base.yaml b/gnocchi/tests/functional/gabbits/base.yaml
new file mode 100644
index 00000000..ef097711
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/base.yaml
@@ -0,0 +1,168 @@
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+
+- name: get information on APIs
+ desc: Root URL must return information about API versions
+ GET: /
+ response_headers:
+ content-type: /^application\/json/
+ response_json_paths:
+ $.versions.[0].id: "v1.0"
+ $.versions.[0].status: "CURRENT"
+
+- name: archive policy post success
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test1
+ definition:
+ - granularity: 1 minute
+ points: 20
+ status: 201
+ response_headers:
+ content-type: /^application\/json/
+ location: $SCHEME://$NETLOC/v1/archive_policy/test1
+ response_json_paths:
+ $.name: test1
+ $.definition.[0].granularity: 0:01:00
+ $.definition.[0].points: 20
+ $.definition.[0].timespan: 0:20:00
+
+- name: post archive policy no auth
+ desc: this confirms that auth handling comes before data validation
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ definition:
+ - granularity: 1 second
+ points: 20
+ status: 403
+
+- name: post metric with archive policy
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
+ x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ data:
+ archive_policy_name: test1
+ status: 201
+ response_headers:
+ content-type: /application\/json/
+ response_json_paths:
+ $.archive_policy_name: test1
+
+- name: retrieve metric info
+ GET: $LOCATION
+ status: 200
+ request_headers:
+ content_type: /application\/json/
+ x-roles: admin
+ response_json_paths:
+ $.archive_policy.name: test1
+ $.created_by_user_id: 93180da9-7c15-40d3-a050-a374551e52ee
+ $.created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f
+
+- name: list the one metric
+ GET: /v1/metric
+ status: 200
+ response_json_paths:
+ $[0].archive_policy.name: test1
+
+- name: post a single measure
+ desc: post one measure
+ POST: /v1/metric/$RESPONSE['$[0].id']/measures
+ request_headers:
+ content-type: application/json
+ x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
+ x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ data:
+ - timestamp: "2013-01-01 23:23:20"
+ value: 1234.2
+ status: 202
+
+- name: Get list of resource type and URL
+  desc: Resources index page should return the list of resource types and their URLs
+ GET: /v1/resource/
+ response_headers:
+ content-type: /^application\/json/
+ status: 200
+ response_json_paths:
+ $.generic: $SCHEME://$NETLOC/v1/resource/generic
+
+- name: post generic resource
+ POST: /v1/resource/generic
+ request_headers:
+ content-type: application/json
+ x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
+ x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ data:
+ id: 5b7ebe90-4ad2-4c83-ad2c-f6344884ab70
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/resource/generic/5b7ebe90-4ad2-4c83-ad2c-f6344884ab70
+ response_json_paths:
+ type: generic
+ started_at: "2014-01-03T02:02:02+00:00"
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f
+
+- name: post generic resource bad id
+ POST: /v1/resource/generic
+ request_headers:
+ content-type: application/json
+ x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
+ x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ data:
+ id: 1.2.3.4
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
+ project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/resource/generic/2d869568-70d4-5ed6-9891-7d7a3bbf572d
+ response_json_paths:
+ type: generic
+ started_at: "2014-01-03T02:02:02+00:00"
+ project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
+ created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ id: 2d869568-70d4-5ed6-9891-7d7a3bbf572d
+ original_resource_id: 1.2.3.4
+
+- name: get status denied
+ GET: /v1/status
+ status: 403
+
+- name: get status
+ GET: /v1/status
+ request_headers:
+ content-type: application/json
+ x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
+ x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ x-roles: admin
+ response_json_paths:
+ $.storage.`len`: 2
+
+- name: get status, no details
+ GET: /v1/status?details=False
+ request_headers:
+ content-type: application/json
+ x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
+ x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
+ x-roles: admin
+ response_json_paths:
+ $.storage.`len`: 1
diff --git a/gnocchi/tests/functional/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml
new file mode 100644
index 00000000..a121f6fb
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/batch-measures.yaml
@@ -0,0 +1,295 @@
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: simple
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create metric
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: simple
+ status: 201
+
+ - name: push measurements to metric
+ POST: /v1/batch/metrics/measures
+ request_headers:
+ content-type: application/json
+ data:
+ $RESPONSE['$.id']:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: push measurements to unknown metrics
+ POST: /v1/batch/metrics/measures
+ request_headers:
+ content-type: application/json
+ data:
+ 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5C:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 400
+ response_strings:
+ - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5c, 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d"
+
+ - name: push measurements to unknown named metrics
+ POST: /v1/batch/resources/metrics/measures
+ request_headers:
+ content-type: application/json
+ data:
+ 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D:
+ cpu_util:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ 46c9418d-d63b-4cdd-be89-8f57ffc5952e:
+ disk.iops:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 400
+ response_strings:
+ - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d/cpu_util, 46c9418d-d63b-4cdd-be89-8f57ffc5952e/disk.iops"
+
+ - name: create second metric
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: simple
+ status: 201
+
+ - name: post a resource
+ POST: /v1/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ id: 46c9418d-d63b-4cdd-be89-8f57ffc5952e
+ user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
+ project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
+ metrics:
+ disk.iops:
+ archive_policy_name: simple
+ cpu_util:
+ archive_policy_name: simple
+ status: 201
+
+ - name: post a second resource
+ POST: /v1/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ id: f0f6038f-f82c-4f30-8d81-65db8be249fe
+ user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
+ project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
+ metrics:
+ net.speed:
+ archive_policy_name: simple
+ mem_usage:
+ archive_policy_name: simple
+ status: 201
+
+ - name: list metrics
+ GET: /v1/metric
+
+ - name: push measurements to two metrics
+ POST: /v1/batch/metrics/measures
+ request_headers:
+ content-type: application/json
+ data:
+ $RESPONSE['$[0].id']:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ $RESPONSE['$[1].id']:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: push measurements to two named metrics
+ POST: /v1/batch/resources/metrics/measures
+ request_headers:
+ content-type: application/json
+ data:
+ 46c9418d-d63b-4cdd-be89-8f57ffc5952e:
+ disk.iops:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ cpu_util:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ f0f6038f-f82c-4f30-8d81-65db8be249fe:
+ mem_usage:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ net.speed:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: create archive policy rule for auto
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: rule_auto
+ metric_pattern: "auto.*"
+ archive_policy_name: simple
+ status: 201
+
+  - name: push measurements to unknown named metrics and create them
+ POST: /v1/batch/resources/metrics/measures?create_metrics=true
+ request_headers:
+ content-type: application/json
+ data:
+ 46c9418d-d63b-4cdd-be89-8f57ffc5952e:
+ auto.test:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
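+# The create_metrics=true query string above lets the server create missing
+# named metrics on the fly, picking the archive policy through the matching
+# archive policy rule (rule_auto here). A hedged Python sketch, reusing the
+# hypothetical http://localhost:8041 endpoint:
+#
+#   import requests
+#   requests.post("http://localhost:8041/v1/batch/resources/metrics/measures",
+#                 params={"create_metrics": "true"},
+#                 json={"46c9418d-d63b-4cdd-be89-8f57ffc5952e": {"auto.test": [
+#                     {"timestamp": "2015-03-06T14:34:12", "value": 12}]}})
+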
+ - name: get created metric to check creation
+ GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test
+
+  - name: ensure measures have been posted
+ GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test/measures?refresh=true&start=2015-03-06T14:34
+ response_json_paths:
+ $:
+ - ["2015-03-06T14:34:12+00:00", 1.0, 12.0]
+
+  - name: push measurements to unknown named metrics and resources with create_metrics and uuid resource ids
+ POST: /v1/batch/resources/metrics/measures?create_metrics=true
+ request_headers:
+ content-type: application/json
+ accept: application/json
+ data:
+ aaaaaaaa-d63b-4cdd-be89-111111111111:
+ auto.test:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ bbbbbbbb-d63b-4cdd-be89-111111111111:
+ auto.test:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+
+ status: 400
+ response_json_paths:
+ $.description.cause: "Unknown resources"
+ $.description.detail[/original_resource_id]:
+ - original_resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111"
+ resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111"
+ - original_resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111"
+ resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111"
+
+  - name: push measurements to unknown named metrics and resources with create_metrics where a resource is listed several times
+ POST: /v1/batch/resources/metrics/measures?create_metrics=true
+ request_headers:
+ content-type: application/json
+ accept: application/json
+ data:
+ aaaaaaaa-d63b-4cdd-be89-111111111111:
+ auto.test:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ auto.test2:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ bbbbbbbb-d63b-4cdd-be89-111111111111:
+ auto.test:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+
+ status: 400
+ response_json_paths:
+ $.description.cause: "Unknown resources"
+ $.description.detail[/original_resource_id]:
+ - original_resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111"
+ resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111"
+ - original_resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111"
+ resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111"
+
+  - name: push measurements to unknown named metrics and resources with create_metrics and a non-uuid resource id
+ POST: /v1/batch/resources/metrics/measures?create_metrics=true
+ request_headers:
+ content-type: application/json
+ accept: application/json
+ data:
+ foobar:
+ auto.test:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+
+ status: 400
+ response_json_paths:
+ $.description.cause: "Unknown resources"
+ $.description.detail:
+ - resource_id: "6b8e287d-c01a-538c-979b-a819ee49de5d"
+ original_resource_id: "foobar"
+
+  - name: push wrongly formatted measure objects to named metrics with create_metrics
+ POST: /v1/batch/resources/metrics/measures?create_metrics=true
+ request_headers:
+ content-type: application/json
+ accept: application/json
+ data:
+ 46c9418d-d63b-4cdd-be89-8f57ffc5952e:
+ auto.test:
+ - [ "2015-03-06T14:33:57", 43.1]
+ - [ "2015-03-06T14:34:12", 12]
+ status: 400
+ response_strings:
+ - "Invalid format for measures"
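+
+# As the last test shows, each measure must be an object with "timestamp" and
+# "value" keys; bare [timestamp, value] pairs are rejected with a 400. A
+# hedged sketch of the accepted shape (Python literal, hypothetical values):
+#
+#   [{"timestamp": "2015-03-06T14:33:57", "value": 43.1}]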
diff --git a/gnocchi/tests/functional/gabbits/cors.yaml b/gnocchi/tests/functional/gabbits/cors.yaml
new file mode 100644
index 00000000..bd2395d5
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/cors.yaml
@@ -0,0 +1,21 @@
+fixtures:
+ - ConfigFixture
+
+tests:
+ - name: get CORS headers for non-allowed
+ OPTIONS: /v1/status
+ request_headers:
+ Origin: http://notallowed.com
+ Access-Control-Request-Method: GET
+ response_forbidden_headers:
+ - Access-Control-Allow-Origin
+ - Access-Control-Allow-Methods
+
+ - name: get CORS headers for allowed
+ OPTIONS: /v1/status
+ request_headers:
+ Origin: http://foobar.com
+ Access-Control-Request-Method: GET
+ response_headers:
+ Access-Control-Allow-Origin: http://foobar.com
+ Access-Control-Allow-Methods: GET
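+
+# A hedged Python sketch of the allowed-origin preflight above; the
+# http://foobar.com origin comes from the fixture's CORS configuration and
+# the host is hypothetical:
+#
+#   import requests
+#   r = requests.options("http://localhost:8041/v1/status",
+#                        headers={"Origin": "http://foobar.com",
+#                                 "Access-Control-Request-Method": "GET"})
+#   assert r.headers.get("Access-Control-Allow-Origin") == "http://foobar.com"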
diff --git a/gnocchi/tests/functional/gabbits/healthcheck.yaml b/gnocchi/tests/functional/gabbits/healthcheck.yaml
new file mode 100644
index 00000000..a2cf6fd1
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/healthcheck.yaml
@@ -0,0 +1,7 @@
+fixtures:
+ - ConfigFixture
+
+tests:
+ - name: healthcheck
+ GET: /healthcheck
+ status: 200
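+
+# The test calls the endpoint without any auth headers, so a monitoring probe
+# can likely do the same; a hedged Python sketch (hypothetical host):
+#
+#   import requests
+#   assert requests.get("http://localhost:8041/healthcheck").status_code == 200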
diff --git a/gnocchi/tests/functional/gabbits/history.yaml b/gnocchi/tests/functional/gabbits/history.yaml
new file mode 100644
index 00000000..0bdc47fd
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/history.yaml
@@ -0,0 +1,160 @@
+#
+# Test the resource history related API
+#
+
+fixtures:
+ - ConfigFixture
+
+tests:
+ - name: create archive policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: low
+ definition:
+ - granularity: 1 hour
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/low
+
+# Try creating a new generic resource
+
+ - name: post generic resource
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
+ content-type: /^application\/json/
+ response_json_paths:
+ $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c
+ $.user_id: 0fbb231484614b1a80131fc22f6afc9c
+
+# Update it twice
+ - name: patch resource user_id
+ PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ status: 200
+ response_json_paths:
+ user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+ - name: patch resource project_id
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ project_id: fe20a931-1012-4cc6-addc-39556ec60907
+ metrics:
+ mymetric:
+ archive_policy_name: low
+ status: 200
+ response_json_paths:
+ user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ project_id: fe20a931-1012-4cc6-addc-39556ec60907
+
+# List resources
+
+ - name: list all resources without history
+ GET: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $[0].user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ $[0].project_id: fe20a931-1012-4cc6-addc-39556ec60907
+
+ - name: list all resources with history
+ GET: $LAST_URL
+ request_headers:
+ accept: application/json; details=True; history=True
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $.`len`: 3
+ $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c
+ $[0].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ $[1].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907
+
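+# History and details are requested through the Accept header rather than the
+# query string. A hedged Python sketch of the listing above (hypothetical
+# host, header value taken from the test):
+#
+#   import requests
+#   requests.get("http://localhost:8041/v1/resource/generic",
+#                headers={"Accept":
+#                         "application/json; details=True; history=True"})
+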
+ - name: patch resource metrics
+ PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ foo:
+ archive_policy_name: low
+ status: 200
+
+ - name: list all resources with history no change after metrics update
+ GET: /v1/resource/generic
+ request_headers:
+ accept: application/json; details=True; history=True
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $.`len`: 3
+ $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c
+ $[0].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ $[1].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907
+
+ - name: create new metrics
+ POST: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ foobar:
+ archive_policy_name: low
+ status: 204
+
+ - name: list all resources with history no change after metrics creation
+ GET: /v1/resource/generic
+ request_headers:
+ accept: application/json; details=True; history=True
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $.`len`: 3
+ $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c
+ $[0].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ $[1].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67
+ $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907
diff --git a/gnocchi/tests/functional/gabbits/metric-granularity.yaml b/gnocchi/tests/functional/gabbits/metric-granularity.yaml
new file mode 100644
index 00000000..47a5efe3
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/metric-granularity.yaml
@@ -0,0 +1,60 @@
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: cookies
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create valid metric
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: cookies
+ status: 201
+
+ - name: push measurements to metric
+ POST: /v1/metric/$RESPONSE['$.id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: get metric list
+ GET: /v1/metric
+ status: 200
+
+ - name: get measurements invalid granularity
+ GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42
+ status: 404
+ response_strings:
+ - Granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist
+
+ - name: get measurements granularity
+ GET: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[0].id']/measures?granularity=1
+ status: 200
+ poll:
+ count: 50
+ delay: .1
+ response_json_paths:
+ $:
+ - ["2015-03-06T14:33:57+00:00", 1.0, 43.1]
+ - ["2015-03-06T14:34:12+00:00", 1.0, 12.0]
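+
+# A hedged Python sketch of the granularity query above (hypothetical host
+# and metric id); note that an unsupported granularity returns a 404 rather
+# than an empty list:
+#
+#   import requests
+#   requests.get("http://localhost:8041/v1/metric/<metric-id>/measures",
+#                params={"granularity": 1})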
diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml
new file mode 100644
index 00000000..59f58b96
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/metric-list.yaml
@@ -0,0 +1,142 @@
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ x-roles: admin
+
+tests:
+ - name: create archive policy 1
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: first_archive
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create archive policy 2
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: second_archive
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create metric 1
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "disk.io.rate"
+ unit: "B/s"
+ archive_policy_name: first_archive
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: first_archive
+ $.name: disk.io.rate
+ $.unit: B/s
+
+ - name: create metric 2
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ x-user-id: 4fff6179c2fc414dbedfc8cc82d6ada7
+ x-project-id: f3ca498a61c84422b953133adb71cff8
+ data:
+ name: "disk.io.rate"
+ unit: "B/s"
+ archive_policy_name: first_archive
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: first_archive
+ $.name: disk.io.rate
+ $.unit: B/s
+
+ - name: create metric 3
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ x-user-id: faf30294217c4e1a91387d9c8f1fb1fb
+ x-project-id: f3ca498a61c84422b953133adb71cff8
+ data:
+ name: "cpu_util"
+ unit: "%"
+ archive_policy_name: first_archive
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: first_archive
+ $.name: cpu_util
+ $.unit: "%"
+
+ - name: create metric 4
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "cpu"
+ unit: "ns"
+ archive_policy_name: second_archive
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: second_archive
+ $.name: cpu
+ $.unit: ns
+
+ - name: list metrics
+ GET: /v1/metric
+ response_json_paths:
+ $.`len`: 4
+
+ - name: list metrics by id
+ GET: /v1/metric?id=$HISTORY['create metric 1'].$RESPONSE['id']
+ response_json_paths:
+ $.`len`: 1
+ $[0].name: disk.io.rate
+ $[0].archive_policy.name: first_archive
+
+ - name: list metrics by name
+ GET: /v1/metric?name=disk.io.rate
+ response_json_paths:
+ $.`len`: 2
+ $[0].name: disk.io.rate
+ $[1].name: disk.io.rate
+ $[0].archive_policy.name: first_archive
+ $[1].archive_policy.name: first_archive
+
+ - name: list metrics by unit
+ GET: /v1/metric?unit=ns
+ response_json_paths:
+ $.`len`: 1
+ $[0].name: cpu
+ $[0].archive_policy.name: second_archive
+
+ - name: list metrics by archive_policy
+ GET: /v1/metric?archive_policy_name=first_archive&sort=name:desc
+ response_json_paths:
+ $.`len`: 3
+ $[0].name: disk.io.rate
+ $[1].name: disk.io.rate
+ $[2].name: cpu_util
+ $[0].archive_policy.name: first_archive
+ $[1].archive_policy.name: first_archive
+ $[2].archive_policy.name: first_archive
+
+ - name: list metrics by user_id
+ GET: /v1/metric?user_id=faf30294217c4e1a91387d9c8f1fb1fb
+ response_json_paths:
+ $.`len`: 1
+
+ - name: list metrics by project_id
+ GET: /v1/metric?project_id=f3ca498a61c84422b953133adb71cff8
+ response_json_paths:
+ $.`len`: 2
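+
+# Listing filters combine as query parameters; a hedged Python sketch of the
+# archive-policy filter with a descending name sort (hypothetical host):
+#
+#   import requests
+#   requests.get("http://localhost:8041/v1/metric",
+#                params={"archive_policy_name": "first_archive",
+#                        "sort": "name:desc"})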
diff --git a/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml b/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml
new file mode 100644
index 00000000..f4522880
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml
@@ -0,0 +1,60 @@
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: cookies
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create metric
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: cookies
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: cookies
+
+ - name: push measurements to metric with relative timestamp
+ POST: /v1/metric/$RESPONSE['$.id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "-5 minutes"
+ value: 43.1
+ status: 202
+
+ - name: create metric 2
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: cookies
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: cookies
+
+ - name: push measurements to metric with mixed timestamps
+ POST: /v1/metric/$RESPONSE['$.id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: 1478012832
+ value: 43.1
+ - timestamp: "-5 minutes"
+ value: 43.1
+ status: 400
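+
+# Timestamps may be ISO 8601 strings, epoch numbers, or relative offsets such
+# as "-5 minutes", but the last test shows that relative and absolute forms
+# cannot be mixed in one request. A hedged sketch of the accepted payloads
+# (Python literals, hypothetical values):
+#
+#   [{"timestamp": "-5 minutes", "value": 43.1}]   # relative
+#   [{"timestamp": 1478012832, "value": 43.1}]     # epoch seconds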
diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml
new file mode 100644
index 00000000..e987c81c
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/metric.yaml
@@ -0,0 +1,331 @@
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+ - name: wrong metric
+ desc: https://bugs.launchpad.net/gnocchi/+bug/1429949
+ GET: /v1/metric/foobar
+ status: 404
+
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: cookies
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create archive policy rule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test_rule
+ metric_pattern: "disk.io.*"
+ archive_policy_name: cookies
+ status: 201
+
+ - name: create alt archive policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: cream
+ definition:
+ - granularity: 5 second
+ status: 201
+
+ - name: create alt archive policy rule
+ desc: extra rule that won't be matched
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test_ignore_rule
+ metric_pattern: "disk.*"
+ archive_policy_name: cream
+ status: 201
+
+ - name: get metric empty
+ GET: /v1/metric
+ status: 200
+ response_strings:
+ - "[]"
+
+ - name: get metric list with nonexistent sort key
+ GET: /v1/metric?sort=nonexistent_key:asc
+ status: 400
+ response_strings:
+ - "Sort key supplied is invalid: nonexistent_key"
+
+ - name: create metric with name and unit
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "disk.io.rate"
+ unit: "B/s"
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: cookies
+ $.name: disk.io.rate
+ $.unit: B/s
+
+ - name: create metric with invalid name
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "disk/io/rate"
+ unit: "B/s"
+ status: 400
+ response_strings:
+ - "'/' is not supported in metric name"
+
+ - name: create metric with name and over length unit
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "disk.io.rate"
+ unit: "over_length_unit_over_length_unit"
+ status: 400
+ response_strings:
+ # split to not match the u' in py2
+ - "Invalid input: length of value must be at most 31 for dictionary value @ data["
+ - "'unit']"
+
+  - name: create metric with name matching no rule
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "volume.io.rate"
+ status: 400
+ response_strings:
+ - No archive policy name specified and no archive policy rule found matching the metric name volume.io.rate
+
+ - name: create metric bad archive policy
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: bad-cookie
+ status: 400
+ response_strings:
+ - Archive policy bad-cookie does not exist
+
+ - name: create metric bad content-type
+ POST: /v1/metric
+ request_headers:
+ content-type: plain/text
+ data: '{"archive_policy_name": "cookies"}'
+ status: 415
+
+ - name: create valid metric
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: cookies
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: cookies
+
+ - name: get valid metric id
+ GET: /v1/metric/$RESPONSE['$.id']
+ status: 200
+ response_json_paths:
+ $.archive_policy.name: cookies
+
+ - name: push measurements to metric before epoch
+ POST: /v1/metric/$RESPONSE['$.id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "1915-03-06T14:33:57"
+ value: 43.1
+ status: 400
+ response_strings:
+ - Timestamp must be after Epoch
+
+ - name: list valid metrics
+ GET: /v1/metric
+ response_json_paths:
+ $[0].archive_policy.name: cookies
+
+ - name: push measurements to metric with bad timestamp
+ POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "1915-100-06T14:33:57"
+ value: 43.1
+ status: 400
+
+ - name: push measurements to metric epoch format
+ POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: 1425652437.0
+ value: 43.1
+ status: 202
+
+ - name: push measurements to metric
+ POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: get measurements by start
+ GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=2015-03-06T14:34
+ response_json_paths:
+ $:
+ - ["2015-03-06T14:34:12+00:00", 1.0, 12.0]
+
+ - name: get measurements by start with epoch
+ GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=1425652440
+ response_json_paths:
+ $:
+ - ["2015-03-06T14:34:12+00:00", 1.0, 12.0]
+
+ - name: get measurements from metric
+ GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true
+ response_json_paths:
+ $:
+ - ["2015-03-06T14:33:57+00:00", 1.0, 43.1]
+ - ["2015-03-06T14:34:12+00:00", 1.0, 12.0]
+
+ - name: push measurements to metric again
+ POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:34:15"
+ value: 16
+ - timestamp: "2015-03-06T14:35:12"
+ value: 9
+ - timestamp: "2015-03-06T14:35:15"
+ value: 11
+ status: 202
+
+ - name: get measurements from metric and resample
+ GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&resample=60&granularity=1
+ response_json_paths:
+ $:
+ - ["2015-03-06T14:33:00+00:00", 60.0, 43.1]
+ - ["2015-03-06T14:34:00+00:00", 60.0, 14.0]
+ - ["2015-03-06T14:35:00+00:00", 60.0, 10.0]
+
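+# Resampling reaggregates the stored points into coarser buckets at query
+# time and, as the next tests check, requires an explicit source granularity.
+# A hedged Python sketch (hypothetical host and metric id):
+#
+#   import requests
+#   requests.get("http://localhost:8041/v1/metric/<metric-id>/measures",
+#                params={"resample": 60, "granularity": 1})
+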
+ - name: get measurements from metric and resample no granularity
+ GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=60
+ status: 400
+ response_strings:
+ - A granularity must be specified to resample
+
+ - name: get measurements from metric and bad resample
+ GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=abc
+ status: 400
+
+ - name: create valid metric two
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: cookies
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: cookies
+
+ - name: push invalid measurements to metric
+ POST: /v1/metric/$RESPONSE['$.id']/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 12
+ - timestamp: "2015-03-06T14:34:12"
+ value: "foobar"
+ status: 400
+
+ - name: create valid metric three
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: cookies
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: cookies
+
+ - name: push invalid measurements to metric bis
+ POST: /v1/metric/$RESPONSE['$.id']/measures
+ request_headers:
+ content-type: application/json
+ data: 1
+ status: 400
+
+ - name: add measure unknown metric
+ POST: /v1/metric/fake/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ status: 404
+
+ - name: get metric list for authenticated user
+ request_headers:
+ x-user-id: foo
+ x-project-id: bar
+ GET: /v1/metric
+
+ - name: get measures unknown metric
+ GET: /v1/metric/fake/measures
+ status: 404
+
+ - name: get metric list for aggregates
+ GET: /v1/metric
+ status: 200
+ response_json_paths:
+ $[0].archive_policy.name: cookies
+
+ - name: get measure unknown aggregates
+ GET: /v1/aggregation/metric?metric=$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id']&aggregation=last
+ status: 404
+ response_strings:
+ - Aggregation method 'last' for metric $RESPONSE['$[0].id'] does not exist
+
+ - name: aggregate measure unknown metric
+ GET: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37
+ status: 404
+ response_strings:
+ - Metric cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 does not exist
+
+ - name: delete metric
+ DELETE: /v1/metric/$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id']
+ status: 204
+
+ - name: delete metric again
+ DELETE: $LAST_URL
+ status: 404
+
+ - name: delete non existent metric
+ DELETE: /v1/metric/foo
+ status: 404
diff --git a/gnocchi/tests/functional/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml
new file mode 100644
index 00000000..ef85a379
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/pagination.yaml
@@ -0,0 +1,506 @@
+#
+# Test the pagination API
+#
+
+fixtures:
+ - ConfigFixture
+
+tests:
+
+#
+# Create resources for this scenario
+#
+ - name: post resource 1
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 57a9e836-87b8-4a21-9e30-18a474b98fef
+ started_at: "2014-01-01T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post resource 2
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 4facbf7e-a900-406d-a828-82393f7006b3
+ started_at: "2014-01-02T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post resource 3
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 36775172-ebc9-4060-9870-a649361bc3ab
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post resource 4
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 28593168-52bb-43b5-a6db-fc2343aac02a
+ started_at: "2014-01-04T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post resource 5
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+ started_at: "2014-01-05T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+#
+# Basic resource limit/ordering tests
+#
+ - name: list first two items default order
+ GET: /v1/resource/generic?limit=2
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+ $[0].id: 57a9e836-87b8-4a21-9e30-18a474b98fef
+ $[1].id: 4facbf7e-a900-406d-a828-82393f7006b3
+
+  - name: list next three items default order
+ GET: /v1/resource/generic?limit=4&marker=4facbf7e-a900-406d-a828-82393f7006b3
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 3
+ $[0].id: 36775172-ebc9-4060-9870-a649361bc3ab
+ $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a
+ $[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+
+  - name: list first two items order by id without direction
+ GET: /v1/resource/generic?limit=2&sort=id
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+ response_json_paths:
+ $.`len`: 2
+ $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+ $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a
+
+ - name: list first two items order by id
+ GET: /v1/resource/generic?limit=2&sort=id:asc
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+ $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+ $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a
+
+  - name: list next three items order by id
+ GET: /v1/resource/generic?limit=4&sort=id:asc&marker=28593168-52bb-43b5-a6db-fc2343aac02a
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 3
+ $[0].id: 36775172-ebc9-4060-9870-a649361bc3ab
+ $[1].id: 4facbf7e-a900-406d-a828-82393f7006b3
+ $[2].id: 57a9e836-87b8-4a21-9e30-18a474b98fef
+
+ - name: search for some resources with limit, order and marker
+ POST: /v1/search/resource/generic?limit=2&sort=id:asc&marker=36775172-ebc9-4060-9870-a649361bc3ab
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ "or": [
+ {"=": {"id": 36775172-ebc9-4060-9870-a649361bc3ab}},
+ {"=": {"id": 4facbf7e-a900-406d-a828-82393f7006b3}},
+ {"=": {"id": 57a9e836-87b8-4a21-9e30-18a474b98fef}},
+ ]
+ response_json_paths:
+ $.`len`: 2
+ $[0].id: 4facbf7e-a900-406d-a828-82393f7006b3
+ $[1].id: 57a9e836-87b8-4a21-9e30-18a474b98fef
+
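+# Paging is keyset-style: limit plus a marker naming the last item of the
+# previous page, with sort fixing the order. A hedged Python sketch of the
+# listing variant (hypothetical host, marker taken from the tests above):
+#
+#   import requests
+#   requests.get("http://localhost:8041/v1/resource/generic",
+#                params={"limit": 2, "sort": "id:asc",
+#                        "marker": "28593168-52bb-43b5-a6db-fc2343aac02a"})
+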
+#
+# Invalid resource limit/ordering
+#
+ - name: invalid sort_key
+ GET: /v1/resource/generic?sort=invalid:asc
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: invalid sort_dir
+ GET: /v1/resource/generic?sort=id:invalid
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: invalid marker
+ GET: /v1/resource/generic?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: invalid negative limit
+ GET: /v1/resource/generic?limit=-2
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: invalid limit
+ GET: /v1/resource/generic?limit=invalid
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+#
+# Default limit
+#
+
+ - name: post resource 6
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 465f87b2-61f7-4118-adec-1d96a78af401
+ started_at: "2014-01-02T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post resource 7
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 9b6af245-57df-4ed6-a8c0-f64b77d8867f
+ started_at: "2014-01-28T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post resource 8
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: d787aa85-5743-4443-84f9-204270bc141a
+ started_at: "2014-01-31T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: default limit
+ GET: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 7
+ $[-1].id: 9b6af245-57df-4ed6-a8c0-f64b77d8867f
+
+
+ - name: update resource 5
+ PATCH: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ ended_at: "2014-01-30T02:02:02.000000"
+
+ - name: update resource 5 again
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ ended_at: "2014-01-31T02:02:02.000000"
+
+  - name: default limit with history and multiple sort keys
+ GET: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:desc-nullslast
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 7
+ $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+ $[0].ended_at: "2014-01-31T02:02:02+00:00"
+ $[1].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+ $[1].ended_at: "2014-01-30T02:02:02+00:00"
+ $[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
+ $[2].ended_at: null
+
+#
+# Create metrics
+#
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: dummy_policy
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create metric with name1
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ name: "dummy1"
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: create metric with name2
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ name: "dummy2"
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: create metric with name3
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ name: "dummy3"
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: create metric with name4
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ name: "dummy4"
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: create metric with name5
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ name: "dummy5"
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: list all default order
+ GET: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+
+ - name: list first two metrics default order
+ GET: /v1/metric?limit=2
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+ $[0].name: $RESPONSE['$[0].name']
+ $[1].name: $RESPONSE['$[1].name']
+
+ - name: list next three metrics default order
+ GET: /v1/metric?limit=4&marker=$HISTORY['list all default order'].$RESPONSE['$[1].id']
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 3
+ $[0].name: $HISTORY['list all default order'].$RESPONSE['$[2].name']
+ $[1].name: $HISTORY['list all default order'].$RESPONSE['$[3].name']
+ $[2].name: $HISTORY['list all default order'].$RESPONSE['$[4].name']
+
+  - name: list first two metrics order by name without direction
+ GET: /v1/metric?limit=2&sort=name
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+ response_json_paths:
+ $.`len`: 2
+ $[0].name: dummy1
+ $[1].name: dummy2
+
+  - name: list first two metrics order by name
+ GET: /v1/metric?limit=2&sort=name:asc
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+ $[0].name: dummy1
+ $[1].name: dummy2
+
+  - name: list next three metrics order by name
+ GET: /v1/metric?limit=4&sort=name:asc&marker=$RESPONSE['$[1].id']
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 3
+ $[0].name: dummy3
+ $[1].name: dummy4
+ $[2].name: dummy5
+
+#
+# Default metric limit
+#
+
+ - name: create metric with name6
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: create metric with name7
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: create metric with name8
+ POST: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ archive_policy_name: dummy_policy
+ status: 201
+
+ - name: default metric limit
+ GET: /v1/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 7
+
+#
+# Invalid metrics limit/ordering
+#
+
+ - name: metric invalid sort_key
+ GET: /v1/metric?sort=invalid:asc
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: metric invalid sort_dir
+ GET: /v1/metric?sort=id:invalid
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: metric invalid marker
+ GET: /v1/metric?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: metric invalid negative limit
+ GET: /v1/metric?limit=-2
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: metric invalid limit
+ GET: /v1/metric?limit=invalid
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
diff --git a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml
new file mode 100644
index 00000000..c0338476
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml
@@ -0,0 +1,169 @@
+fixtures:
+ - ConfigFixture
+
+tests:
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: low
+ definition:
+ - granularity: 1 second
+ - granularity: 300 seconds
+ status: 201
+
+ - name: create resource 1
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ id: 4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ metrics:
+ cpu.util:
+ archive_policy_name: low
+ status: 201
+
+ - name: post cpuutil measures 1
+ POST: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: create resource 2
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ id: 1447CD7E-48A6-4C50-A991-6677CC0D00E6
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ metrics:
+ cpu.util:
+ archive_policy_name: low
+ status: 201
+
+ - name: post cpuutil measures 2
+ POST: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 23
+ - timestamp: "2015-03-06T14:34:12"
+ value: 8
+ status: 202
+
+ - name: create resource 3
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ id: 33333BC5-5948-4F29-B7DF-7DE607660452
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171
+ metrics:
+ cpu.util:
+ archive_policy_name: low
+ status: 201
+
+ - name: post cpuutil measures 3
+ POST: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 230
+ - timestamp: "2015-03-06T14:34:12"
+ value: 45.41
+ status: 202
+
+ - name: aggregate metric with groupby on project_id
+ POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ =:
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - measures:
+ - ["2015-03-06T14:30:00+00:00", 300.0, 21.525]
+ - ["2015-03-06T14:33:57+00:00", 1.0, 33.05]
+ - ["2015-03-06T14:34:12+00:00", 1.0, 10.0]
+ group:
+ project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ - measures:
+ - ["2015-03-06T14:30:00+00:00", 300.0, 137.70499999999998]
+ - ["2015-03-06T14:33:57+00:00", 1.0, 230.0]
+ - ["2015-03-06T14:34:12+00:00", 1.0, 45.41]
+ group:
+ project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171
+
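+# Cross-resource aggregation POSTs a resource search filter as the body while
+# the grouping keys go in the query string. A hedged Python sketch of the
+# call above (hypothetical host):
+#
+#   import requests
+#   requests.post("http://localhost:8041/v1/aggregation/resource/generic"
+#                 "/metric/cpu.util",
+#                 params={"groupby": "project_id"},
+#                 json={"=": {"user_id":
+#                             "6c865dd0-7945-4e08-8b27-d0d7f1c2b667"}})
+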
+ - name: aggregate metric with groupby on project_id and invalid group
+ POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=thisisdumb
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ =:
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ status: 400
+ response_strings:
+ - Invalid groupby attribute
+
+ - name: aggregate metric with groupby on project_id and user_id
+ POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id
+ request_headers:
+ x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ content-type: application/json
+ data:
+ =:
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $:
+ - measures:
+ - ['2015-03-06T14:30:00+00:00', 300.0, 21.525]
+ - ['2015-03-06T14:33:57+00:00', 1.0, 33.05]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 10.0]
+ group:
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
+ - measures:
+ - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998]
+ - ['2015-03-06T14:33:57+00:00', 1.0, 230.0]
+ - ['2015-03-06T14:34:12+00:00', 1.0, 45.41]
+ group:
+ user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
+ project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171
diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml
new file mode 100644
index 00000000..fca3aaa3
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/resource-type.yaml
@@ -0,0 +1,772 @@
+#
+# Test the resource type API to achieve coverage of just the
+# ResourceTypesController and ResourceTypeController class code.
+#
+
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+
+  - name: list resource types
+ desc: only legacy resource types are present
+ GET: /v1/resource_type
+ response_json_paths:
+ $.`len`: 1
+
+# Some bad cases
+
+ - name: post resource type as non-admin
+ POST: $LAST_URL
+ data:
+ name: my_custom_resource
+ request_headers:
+ content-type: application/json
+ status: 403
+
+ - name: post resource type with existing name
+ POST: /v1/resource_type
+ request_headers:
+ x-roles: admin
+ content-type: application/json
+ data:
+ name: my_custom_resource
+ attributes:
+ project_id:
+ type: string
+ status: 400
+
+ - name: post resource type bad string
+ POST: $LAST_URL
+ request_headers:
+ x-roles: admin
+ content-type: application/json
+ data:
+ name: my_custom_resource
+ attributes:
+ foo:
+ type: string
+ max_length: 32
+ min_length: 5
+ noexist: foo
+ status: 400
+ response_strings:
+      # NOTE(sileht): We would prefer a better message, but voluptuous seems a bit
+      # lost when an Any has many dicts with the same key, here "type"
+ # - "Invalid input: extra keys not allowed @ data[u'attributes'][u'foo'][u'noexist']"
+ # - "Invalid input: not a valid value for dictionary value @ data[u'attributes'][u'foo'][u'type']"
+ - "Invalid input:"
+
+ - name: post resource type bad min_length value
+ POST: $LAST_URL
+ request_headers:
+ x-roles: admin
+ content-type: application/json
+ data:
+ name: my_custom_resource
+ attributes:
+ name:
+ type: string
+ required: true
+ max_length: 2
+ min_length: 5
+ status: 400
+
+ - name: post resource type bad min value
+ POST: $LAST_URL
+ request_headers:
+ x-roles: admin
+ content-type: application/json
+ data:
+ name: my_custom_resource
+ attributes:
+ int:
+ type: number
+ required: false
+ max: 3
+ min: 8
+ status: 400
+
+# Create a type
+
+ - name: post resource type
+ POST: $LAST_URL
+ request_headers:
+ x-roles: admin
+ content-type: application/json
+ data:
+ name: my_custom_resource
+ attributes:
+ name:
+ type: string
+ required: true
+ max_length: 5
+ min_length: 2
+ foobar:
+ type: string
+ required: false
+ uuid:
+ type: uuid
+ int:
+ type: number
+ required: false
+ min: -2
+ max: 3
+ intnomin:
+ type: number
+ required: false
+ max: 3
+ float:
+ type: number
+ required: false
+ min: -2.3
+ bool:
+ type: bool
+ required: false
+ status: 201
+ response_json_paths:
+ $.name: my_custom_resource
+ $.state: active
+ $.attributes:
+ name:
+ type: string
+ required: True
+ max_length: 5
+ min_length: 2
+ foobar:
+ type: string
+ required: False
+ max_length: 255
+ min_length: 0
+ uuid:
+ type: uuid
+ required: True
+ int:
+ type: number
+ required: False
+ min: -2
+ max: 3
+ intnomin:
+ type: number
+ required: False
+ min:
+ max: 3
+ float:
+ type: number
+ required: false
+ min: -2.3
+ max:
+ bool:
+ type: bool
+ required: false
+
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource
+
+# Control the created type
+
+ - name: relist resource types
+ desc: we have a resource type now
+ GET: $LAST_URL
+ response_json_paths:
+ $.`len`: 2
+ $.[1].name: my_custom_resource
+ $.[1].state: active
+
+ - name: get the custom resource type
+ GET: /v1/resource_type/my_custom_resource
+ response_json_paths:
+ $.name: my_custom_resource
+ $.state: active
+ $.attributes:
+ name:
+ type: string
+ required: True
+ min_length: 2
+ max_length: 5
+ foobar:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ uuid:
+ type: uuid
+ required: True
+ int:
+ type: number
+ required: False
+ min: -2
+ max: 3
+ intnomin:
+ type: number
+ required: False
+ min:
+ max: 3
+ float:
+ type: number
+ required: false
+ min: -2.3
+ max:
+ bool:
+ type: bool
+ required: false
+
+# Some bad cases on the type
+
+ - name: delete as non-admin
+ DELETE: $LAST_URL
+ status: 403
+
+# Bad resources for this type
+
+ - name: post invalid resource
+ POST: /v1/resource/my_custom_resource
+ request_headers:
+ content-type: application/json
+ data:
+ id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ name: toolong!!!
+ foobar: what
+ uuid: 07eb339e-23c0-4be2-be43-cd8247afae3b
+ status: 400
+ response_strings:
+ # split to not match the u' in py2
+ - "Invalid input: length of value must be at most 5 for dictionary value @ data["
+ - "'name']"
+
+ - name: post invalid resource uuid
+ POST: $LAST_URL
+ request_headers:
+ content-type: application/json
+ data:
+ id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ name: too
+ foobar: what
+ uuid: really!
+ status: 400
+ response_strings:
+ # split to not match the u' in py2
+ - "Invalid input: not a valid value for dictionary value @ data["
+ - "'uuid']"
+
+# Good resources for this type
+
+ - name: post custom resource
+ POST: $LAST_URL
+ request_headers:
+ content-type: application/json
+ data:
+ id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ name: bar
+ foobar: what
+ uuid: e495ebad-be64-46c0-81d6-b079beb48df9
+ int: 1
+ status: 201
+ response_json_paths:
+ $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $.name: bar
+ $.foobar: what
+
+ - name: patch custom resource
+ PATCH: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747
+ request_headers:
+ content-type: application/json
+ data:
+ name: foo
+ status: 200
+ response_json_paths:
+ $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $.name: foo
+ $.foobar: what
+ $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9
+ $.int: 1
+
+ - name: get resource
+ GET: $LAST_URL
+ request_headers:
+ content-type: application/json
+ response_json_paths:
+ $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $.name: foo
+ $.foobar: what
+ $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9
+ $.int: 1
+
+ - name: post resource with default
+ POST: /v1/resource/my_custom_resource
+ request_headers:
+ content-type: application/json
+ data:
+ id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59
+ name: foo
+ uuid: e495ebad-be64-46c0-81d6-b079beb48df9
+ status: 201
+ response_json_paths:
+ $.id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59
+ $.name: foo
+ $.foobar:
+ $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9
+ $.int:
+
+ - name: list resource history
+ GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast
+ request_headers:
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+ $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $[0].name: bar
+ $[0].foobar: what
+ $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $[1].name: foo
+ $[1].foobar: what
+
+# CRUD resource type attributes
+
+ - name: post a new resource attribute
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: add
+ path: /attributes/newstuff
+ value:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ - op: add
+ path: /attributes/newfilled
+ value:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ options:
+ fill: "filled"
+ - op: add
+ path: /attributes/newbool
+ value:
+ type: bool
+ required: True
+ options:
+ fill: True
+ - op: add
+ path: /attributes/newint
+ value:
+ type: number
+ required: True
+ min: 0
+ max: 255
+ options:
+ fill: 15
+ - op: add
+ path: /attributes/newstring
+ value:
+ type: string
+ required: True
+ min_length: 0
+ max_length: 255
+ options:
+ fill: "foobar"
+ - op: add
+ path: /attributes/newuuid
+ value:
+ type: uuid
+ required: True
+ options:
+ fill: "00000000-0000-0000-0000-000000000000"
+ - op: remove
+ path: /attributes/foobar
+ status: 200
+ response_json_paths:
+ $.name: my_custom_resource
+ $.attributes:
+ name:
+ type: string
+ required: True
+ min_length: 2
+ max_length: 5
+ uuid:
+ type: uuid
+ required: True
+ int:
+ type: number
+ required: False
+ min: -2
+ max: 3
+ intnomin:
+ type: number
+ required: False
+ min:
+ max: 3
+ float:
+ type: number
+ required: false
+ min: -2.3
+ max:
+ bool:
+ type: bool
+ required: false
+ newstuff:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ newfilled:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ newstring:
+ type: string
+ required: True
+ min_length: 0
+ max_length: 255
+ newbool:
+ type: bool
+ required: True
+ newint:
+ type: number
+ required: True
+ min: 0
+ max: 255
+ newuuid:
+ type: uuid
+ required: True
+
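+# Attributes are edited with RFC 6902 JSON Patch documents sent as
+# application/json-patch+json; required attributes must carry a "fill" option
+# so existing resources can be backfilled, as the next tests check. A hedged
+# Python sketch adding a hypothetical optional attribute (hypothetical host,
+# admin auth headers omitted):
+#
+#   import requests
+#   requests.patch("http://localhost:8041/v1/resource_type/my_custom_resource",
+#                  headers={"Content-Type": "application/json-patch+json"},
+#                  json=[{"op": "add", "path": "/attributes/example",
+#                         "value": {"type": "string", "required": False,
+#                                   "min_length": 0, "max_length": 255}}])
+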
+ - name: post a new resource attribute with missing fill
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: add
+ path: /attributes/missing
+ value:
+ type: bool
+ required: True
+ options: {}
+ status: 400
+ response_strings:
+ - "Invalid input: Option 'fill' of resource attribute missing is invalid: must not be empty if required=True"
+
+ - name: post a new resource attribute with incorrect fill
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: add
+ path: /attributes/incorrect
+ value:
+ type: number
+ required: True
+ options:
+ fill: "a-string"
+ status: 400
+ response_strings:
+ - "Invalid input: Option 'fill' of resource attribute incorrect is invalid: expected Real"
+
+ - name: get the new custom resource type
+ GET: /v1/resource_type/my_custom_resource
+ response_json_paths:
+ $.name: my_custom_resource
+ $.attributes:
+ name:
+ type: string
+ required: True
+ min_length: 2
+ max_length: 5
+ uuid:
+ type: uuid
+ required: True
+ int:
+ type: number
+ required: False
+ min: -2
+ max: 3
+ intnomin:
+ type: number
+ required: False
+ min:
+ max: 3
+ float:
+ type: number
+ required: false
+ min: -2.3
+ max:
+ bool:
+ type: bool
+ required: false
+ newstuff:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ newfilled:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ newstring:
+ type: string
+ required: True
+ min_length: 0
+ max_length: 255
+ newbool:
+ type: bool
+ required: True
+ newint:
+ type: number
+ required: True
+ min: 0
+ max: 255
+ newuuid:
+ type: uuid
+ required: True
+
+  - name: check new attributes of existing resource
+ GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747
+ request_headers:
+ content-type: application/json
+ status: 200
+ response_json_paths:
+ $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $.name: foo
+ $.newstuff: null
+ $.newfilled: "filled"
+ $.newbool: true
+ $.newint: 15
+ $.newstring: foobar
+ $.newuuid: "00000000-0000-0000-0000-000000000000"
+
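+# The sort parameter takes <attribute>:<asc|desc>[-nullsfirst|-nullslast];
+# nullslast here sorts the live revision (revision_end is null) after
+# the closed ones.
+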
+  - name: check new attributes of existing resource history
+ GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast
+ request_headers:
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+ $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $[0].name: bar
+ $[0].newstuff: null
+ $[0].newfilled: "filled"
+ $[0].newbool: true
+ $[0].newint: 15
+ $[0].newstring: foobar
+ $[0].newuuid: "00000000-0000-0000-0000-000000000000"
+ $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747
+ $[1].name: foo
+ $[1].newstuff: null
+ $[1].newfilled: "filled"
+ $[1].newbool: true
+ $[1].newint: 15
+ $[1].newstring: foobar
+ $[1].newuuid: "00000000-0000-0000-0000-000000000000"
+
+# Invalid patch
+
+ - name: add/delete the same resource attribute
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: add
+ path: /attributes/what
+ value:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ - op: remove
+ path: /attributes/what
+ status: 200
+ response_json_paths:
+ $.name: my_custom_resource
+ $.attributes:
+ name:
+ type: string
+ required: True
+ min_length: 2
+ max_length: 5
+ uuid:
+ type: uuid
+ required: True
+ int:
+ type: number
+ required: False
+ min: -2
+ max: 3
+ intnomin:
+ type: number
+ required: False
+ min:
+ max: 3
+ float:
+ type: number
+ required: false
+ min: -2.3
+ max:
+ bool:
+ type: bool
+ required: false
+ newstuff:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ newfilled:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ newstring:
+ type: string
+ required: True
+ min_length: 0
+ max_length: 255
+ newbool:
+ type: bool
+ required: True
+ newint:
+ type: number
+ required: True
+ min: 0
+ max: 255
+ newuuid:
+ type: uuid
+ required: True
+
+ - name: delete/add the same resource attribute
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: remove
+ path: /attributes/what
+ - op: add
+ path: /attributes/what
+ value:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ status: 400
+ response_strings:
+ - "can't remove non-existent object 'what'"
+
+ - name: patch a resource attribute replace
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: replace
+ path: /attributes/newstuff
+ value:
+ type: string
+ required: False
+ min_length: 0
+ max_length: 255
+ status: 400
+ response_strings:
+ - "Invalid input: not a valid value for dictionary value @ data[0]["
+ - "'op']"
+
+ - name: patch a resource attribute type not exist
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: add
+ path: /attributes/newstuff
+ value:
+ type: notexist
+ required: False
+ min_length: 0
+ max_length: 255
+ status: 400
+
+ - name: patch a resource attribute type unknown
+ PATCH: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ content-type: application/json-patch+json
+ data:
+ - op: remove
+ path: /attributes/unknown
+ status: 400
+ response_strings:
+ - "can't remove non-existent object 'unknown'"
+
+# Ensure we can't delete the type
+
+  - name: delete in-use resource_type
+ DELETE: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ status: 400
+ response_strings:
+ - Resource type my_custom_resource is still in use
+
+# Delete associated resources
+
+ - name: delete the resource
+ DELETE: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747
+ request_headers:
+ x-roles: admin
+ status: 204
+
+ - name: delete the second resource
+ DELETE: /v1/resource/my_custom_resource/c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59
+ request_headers:
+ x-roles: admin
+ status: 204
+
+# Now we can delete the type
+
+ - name: delete the custom resource type
+ DELETE: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ status: 204
+
+ - name: delete non-existing custom resource type
+ DELETE: $LAST_URL
+ request_headers:
+ x-roles: admin
+ status: 404
+
+ - name: delete missing custom resource type utf8
+ DELETE: /v1/resource_type/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ request_headers:
+ x-roles: admin
+ status: 404
+ response_strings:
+ - Resource type ✔éñ☃ does not exist
+
+# Check that we can re-add and delete the same resource type again
+
+ - name: post resource type again
+ POST: /v1/resource_type
+ request_headers:
+ x-roles: admin
+ content-type: application/json
+ data:
+ name: my_custom_resource
+ status: 201
+
+ - name: delete the custom resource type again
+ DELETE: /v1/resource_type/my_custom_resource
+ request_headers:
+ x-roles: admin
+ status: 204
diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml
new file mode 100644
index 00000000..a9d7e040
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/resource.yaml
@@ -0,0 +1,1106 @@
+#
+# Test the resource API to achieve coverage of just the
+# ResourcesController and ResourceController class code.
+#
+
+fixtures:
+ - ConfigFixture
+
+tests:
+
+# We will need an archive policy for use in later tests, so we create
+# it here. This could be done in a fixture, but since the API allows
+# it, we may as well use it.
+
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: medium
+ definition:
+ - granularity: 1 second
+ status: 201
+
+ - name: create archive policy rule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: test_rule
+ metric_pattern: "disk.io.*"
+ archive_policy_name: medium
+ status: 201
+
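+# Metrics whose names match disk.io.* will pick up the medium policy
+# through this rule; several later tests rely on that.
+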
+# The top of the API is a bit confusing and presents some URIs which
+# are not very useful. This isn't strictly a bug but does represent
+# a measure of unfriendliness that we may wish to address. Thus the
+# xfails.
+
+ - name: root of all
+ GET: /
+ response_headers:
+ content-type: /application/json/
+ response_json_paths:
+ $.versions[0].links[0].href: $SCHEME://$NETLOC/v1/
+
+ - name: root of v1
+ GET: /v1
+ redirects: true
+ response_json_paths:
+ $.version: "1.0"
+ $.links.`len`: 11
+ $.links[0].href: $SCHEME://$NETLOC/v1
+ $.links[7].href: $SCHEME://$NETLOC/v1/resource
+
+ - name: root of resource
+ GET: /v1/resource
+ response_json_paths:
+ $.generic: $SCHEME://$NETLOC/v1/resource/generic
+
+ - name: typo of resource
+ GET: /v1/resoue
+ status: 404
+
+ - name: typo of resource extra
+ GET: /v1/resource/foobar
+ status: 404
+
+# Explore that GETting a list of resources demonstrates the expected
+# behaviors notably with regard to content negotiation.
+
+ - name: generic resource list
+ desc: there are no generic resources yet
+ GET: /v1/resource/generic
+ response_strings:
+ - "[]"
+
+ - name: generic resource bad accept
+ desc: Expect 406 on bad accept type
+ GET: $LAST_URL
+ request_headers:
+ accept: text/plain
+ status: 406
+ response_strings:
+ - 406 Not Acceptable
+
+ - name: generic resource complex accept
+ desc: failover accept media type appropriately
+ GET: $LAST_URL
+ request_headers:
+ accept: text/plain, application/json; q=0.8
+ response_strings:
+ - "[]"
+
+# Try creating a new generic resource in various ways.
+
+ - name: generic resource
+ desc: there are no generic resources yet
+ GET: /v1/resource/generic
+ response_strings:
+ - "[]"
+
+ - name: post resource no user-id
+ desc: https://bugs.launchpad.net/gnocchi/+bug/1424005
+ POST: $LAST_URL
+ request_headers:
+ # Only provide one of these auth headers
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ content-type: application/json
+ data:
+ id: f93454f2-d8a5-4d67-9985-02511241e7f3
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post generic resource
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
+ content-type: /^application\/json/
+ response_json_paths:
+ $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c
+ $.user_id: 0fbb231484614b1a80131fc22f6afc9c
+
+ - name: post same resource refuse
+    desc: a resource with a given id can only be POSTed once
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-d8a5-4d67-9985-02511241e7d1
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 409
+
+ - name: post generic resource bad content type
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: text/plain
+ data: '{"id": "f93450f2-d8a5-4d67-9985-02511241e7d1", "started_at": "2014-01-03T02:02:02.000000", "user_id": "0fbb231484614b1a80131fc22f6afc9c", "project_id": "f3d41b770cc14f0bb94a1d5be9c0e3ea"}'
+ status: 415
+
+# Create a new generic resource, demonstrate that including no data
+# gets a useful 400 response.
+
+ - name: post generic resource no data
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+
+ - name: post generic with invalid metric name
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ "disk/iops":
+ archive_policy_name: medium
+ status: 400
+ response_strings:
+ - "'/' is not supported in metric name"
+
+ - name: post generic resource to modify
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 75C44741-CC60-4033-804E-2D3098C7D2E9
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+ response_json_paths:
+ $.metrics: {} # empty dictionary
+
+# PATCH that generic resource to change its attributes and to
+# associate metrics. If a metric does not exist there should be a
+# graceful failure.
+ - name: patch generic resource
+ PATCH: $LOCATION
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ user_id: foobar
+ status: 200
+ response_json_paths:
+ user_id: foobar
+
+ - name: patch generic resource with same data
+    desc: Ensure no useless revision has been created
+ PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ user_id: foobar
+ status: 200
+ response_json_paths:
+ user_id: foobar
+ revision_start: $RESPONSE['$.revision_start']
+
+ - name: patch generic resource with id
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: foobar
+ status: 400
+ response_strings:
+ - "Invalid input: extra keys not allowed @ data["
+ - "'id']"
+
+ - name: patch generic with metrics
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ disk.iops:
+ archive_policy_name: medium
+ status: 200
+ response_strings:
+ - '"disk.iops": '
+
+ - name: get generic history
+ desc: Ensure we can get the history
+ GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast
+    request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+ $[1].revision_end: null
+ $[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"]
+
+ - name: patch generic bad metric association
+ PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ disk.iops: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
+ status: 400
+ response_strings:
+ - Metric f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea does not exist
+
+ - name: patch generic with bad archive policy
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ disk.iops:
+ archive_policy_name: noexist
+ status: 400
+ response_strings:
+ - Archive policy noexist does not exist
+
+ - name: patch generic with no archive policy rule
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ disk.iops: {}
+ status: 400
+ response_strings:
+ - No archive policy name specified and no archive policy rule found matching the metric name disk.iops
+
+ - name: patch generic with archive policy rule
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ disk.io.rate: {}
+ status: 200
+
+ - name: get patched resource
+ desc: confirm the patched resource is properly patched
+ GET: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+    response_json_paths:
+      $.user_id: foobar
+
+ - name: patch resource empty dict
+ desc: an empty dict in patch is an existence check
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data: "{}"
+ status: 200
+    response_json_paths:
+      $.user_id: foobar
+
+ - name: patch resource without change with metrics in response
+ desc: an empty dict in patch is an existence check
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data: "{}"
+ status: 200
+ response_json_paths:
+ $.metrics.'disk.io.rate': $RESPONSE["$.metrics.'disk.io.rate'"]
+
+ - name: patch generic with invalid metric name
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ metrics:
+ "disk/iops":
+ archive_policy_name: medium
+ status: 400
+ response_strings:
+ - "'/' is not supported in metric name"
+
+# Failure modes for history
+
+ - name: post generic history
+    desc: should not work
+ POST: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 405
+
+ - name: delete generic history
+    desc: should not work
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 405
+
+# Failure modes for PATCHing a resource
+
+ - name: patch resource no data
+ desc: providing no data is an error
+ PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+ response_strings:
+ - "Unable to decode body:"
+
+ - name: patch resource bad data
+ desc: providing data that is not a dict is an error
+ PATCH: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+ data:
+ - Beer and pickles
+ response_strings:
+ - "Invalid input: expected a dictionary"
+
+  - name: patch noexist resource
+ desc: "patching something that doesn't exist is a 404"
+ PATCH: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 404
+
+# GET single resource failure modes
+
+ - name: get noexist resource
+ desc: if a resource does not exist 404
+ GET: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 404
+ response_strings:
+ - The resource could not be found.
+
+ - name: get bad resource id
+ desc: https://bugs.launchpad.net/gnocchi/+bug/1425588
+ GET: /v1/resource/generic/noexist
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 404
+ response_strings:
+ - The resource could not be found.
+
+ - name: get metrics for this not-existing resource
+ GET: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 404
+
+# List resources
+
+ - name: list generic resources no auth
+ GET: /v1/resource/generic
+ response_strings:
+ - "[]"
+
+ - name: list generic resources
+ GET: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c
+ $[-1].user_id: foobar
+
+ - name: list all resources
+ GET: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_strings:
+ - '"type": "generic"'
+
+# Metric handling when POSTing resources.
+
+ - name: post new generic with non-existent metrics
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 85C44741-CC60-4033-804E-2D3098C7D2E9
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util: 10
+ status: 400
+
+ - name: post new generic with metrics bad policy
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 85C44741-CC60-4033-804E-2D3098C7D2E9
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util:
+ archive_policy_name: noexist
+ status: 400
+
+ - name: post new generic with metrics no policy rule
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 85BABE39-F7F7-455A-877B-62C22E11AA40
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util: {}
+ status: 400
+ response_strings:
+ - No archive policy name specified and no archive policy rule found matching the metric name cpu.util
+
+ - name: post new generic with metrics using policy rule
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 85BABE39-F7F7-455A-877B-62C22E11AA40
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ disk.io.rate: {}
+ status: 201
+
+ - name: post new generic with metrics
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: d13982cb-4cce-4f84-a96e-7581be1e599c
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ disk.util:
+ archive_policy_name: medium
+ status: 201
+ response_json_paths:
+ created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c
+ created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+ - name: post new generic with metrics and un-normalized user/project id from keystone middleware
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 85C44741-CC60-4033-804E-2D3098C7D2E9
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ status: 201
+ response_json_paths:
+ created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c
+ created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+
+ - name: get metrics for this resource
+ desc: with async measure handling this is a null test
+ GET: /v1/resource/generic/$RESPONSE['$.id']/metric/cpu.util/measures
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ response_strings:
+ - "[]"
+
+# Interrogate the NamedMetricController
+
+ - name: list the generics
+ GET: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+ - name: request metrics from one of the generics
+ GET: /v1/resource/generic/$RESPONSE['$[-1].id']/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $.`len`: 1
+ $[0].name: cpu.util
+ $[0].resource_id: 85c44741-cc60-4033-804e-2d3098c7d2e9
+
+  - name: request metrics from non uuid resource
+ desc: 404 from GenericResourceController
+ GET: /v1/resource/generic/not.a.uuid/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 404
+
+ - name: request cpuutil metric from generic
+ GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ $.archive_policy.name: medium
+
+ - name: try post cpuutil metric to generic
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 405
+
+ - name: request cpuutil measures from generic
+ desc: with async measure handling this is a null test
+ GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_strings:
+ - "[]"
+
+ - name: post cpuutil measures
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
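+# Measure processing is asynchronous, so poll retries the request (up
+# to count times, delay seconds apart) until the assertions pass.
+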
+ - name: request cpuutil measures again
+ GET: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ poll:
+ count: 50
+ delay: .1
+ response_json_paths:
+ $[0][0]: "2015-03-06T14:33:57+00:00"
+ $[0][1]: 1.0
+ $[0][2]: 43.100000000000001
+
+ - name: post metric at generic
+ POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 204
+ data:
+ electron.spin:
+ archive_policy_name: medium
+
+ - name: post metric at generic with empty definition
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+ data:
+ foo.bar: {}
+ response_strings:
+ - No archive policy name specified and no archive policy rule found matching the metric name foo.bar
+
+ - name: post metric at generic using archive policy rule
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 204
+ data:
+ disk.io.rate: {}
+
+ - name: duplicate metrics at generic
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 409
+ data:
+ electron.spin:
+ archive_policy_name: medium
+ response_strings:
+ - Named metric electron.spin already exists
+
+ - name: post metrics at generic bad policy
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 400
+ data:
+ electron.charge:
+ archive_policy_name: high
+ response_strings:
+ - Archive policy high does not exist
+
+# Check bad timestamps
+
+ - name: post new generic with bad timestamp
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: 95C44741-CC60-4033-804E-2D3098C7D2E9
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ ended_at: "2001-12-15T02:59:43"
+ started_at: "2014-12-15T02:59:43"
+ status: 400
+ response_strings:
+ - Start timestamp cannot be after end timestamp
+
+# Post metrics to unknown resource
+
+  - name: post metric to non uuid resource
+ desc: 404 from GenericResourceController
+ POST: /v1/resource/generic/not.a.uuid/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ some.metric:
+ archive_policy_name: medium
+ status: 404
+
+  - name: post metric to missing uuid resource
+ desc: 404 from NamedMetricController
+ POST: /v1/resource/generic/d5a5994e-ee90-11e4-88cf-685b35afa334/metric
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ some.metric:
+ archive_policy_name: medium
+ status: 404
+
+# Post measurements on unknown things
+
+ - name: post measure on unknown metric
+ desc: 404 from NamedMetricController with metric error
+ POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ status: 404
+ response_strings:
+ - Metric unknown does not exist
+
+# DELETE-ing generics
+
+ - name: delete generic
+ DELETE: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 204
+
+ - name: delete noexist generic
+ DELETE: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 404
+
+# Delete a batch of resources by attribute filter
+
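+# The DELETE body is the same filter grammar the search API accepts,
+# e.g. {"eq": {"started_at": "..."}} or {"in": {"id": [...]}}, with
+# boolean operators such as "and"/"or" nesting them; the failure-mode
+# tests below exercise malformed variants.
+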
+ - name: create resource one
+    desc: before testing batch delete, create some resources, using a float for started_at
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-aaaa-4d67-9985-02511241e7d1
+ started_at: 1388714522.0
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: create resource two
+    desc: before testing batch delete, create some resources
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-bbbb-4d67-9985-02511241e7d1
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: create resource three
+    desc: before testing batch delete, create some resources
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-cccc-4d67-9985-02511241e7d1
+ started_at: "2014-08-04T00:00:00.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: create resource four
+    desc: before testing batch delete, create some resources
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-dddd-4d67-9985-02511241e7d1
+ started_at: "2014-08-04T00:00:00.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: create resource five
+    desc: before testing batch delete, create some resources
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-eeee-4d67-9985-02511241e7d1
+ started_at: "2015-08-14T00:00:00.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: create resource six
+    desc: before testing batch delete, create some resources
+ POST: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ id: f93450f2-ffff-4d67-9985-02511241e7d1
+ started_at: "2015-08-14T00:00:00.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: get resource one
+    desc: ensure the resource exists
+ GET: /v1/resource/generic/f93450f2-aaaa-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+
+ - name: get resource two
+    desc: ensure the resource exists
+ GET: /v1/resource/generic/f93450f2-bbbb-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+
+ - name: get resource three
+    desc: ensure the resource exists
+ GET: /v1/resource/generic/f93450f2-cccc-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+
+ - name: get resource four
+    desc: ensure the resource exists
+ GET: /v1/resource/generic/f93450f2-dddd-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+
+ - name: get resource five
+    desc: ensure the resource exists
+ GET: /v1/resource/generic/f93450f2-eeee-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+
+ - name: get resource six
+    desc: ensure the resource exists
+ GET: /v1/resource/generic/f93450f2-ffff-4d67-9985-02511241e7d1
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ status: 200
+
+ - name: delete random data structure
+    desc: attempt a delete with an invalid request body
+ DELETE: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ resource_ids:
+ []
+ attrs:
+ test
+ status: 400
+
+ - name: delete something empty
+ desc: use empty filter for delete
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data: ""
+ status: 400
+
+ - name: delete something empty a
+ desc: use empty filter for delete
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ in:
+ id: []
+ status: 200
+ response_json_paths:
+ $.deleted: 0
+
+ - name: delete something empty b
+ desc: use empty filter for delete
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ in: {}
+ status: 400
+
+ - name: delete something empty c
+ desc: use empty filter for delete
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ in:
+ and: []
+ status: 400
+
+ - name: delete something empty d
+ desc: use empty filter for delete
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ in:
+ and:
+ - or: []
+ - id:
+ =: ""
+ status: 400
+
+ - name: delete something empty e
+ desc: use empty filter for delete
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ and: []
+ status: 400
+
+ - name: delete something empty f
+ desc: use empty filter for delete
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ and:
+ - in:
+ id: []
+ - started_at: ""
+ status: 400
+
+ - name: delete batch of resources filter by started_at
+ desc: delete the created resources
+ DELETE: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ eq:
+ started_at: "2014-08-04"
+ status: 200
+ response_json_paths:
+ $.deleted: 2
+
+ - name: delete batch of resources filter by multiple ids
+ desc: delete the created resources
+ DELETE: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ in:
+ id:
+ - f93450f2-aaaa-4d67-9985-02511241e7d1
+ - f93450f2-bbbb-4d67-9985-02511241e7d1
+ status: 200
+ response_json_paths:
+ $.deleted: 2
+
+ - name: delete both existent and non-existent data
+    desc: delete a mix of existing and non-existent resources
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ in:
+ id:
+ - f93450f2-eeee-4d67-9985-02511241e7d1
+ - f93450f2-ffff-4d67-9985-02511241e7d1
+ - f93450f2-yyyy-4d67-9985-02511241e7d1
+ - f93450f2-xxxx-4d67-9985-02511241e7d1
+ status: 200
+ response_json_paths:
+ $.deleted: 2
+
+ - name: delete multiple non-existent resources
+ desc: delete a batch of non-existent resources
+ DELETE: $LAST_URL
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+ data:
+ in:
+ id:
+ - f93450f2-zzzz-4d67-9985-02511241e7d1
+ - f93450f2-kkkk-4d67-9985-02511241e7d1
+ status: 200
+ response_json_paths:
+ $.deleted: 0
diff --git a/gnocchi/tests/functional/gabbits/search-metric.yaml b/gnocchi/tests/functional/gabbits/search-metric.yaml
new file mode 100644
index 00000000..4f477b71
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/search-metric.yaml
@@ -0,0 +1,143 @@
+#
+# Test the search API to achieve coverage of just the
+# SearchController and SearchMetricController class code.
+#
+
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
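+# These default headers are merged into the request_headers of every
+# test below.
+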
+tests:
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-roles: admin
+ data:
+ name: high
+ definition:
+ - granularity: 1 second
+ timespan: 1 hour
+ - granularity: 2 second
+ timespan: 1 hour
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/high
+ status: 201
+
+ - name: create metric
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: high
+ status: 201
+
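+# /v1/batch/metrics/measures takes a mapping of metric id to a list of
+# measures; the $RESPONSE substitution reuses the id of the metric
+# created above.
+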
+ - name: post measures
+ desc: for later use
+ POST: /v1/batch/metrics/measures
+ request_headers:
+ content-type: application/json
+ data:
+ $RESPONSE['$.id']:
+ - timestamp: "2014-10-06T14:34:12"
+ value: 12
+ - timestamp: "2014-10-06T14:34:14"
+ value: 12
+ - timestamp: "2014-10-06T14:34:16"
+ value: 12
+ - timestamp: "2014-10-06T14:34:18"
+ value: 12
+ - timestamp: "2014-10-06T14:34:20"
+ value: 12
+ - timestamp: "2014-10-06T14:34:22"
+ value: 12
+ - timestamp: "2014-10-06T14:34:24"
+ value: 12
+ - timestamp: "2014-10-06T14:34:26"
+ value: 12
+ - timestamp: "2014-10-06T14:34:28"
+ value: 12
+ - timestamp: "2014-10-06T14:34:30"
+ value: 12
+ - timestamp: "2014-10-06T14:34:32"
+ value: 12
+ - timestamp: "2014-10-06T14:34:34"
+ value: 12
+ status: 202
+
+ - name: get metric id
+ GET: /v1/metric
+ status: 200
+ response_json_paths:
+ $[0].archive_policy.name: high
+
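+# The granularity query parameter accepts either a unit string ("1s",
+# "1second") or a bare number of seconds (300 parses as 300.0).
+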
+ - name: search with one correct granularity
+ POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s
+ request_headers:
+ content-type: application/json
+ data:
+ "=": 12
+ status: 200
+
+ - name: search with multiple correct granularities
+ POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1second&granularity=2s
+ request_headers:
+ content-type: application/json
+ data:
+ "=": 12
+ status: 200
+
+ - name: search with correct and incorrect granularities
+ POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s&granularity=300
+ request_headers:
+ content-type: application/json
+ data:
+ "=": 12
+ status: 400
+ response_strings:
+ - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist
+
+ - name: search with incorrect granularity
+ POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=300
+ request_headers:
+ content-type: application/json
+ data:
+ "=": 12
+ status: 400
+ response_strings:
+ - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist
+
+ - name: search measure with wrong start
+ POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&start=foobar
+ request_headers:
+ content-type: application/json
+ data:
+ ∧:
+ - ≥: 1000
+ status: 400
+ response_strings:
+ - Invalid value for start
+
+ - name: create metric 2
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: "high"
+ status: 201
+
+ - name: search measure with wrong stop
+ POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&stop=foobar
+ request_headers:
+ content-type: application/json
+ data:
+ ∧:
+ - ≥: 1000
+ status: 400
+ response_strings:
+ - Invalid value for stop
diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml
new file mode 100644
index 00000000..c8f9bc2d
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/search.yaml
@@ -0,0 +1,89 @@
+#
+# Test the search API to achieve coverage of just the
+# SearchController and SearchResourceController class code.
+#
+
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+
+tests:
+ - name: typo of search
+ GET: /v1/search/notexists
+ status: 404
+
+ - name: typo of search in resource
+ GET: /v1/search/resource/foobar
+ status: 404
+
+ - name: search with invalid uuid
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ =:
+ id: "cd9eef"
+
+ - name: post generic resource
+ POST: /v1/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ id: faef212f-0bf4-4030-a461-2186fef79be0
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: post generic resource twice
+ POST: /v1/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ id: df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e
+ started_at: "2014-01-03T02:02:02.000000"
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 201
+
+ - name: search in_
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ in:
+ id:
+ - faef212f-0bf4-4030-a461-2186fef79be0
+ - df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e
+ response_json_paths:
+ $.`len`: 2
+
+ - name: search like created_by_project_id
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ eq:
+ created_by_project_id:
+ - f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_json_paths:
+ $.`len`: 0
+
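+# The URL-encoded filter below decodes to:
+#   id in [faef212f-0bf4-4030-a461-2186fef79be0, df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e]
+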
+ - name: search in_ query string
+ POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D
+ request_headers:
+ content-type: application/json
+ response_json_paths:
+ $.`len`: 2
+
+ - name: search empty query
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data: {}
+ response_json_paths:
+ $.`len`: 2
diff --git a/gnocchi/tests/functional/gabbits/transformedids.yaml b/gnocchi/tests/functional/gabbits/transformedids.yaml
new file mode 100644
index 00000000..cc544f11
--- /dev/null
+++ b/gnocchi/tests/functional/gabbits/transformedids.yaml
@@ -0,0 +1,184 @@
+#
+# Test the resource API to achieve coverage of just the
+# ResourcesController and ResourceController class code.
+#
+
+fixtures:
+ - ConfigFixture
+
+defaults:
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9c
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ content-type: application/json
+
+tests:
+
+# We will need an archive policy for use in later tests, so we create
+# it here. This could be done in a fixture, but since the API allows
+# it, we may as well use it.
+
+ - name: create archive policy
+ desc: for later use
+ POST: /v1/archive_policy
+ request_headers:
+ x-roles: admin
+ data:
+ name: medium
+ definition:
+ - granularity: 1 second
+ status: 201
+
+# Check transformed uuids across the URL hierarchy
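+#
+# Non-UUID ids such as "generic zero" are accepted and mapped to a
+# deterministic UUID server-side, which is why re-POSTing the same
+# external id returns 409 and the Location header carries a UUID.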
+
+ - name: post new resource non uuid for duplication test
+ POST: /v1/resource/generic
+ data:
+ id: generic zero
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ status: 201
+ response_json_paths:
+ created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c
+ created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_headers:
+ # is a UUID
+ location: /v1/resource/generic/[a-f0-9-]{36}/
+
+ - name: post new resource non uuid duplication
+ POST: /v1/resource/generic
+ data:
+ id: generic zero
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ status: 409
+
+ - name: post new resource with invalid uuid
+ POST: /v1/resource/generic
+ data:
+ id: 'id-with-/'
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ status: 400
+ response_strings:
+ - "'/' is not supported in resource id"
+
+
+  - name: post new resource non uuid again as different user
+ POST: /v1/resource/generic
+ request_headers:
+ x-user-id: 0fbb231484614b1a80131fc22f6afc9b
+ x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ data:
+ id: generic zero
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ status: 201
+ response_json_paths:
+ created_by_user_id: 0fbb231484614b1a80131fc22f6afc9b
+ created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_headers:
+ # is a UUID
+ location: /v1/resource/generic/[a-f0-9-]{36}/
+
+ - name: post new resource non uuid
+ POST: /v1/resource/generic
+ data:
+ id: generic one
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ status: 201
+ response_json_paths:
+ created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c
+ created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ response_headers:
+ # is a UUID
+ location: /v1/resource/generic/[a-f0-9-]{36}/
+
+ - name: get new non uuid resource by external id
+ GET: /v1/resource/generic/generic%20one
+ response_json_paths:
+ $.id: $RESPONSE['$.id']
+
+ - name: get new non uuid resource by internal id
+ GET: /v1/resource/generic/$RESPONSE['$.id']
+ response_json_paths:
+ $.id: $RESPONSE['$.id']
+
+ - name: patch by external id
+ PATCH: /v1/resource/generic/generic%20one
+ data:
+ metrics:
+ cattle:
+ archive_policy_name: medium
+ status: 200
+ response_strings:
+ - '"cattle"'
+
+ - name: list metric by external resource id
+ GET: /v1/resource/generic/generic%20one/metric
+ response_json_paths:
+ $[0].name: cattle
+
+ - name: list empty measures by external resource id
+ GET: /v1/resource/generic/generic%20one/metric/cattle/measures
+ response_json_paths:
+ $: []
+
+ - name: post measures by external resource id
+ POST: /v1/resource/generic/generic%20one/metric/cattle/measures
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 43.1
+ - timestamp: "2015-03-06T14:34:12"
+ value: 12
+ status: 202
+
+ - name: list two measures by external resource id
+ GET: $LAST_URL
+ poll:
+ count: 10
+ delay: 1
+ response_json_paths:
+ $[0][2]: 43.1
+ $[1][2]: 12
+
+ - name: delete the resource by external id
+ DELETE: /v1/resource/generic/generic%20one
+ status: 204
+
+# Check length handling
+
+ - name: fail to post too long non uuid resource id
+ POST: /v1/resource/generic
+ data:
+ id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ status: 400
+ response_strings:
+ - transformable resource id >255 max allowed characters for dictionary value
+
+ - name: post long non uuid resource id
+ POST: $LAST_URL
+ data:
+ # 255 char string
+ id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue
+ user_id: 0fbb231484614b1a80131fc22f6afc9c
+ project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
+ metrics:
+ cpu.util:
+ archive_policy_name: medium
+ status: 201
diff --git a/gnocchi/tests/functional/test_gabbi.py b/gnocchi/tests/functional/test_gabbi.py
new file mode 100644
index 00000000..489bd546
--- /dev/null
+++ b/gnocchi/tests/functional/test_gabbi.py
@@ -0,0 +1,35 @@
+#
+# Copyright 2015 Red Hat. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A test module to exercise the Gnocchi API with gabbi."""
+
+import os
+
+from gabbi import driver
+import wsgi_intercept
+
+from gnocchi.tests.functional import fixtures
+
+
+wsgi_intercept.STRICT_RESPONSE_HEADERS = True
+TESTS_DIR = 'gabbits'
+
+
+def load_tests(loader, tests, pattern):
+ """Provide a TestSuite to the discovery process."""
+ test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
+ return driver.build_tests(test_dir, loader, host=None,
+ intercept=fixtures.setup_app,
+ fixture_module=fixtures)
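+
+# Any runner that honors the unittest load_tests protocol can execute
+# this suite; one possible invocation (a sketch, adjust paths to your
+# checkout):
+#   python -m unittest discover -s gnocchi/tests/functional -t .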
diff --git a/gnocchi/tests/functional/test_gabbi_prefix.py b/gnocchi/tests/functional/test_gabbi_prefix.py
new file mode 100644
index 00000000..0a77ceeb
--- /dev/null
+++ b/gnocchi/tests/functional/test_gabbi_prefix.py
@@ -0,0 +1,34 @@
+#
+# Copyright 2015 Red Hat. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A test module to exercise the Gnocchi API with gabbi."""
+
+import os
+
+from gabbi import driver
+
+from gnocchi.tests.functional import fixtures
+
+
+TESTS_DIR = 'gabbits'
+PREFIX = '/gnocchi'
+
+
+def load_tests(loader, tests, pattern):
+ """Provide a TestSuite to the discovery process."""
+ test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
+ return driver.build_tests(test_dir, loader, host=None, prefix=PREFIX,
+ intercept=fixtures.setup_app,
+ fixture_module=fixtures)
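+
+# Identical to test_gabbi.py except that every request is issued under
+# the /gnocchi prefix, checking the API also works when mounted at a
+# non-root path.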
diff --git a/gnocchi/tests/functional_live/__init__.py b/gnocchi/tests/functional_live/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/tests/functional_live/gabbits/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml
new file mode 100644
index 00000000..d63cb096
--- /dev/null
+++ b/gnocchi/tests/functional_live/gabbits/live.yaml
@@ -0,0 +1,739 @@
+#
+# Confirmation tests to run against a live web server.
+#
+# These act as a very basic sanity check.
+
+defaults:
+ request_headers:
+ x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN']
+ authorization: $ENVIRON['GNOCCHI_AUTHORIZATION']
+
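+# $ENVIRON substitution means GNOCCHI_SERVICE_TOKEN and
+# GNOCCHI_AUTHORIZATION must be set in the environment before these
+# tests are run against a live endpoint.
+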
+tests:
+ - name: check /
+ GET: /
+
+ # Fail to create archive policy
+ - name: wrong archive policy content type
+ desc: attempt to create archive policy with invalid content-type
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: text/plain
+ status: 415
+ response_strings:
+ - Unsupported Media Type
+
+ - name: wrong method
+ desc: attempt to create archive policy with 'PUT' method
+ PUT: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ status: 405
+
+ - name: invalid authZ
+ desc: x-auth-token is invalid
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ x-auth-token: 'hello'
+ authorization: 'basic hello:'
+ data:
+ name: medium
+ definition:
+ - granularity: 1 second
+ status: 401
+
+ - name: bad archive policy body
+ desc: archive policy contains invalid key 'cowsay'
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ cowsay: moo
+ status: 400
+ response_strings:
+ - "Invalid input: extra keys not allowed"
+
+ - name: missing definition
+ desc: archive policy is missing 'definition' keyword
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: medium
+ status: 400
+ response_strings:
+ - "Invalid input: required key not provided"
+
+ - name: empty definition
+ desc: empty definition for archive policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: medium
+ definition: []
+ status: 400
+ response_strings:
+ - "Invalid input: length of value must be at least 1"
+
+ - name: wrong value definition
+ desc: invalid type of 'definition' key
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: somename
+ definition: foobar
+ status: 400
+ response_strings:
+ - "Invalid input: expected a list"
+
+ - name: useless definition
+ desc: invalid archive policy definition
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: medium
+ definition:
+ - cowsay: moo
+ status: 400
+ response_strings:
+ - "Invalid input: extra keys not allowed"
+
+ #
+ # Create archive policy
+ #
+
+ - name: create archive policy
+    desc: create archive policy 'gabbilive' for live tests
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: gabbilive
+ back_window: 0
+ definition:
+ - granularity: 1 second
+ points: 60
+ - granularity: 2 second
+ timespan: 1 minute
+ - points: 5
+ timespan: 5 minute
+ aggregation_methods:
+ - mean
+ - min
+ - max
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/gabbilive
+ status: 201
+
+ # Retrieve it correctly and then poorly
+
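+  # In a definition entry, any two of granularity/points/timespan
+  # determine the third; the GET below asserts the server filled in
+  # the computed values (e.g. granularity 2s over a 1 minute timespan
+  # gives 30 points).
+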
+ - name: get archive policy
+    desc: retrieve archive policy 'gabbilive' and assert its values
+ GET: $LOCATION
+ response_headers:
+ content-type: /application/json/
+ response_json_paths:
+ $.name: gabbilive
+ $.back_window: 0
+ $.definition[0].granularity: "0:00:01"
+ $.definition[0].points: 60
+ $.definition[0].timespan: "0:01:00"
+ $.definition[1].granularity: "0:00:02"
+ $.definition[1].points: 30
+ $.definition[1].timespan: "0:01:00"
+ $.definition[2].granularity: "0:01:00"
+ $.definition[2].points: 5
+ $.definition[2].timespan: "0:05:00"
+      $.aggregation_methods.`sorted`: ["max", "mean", "min"]
+
+ - name: get wrong accept
+ desc: invalid 'accept' header
+ GET: /v1/archive_policy/medium
+ request_headers:
+ accept: text/plain
+ status: 406
+
+ # Unexpected methods
+
+ - name: post single archive
+ desc: unexpected 'POST' request to archive policy
+ POST: /v1/archive_policy/gabbilive
+ status: 405
+
+ - name: put single archive
+ desc: unexpected 'PUT' request to archive policy
+ PUT: /v1/archive_policy/gabbilive
+ status: 405
+
+  # Duplicated archive policy names are not allowed
+
+ - name: create duplicate archive policy
+    desc: attempt to create duplicate archive policy 'gabbilive'
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: gabbilive
+ definition:
+ - granularity: 30 second
+ points: 60
+ status: 409
+ response_strings:
+ - Archive policy gabbilive already exists
+
+ # Create a unicode named policy
+
+ - name: post unicode policy name
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: ✔éñ☃
+ definition:
+ - granularity: 1 minute
+ points: 20
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ response_json_paths:
+ name: ✔éñ☃
+
+ - name: retrieve unicode policy name
+ GET: $LOCATION
+ response_json_paths:
+ name: ✔éñ☃
+
+ - name: delete unicode archive policy
+ DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ status: 204
+
+ # It really is gone
+
+ - name: confirm delete
+ desc: assert deleted unicode policy is not available
+ GET: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
+ status: 404
+
+ # Fail to delete one that does not exist
+
+ - name: delete missing archive
+ desc: delete non-existent archive policy
+ DELETE: /v1/archive_policy/grandiose
+ status: 404
+ response_strings:
+ - Archive policy grandiose does not exist
+
+ # Attempt to create illogical policies
+
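+  # A definition that carries granularity, points and timespan at once
+  # must satisfy timespan = granularity * points; here 1s * 60 = 60s,
+  # which contradicts the declared 0:01:01.
+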
+ - name: create illogical policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: complex
+ definition:
+ - granularity: 1 second
+ points: 60
+ timespan: "0:01:01"
+ status: 400
+ response_strings:
+ - timespan ≠ granularity × points
+
+ - name: create identical granularities policy
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: complex
+ definition:
+ - granularity: 1 second
+ points: 60
+ - granularity: 1 second
+ points: 120
+ status: 400
+ response_strings:
+ - "More than one archive policy uses granularity `1.0'"
+
+ - name: policy invalid unit
+ desc: invalid unit for archive policy 'timespan' key
+ POST: /v1/archive_policy
+ request_headers:
+ content-type: application/json
+ data:
+ name: 227d0e1f-4295-4e4b-8515-c296c47d71d3
+ definition:
+ - granularity: 1 second
+ timespan: "1 shenanigan"
+ status: 400
+
+ #
+ # Archive policy rules
+ #
+
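+  # A rule maps metric names matching 'metric_pattern' to an archive
+  # policy, so metrics created later without an explicit policy (see
+  # the metric tests below) pick up 'gabbilive' automatically.
+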
+ - name: create archive policy rule1
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ data:
+ name: gabbilive_rule
+ metric_pattern: "live.*"
+ archive_policy_name: gabbilive
+ status: 201
+ response_json_paths:
+ $.metric_pattern: "live.*"
+ $.archive_policy_name: gabbilive
+ $.name: gabbilive_rule
+
+ - name: create invalid archive policy rule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ data:
+ name: test_rule
+ metric_pattern: "disk.foo.*"
+ status: 400
+
+  - name: bad auth archive policy rule
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ x-auth-token: 'hello'
+ authorization: 'basic hello:'
+ data:
+ name: test_rule
+ metric_pattern: "disk.foo.*"
+ archive_policy_name: low
+ status: 401
+
+ - name: wrong archive policy rule content type
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: text/plain
+ status: 415
+ response_strings:
+ - Unsupported Media Type
+
+ - name: bad archive policy rule body
+ POST: /v1/archive_policy_rule
+ request_headers:
+ content-type: application/json
+ data:
+ whaa: foobar
+ status: 400
+ response_strings:
+ - "Invalid input: extra keys not allowed"
+
+  # Get archive policy rules
+
+ - name: get all archive policy rules
+ GET: /v1/archive_policy_rule
+ status: 200
+ response_json_paths:
+ $[\name][0].name: "gabbilive_rule"
+ $[\name][0].metric_pattern: "live.*"
+ $[\name][0].archive_policy_name: "gabbilive"
+
+ - name: get unknown archive policy rule
+ GET: /v1/archive_policy_rule/foo
+ status: 404
+
+
+ - name: get archive policy rule
+ GET: /v1/archive_policy_rule/gabbilive_rule
+ status: 200
+ response_json_paths:
+ $.metric_pattern: "live.*"
+ $.archive_policy_name: "gabbilive"
+ $.name: "gabbilive_rule"
+
+ - name: delete archive policy in use
+ desc: fails due to https://bugs.launchpad.net/gnocchi/+bug/1569781
+ DELETE: /v1/archive_policy/gabbilive
+ status: 400
+
+ #
+ # Metrics
+ #
+
+
+ - name: get all metrics
+ GET: /v1/metric
+ status: 200
+
+ - name: create metric with name and rule
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "live.io.rate"
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: gabbilive
+ $.name: live.io.rate
+
+ - name: assert metric is present in listing
+ GET: /v1/metric?id=$HISTORY['create metric with name and rule'].$RESPONSE['$.id']
+ response_json_paths:
+ $.`len`: 1
+
+ - name: assert metric is the only one with this policy
+ GET: /v1/metric?archive_policy_name=gabbilive
+ response_json_paths:
+ $.`len`: 1
+
+ - name: delete metric
+ DELETE: /v1/metric/$HISTORY['create metric with name and rule'].$RESPONSE['$.id']
+ status: 204
+
+ - name: assert metric is expunged
+ GET: $HISTORY['assert metric is present in listing'].$URL&status=delete
+ poll:
+ count: 360
+ delay: 1
+ response_json_paths:
+ $.`len`: 0
+
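+  # The 'poll' used above retries the request (up to 360 times, one
+  # second apart) until the response assertions pass; metric deletion
+  # is asynchronous, so the listing only empties eventually.
+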
+ - name: create metric with name and policy
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ name: "aagabbi.live.metric"
+ archive_policy_name: "gabbilive"
+ status: 201
+ response_json_paths:
+ $.archive_policy_name: gabbilive
+ $.name: "aagabbi.live.metric"
+
+ - name: get valid metric id
+ GET: $LOCATION
+ status: 200
+ response_json_paths:
+ $.archive_policy.name: gabbilive
+
+ - name: delete the metric
+ DELETE: /v1/metric/$RESPONSE['$.id']
+ status: 204
+
+  - name: ensure the metric is deleted
+ GET: /v1/metric/$HISTORY['get valid metric id'].$RESPONSE['$.id']
+ status: 404
+
+ - name: create metric bad archive policy
+ POST: /v1/metric
+ request_headers:
+ content-type: application/json
+ data:
+ archive_policy_name: 2e2675aa-105e-4664-a30d-c407e6a0ea7f
+ status: 400
+ response_strings:
+ - Archive policy 2e2675aa-105e-4664-a30d-c407e6a0ea7f does not exist
+
+ - name: create metric bad content-type
+ POST: /v1/metric
+ request_headers:
+ content-type: plain/text
+ data: '{"archive_policy_name": "cookies"}'
+ status: 415
+
+
+ #
+ # Cleanup
+ #
+
+ - name: delete archive policy rule
+ DELETE: /v1/archive_policy_rule/gabbilive_rule
+ status: 204
+
+ - name: confirm delete archive policy rule
+ DELETE: /v1/archive_policy_rule/gabbilive_rule
+ status: 404
+
+
+ #
+ # Resources section
+ #
+
+ - name: root of resource
+ GET: /v1/resource
+ response_json_paths:
+ $.generic: $SCHEME://$NETLOC/v1/resource/generic
+
+ - name: typo of resource
+ GET: /v1/resoue
+ status: 404
+
+ - name: typo of resource extra
+ GET: /v1/resource/foobar
+ status: 404
+
+ - name: generic resource
+ GET: /v1/resource/generic
+ status: 200
+
+ - name: post resource type
+ POST: /v1/resource_type
+ request_headers:
+ content-type: application/json
+ data:
+ name: myresource
+ attributes:
+ display_name:
+ type: string
+ required: true
+ max_length: 5
+ min_length: 2
+ status: 201
+ response_headers:
+ location: $SCHEME://$NETLOC/v1/resource_type/myresource
+
+ - name: add an attribute
+ PATCH: /v1/resource_type/myresource
+ request_headers:
+ content-type: application/json-patch+json
+ data:
+ - op: "add"
+ path: "/attributes/awesome-stuff"
+ value: {"type": "bool", "required": false}
+ status: 200
+ response_json_paths:
+ $.name: myresource
+ $.attributes."awesome-stuff".type: bool
+ $.attributes.[*].`len`: 2
+
+ - name: remove an attribute
+ PATCH: /v1/resource_type/myresource
+ request_headers:
+ content-type: application/json-patch+json
+ data:
+ - op: "remove"
+ path: "/attributes/awesome-stuff"
+ status: 200
+ response_json_paths:
+ $.name: myresource
+ $.attributes.display_name.type: string
+ $.attributes.[*].`len`: 1
+
+ - name: myresource resource bad accept
+ desc: Expect 406 on bad accept type
+ request_headers:
+ accept: text/plain
+ GET: /v1/resource/myresource
+ status: 406
+ response_strings:
+ - 406 Not Acceptable
+
+ - name: myresource resource complex accept
+    desc: fall back to an acceptable media type from a complex accept header
+ request_headers:
+ accept: text/plain, application/json; q=0.8
+ GET: /v1/resource/myresource
+ status: 200
+
+ - name: post myresource resource
+ POST: /v1/resource/myresource
+ request_headers:
+ content-type: application/json
+ data:
+ id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+ user_id: 126204ef-989a-46fd-999b-ee45c8108f31
+ project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
+ display_name: myvm
+ metrics:
+ vcpus:
+ archive_policy_name: gabbilive
+ status: 201
+ response_json_paths:
+ $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+ $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31
+ $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
+ $.display_name: "myvm"
+
+ - name: get myresource resource
+ GET: $LOCATION
+ status: 200
+ response_json_paths:
+ $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+ $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31
+ $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
+ $.display_name: "myvm"
+
+ - name: get vcpus metric
+ GET: /v1/metric/$HISTORY['get myresource resource'].$RESPONSE['$.metrics.vcpus']
+ status: 200
+ response_json_paths:
+ $.name: vcpus
+ $.resource.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+
+ - name: search for myresource resource via user_id
+ POST: /v1/search/resource/myresource
+ request_headers:
+ content-type: application/json
+ data:
+ =:
+ user_id: "126204ef-989a-46fd-999b-ee45c8108f31"
+ response_json_paths:
+ $..id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+ $..user_id: 126204ef-989a-46fd-999b-ee45c8108f31
+ $..project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
+ $..display_name: myvm
+
+ - name: search for myresource resource via user_id and 'generic' type
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ =:
+ id: "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"
+ response_strings:
+ - '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"'
+
+ - name: search for myresource resource via user_id and project_id
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ and:
+ - =:
+ user_id: "126204ef-989a-46fd-999b-ee45c8108f31"
+ - =:
+ project_id: "98e785d7-9487-4159-8ab8-8230ec37537a"
+ response_strings:
+ - '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"'
+
+ - name: patch myresource resource
+ PATCH: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+ request_headers:
+ content-type: application/json
+ data:
+ display_name: myvm2
+ status: 200
+ response_json_paths:
+ display_name: myvm2
+
+ - name: post some measures to the metric on myresource
+ POST: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:33:57"
+ value: 2
+ - timestamp: "2015-03-06T14:34:12"
+ value: 2
+ status: 202
+
+ - name: get myresource measures with poll
+ GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures
+    # wait up to 60 seconds for the measures to be processed
+ poll:
+ count: 60
+ delay: 1
+ response_json_paths:
+ $[0][2]: 2
+ $[1][2]: 2
+
+ - name: post some more measures to the metric on myresource
+ POST: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures
+ request_headers:
+ content-type: application/json
+ data:
+ - timestamp: "2015-03-06T14:34:15"
+ value: 5
+ - timestamp: "2015-03-06T14:34:20"
+ value: 5
+ status: 202
+
+ - name: get myresource measures with refresh
+ GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures?refresh=true
+ response_json_paths:
+ $[0][2]: 2
+ $[1][2]: 4
+ $[2][2]: 2
+ $[3][2]: 2
+ $[4][2]: 5
+ $[5][2]: 5
+
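+  # The ?refresh=true used above forces any unprocessed measures to be
+  # aggregated before the response is returned, so no polling loop is
+  # needed.
+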
+ #
+ # Search for resources
+ #
+
+ - name: typo of search
+ POST: /v1/search/notexists
+ status: 404
+
+ - name: typo of search in resource
+ POST: /v1/search/resource/foobar
+ status: 404
+
+ - name: search with invalid uuid
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ =:
+ id: "cd9eef"
+ status: 200
+ response_json_paths:
+ $.`len`: 0
+
+ - name: assert vcpus metric exists in listing
+ GET: /v1/metric?id=$HISTORY['get myresource resource'].$RESPONSE['$.metrics.vcpus']
+ poll:
+ count: 360
+ delay: 1
+ response_json_paths:
+ $.`len`: 1
+
+ - name: delete myresource resource
+ DELETE: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+ status: 204
+
+ # assert resource is really deleted
+  - name: assert myresource resource is deleted
+ GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e
+ status: 404
+
+  - name: assert vcpus metric is really expunged
+ GET: $HISTORY['assert vcpus metric exists in listing'].$URL&status=delete
+ poll:
+ count: 360
+ delay: 1
+ response_json_paths:
+ $.`len`: 0
+
+ - name: post myresource resource no data
+ POST: /v1/resource/myresource
+ request_headers:
+ content-type: application/json
+ status: 400
+
+ - name: assert no metrics have the gabbilive policy
+ GET: $HISTORY['assert metric is the only one with this policy'].$URL
+ response_json_paths:
+ $.`len`: 0
+
+  - name: assert no deleted metrics have the gabbilive policy
+ GET: $HISTORY['assert metric is the only one with this policy'].$URL&status=delete
+ response_json_paths:
+ $.`len`: 0
+
+ - name: delete single archive policy cleanup
+ DELETE: /v1/archive_policy/gabbilive
+ poll:
+ count: 360
+ delay: 1
+ status: 204
+
+  - name: delete our resource type
+    DELETE: /v1/resource_type/myresource
+    status: 204
+
+  # The archive policy really is gone
+
+  - name: confirm delete of cleanup
+    GET: /v1/archive_policy/gabbilive
+    status: 404
diff --git a/gnocchi/tests/functional_live/gabbits/search-resource.yaml b/gnocchi/tests/functional_live/gabbits/search-resource.yaml
new file mode 100644
index 00000000..fe254788
--- /dev/null
+++ b/gnocchi/tests/functional_live/gabbits/search-resource.yaml
@@ -0,0 +1,275 @@
+#
+# Tests to confirm resources are searchable. Run against a live setup.
+# URL: http://gnocchi.xyz/rest.html#searching-for-resources
+#
+# Instance-ResourceID-1: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2
+# Instance-ResourceID-2: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a
+# Instance-ResourceID-3: c442a47c-eb33-46ce-9665-f3aa0bef54e7
+#
+# UserID-1: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07
+# UserID-2: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe
+#
+# ProjectID-1: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6
+# ProjectID-2: 40eba01c-b348-49b8-803f-67123251a00a
+#
+# ImageID-1: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d
+# ImageID-2: b01f2588-89dc-46b2-897b-fffae1e10975
+#
+
+defaults:
+ request_headers:
+ x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN']
+ authorization: $ENVIRON['GNOCCHI_AUTHORIZATION']
+
+tests:
+ #
+  # Set up resource types if they don't exist
+ #
+
+ - name: create new resource type 'instance-like'
+ POST: /v1/resource_type
+ status: 201
+ request_headers:
+ content-type: application/json
+ data:
+ name: instance-like
+ attributes:
+ display_name:
+ type: string
+ required: True
+ flavor_id:
+ type: string
+ required: True
+ host:
+ type: string
+ required: True
+ image_ref:
+ type: string
+ required: False
+ server_group:
+ type: string
+ required: False
+
+ - name: create new resource type 'image-like'
+ POST: /v1/resource_type
+ status: 201
+ request_headers:
+ content-type: application/json
+ data:
+ name: image-like
+ attributes:
+ name:
+ type: string
+ required: True
+ disk_format:
+ type: string
+ required: True
+ container_format:
+ type: string
+ required: True
+
+ #
+ # Setup test resources
+ #
+ - name: helper. create instance-like resource-1
+ POST: /v1/resource/instance-like
+ request_headers:
+ content-type: application/json
+ data:
+ display_name: vm-gabbi-1
+ id: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2
+ user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07
+ flavor_id: "1"
+ image_ref: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d
+ host: compute-0-gabbi.localdomain
+ project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6
+ status: 201
+
+ - name: helper. create instance-like resource-2
+ POST: /v1/resource/instance-like
+ request_headers:
+ content-type: application/json
+ data:
+ display_name: vm-gabbi-2
+ id: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a
+ user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07
+ flavor_id: "2"
+ image_ref: b01f2588-89dc-46b2-897b-fffae1e10975
+ host: compute-1-gabbi.localdomain
+ project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6
+ status: 201
+
+ - name: helper. create instance-like resource-3
+ POST: /v1/resource/instance-like
+ request_headers:
+ content-type: application/json
+ data:
+ display_name: vm-gabbi-3
+ id: c442a47c-eb33-46ce-9665-f3aa0bef54e7
+ user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe
+ flavor_id: "2"
+ image_ref: b01f2588-89dc-46b2-897b-fffae1e10975
+ host: compute-1-gabbi.localdomain
+ project_id: 40eba01c-b348-49b8-803f-67123251a00a
+ status: 201
+
+ - name: helper. create image-like resource-1
+ POST: /v1/resource/image-like
+ request_headers:
+ content-type: application/json
+ data:
+ id: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d
+ container_format: bare
+ disk_format: qcow2
+ name: gabbi-image-1
+ user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe
+ project_id: 40eba01c-b348-49b8-803f-67123251a00a
+ status: 201
+
+ #
+ # Actual tests
+ #
+
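+  # Each search body is a small boolean expression tree: "=" matches
+  # an attribute exactly, "like" does SQL-style pattern matching with
+  # "%" wildcards, and "and"/"or" combine sub-expressions.
+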
+ - name: search for all resources with a specific user_id
+ desc: search through all resource types
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ =:
+ user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe
+ status: 200
+ response_json_paths:
+ $.`len`: 2
+ $.[0].type: instance-like
+ $.[1].type: image-like
+ $.[0].id: c442a47c-eb33-46ce-9665-f3aa0bef54e7
+ $.[1].id: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d
+
+  - name: search for all resources of instance-like type created by a specific user_id
+ desc: all instances created by a specified user
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ and:
+ - =:
+ type: instance-like
+ - =:
+ user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07
+ status: 200
+    response_json_paths:
+      $.`len`: 2
+      $.[0].id: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2
+      $.[1].id: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a
+      $.[0].type: instance-like
+      $.[1].type: instance-like
+      $.[0].metrics.`len`: 0
+      $.[1].metrics.`len`: 0
+    response_strings:
+      - '"id": "a64ca14f-bc7c-45b0-aa85-42cd2179e1e2"'
+      - '"id": "7ccccfa0-92ce-4225-80ca-3ac9cb122d6a"'
+
+ - name: search for all resources with a specific project_id
+ desc: search for all resources in a specific project
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ =:
+ project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6
+ status: 200
+ response_json_paths:
+ $.`len`: 2
+
+  - name: search for instances on a specific compute using "like" keyword
+ desc: search for vms hosted on a specific compute node
+ POST: /v1/search/resource/instance-like
+ request_headers:
+ content-type: application/json
+ data:
+ like:
+ host: 'compute-1-gabbi%'
+ response_json_paths:
+ $.`len`: 2
+ response_strings:
+ - '"project_id": "40eba01c-b348-49b8-803f-67123251a00a"'
+ - '"project_id": "c9a5f184-c0d0-4daa-83c3-af6fdc0879e6"'
+ - '"user_id": "33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07"'
+ - '"user_id": "81d82ef3-4deb-499d-9270-9aeb5a3ec5fe"'
+ - '"display_name": "vm-gabbi-2"'
+ - '"display_name": "vm-gabbi-3"'
+
+ - name: search for instances using complex search with "like" keyword and user_id
+ desc: search for vms of specified user hosted on a specific compute node
+ POST: /v1/search/resource/instance-like
+ request_headers:
+ content-type: application/json
+ data:
+ and:
+ - like:
+ host: 'compute-%-gabbi%'
+ - =:
+ user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07
+ response_json_paths:
+ $.`len`: 2
+ response_strings:
+ - '"display_name": "vm-gabbi-1"'
+ - '"display_name": "vm-gabbi-2"'
+ - '"project_id": "c9a5f184-c0d0-4daa-83c3-af6fdc0879e6"'
+
+ - name: search for resources of instance-like or image-like type with specific user_id
+ desc: search for all image-like or instance-like resources created by a specific user
+ POST: /v1/search/resource/generic
+ request_headers:
+ content-type: application/json
+ data:
+ and:
+ - =:
+ user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe
+
+ - or:
+ - =:
+ type: instance-like
+
+ - =:
+ type: image-like
+ status: 200
+ response_json_paths:
+ $.`len`: 2
+ response_strings:
+ - '"type": "image-like"'
+ - '"type": "instance-like"'
+ - '"id": "7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d"'
+ - '"id": "c442a47c-eb33-46ce-9665-f3aa0bef54e7"'
+
+ #
+ # Tear down resources
+ #
+
+ - name: helper. delete instance-like resource-1
+ DELETE: /v1/resource/instance-like/a64ca14f-bc7c-45b0-aa85-42cd2179e1e2
+ status: 204
+
+ - name: helper. delete instance-like resource-2
+ DELETE: /v1/resource/instance-like/7ccccfa0-92ce-4225-80ca-3ac9cb122d6a
+ status: 204
+
+ - name: helper. delete instance-like resource-3
+ DELETE: /v1/resource/instance-like/c442a47c-eb33-46ce-9665-f3aa0bef54e7
+ status: 204
+
+ - name: helper. delete image-like resource
+ DELETE: /v1/resource/image-like/7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d
+ status: 204
+
+ - name: helper. delete resource-type instance-like
+ DELETE: /v1/resource_type/instance-like
+ status: 204
+
+ - name: helper. delete resource-type image-like
+ DELETE: /v1/resource_type/image-like
+ status: 204
+
diff --git a/gnocchi/tests/functional_live/test_gabbi_live.py b/gnocchi/tests/functional_live/test_gabbi_live.py
new file mode 100644
index 00000000..aeed07a8
--- /dev/null
+++ b/gnocchi/tests/functional_live/test_gabbi_live.py
@@ -0,0 +1,48 @@
+#
+# Copyright 2015 Red Hat. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A test module to exercise the Gnocchi API with gabbi."""
+
+import os
+
+from gabbi import driver
+import six.moves.urllib.parse as urlparse
+
+
+TESTS_DIR = 'gabbits'
+
+
+def load_tests(loader, tests, pattern):
+ """Provide a TestSuite to the discovery process."""
+ gnocchi_url = os.getenv('GNOCCHI_ENDPOINT')
+ if gnocchi_url:
+ parsed_url = urlparse.urlsplit(gnocchi_url)
+ prefix = parsed_url.path.rstrip('/') # turn it into a prefix
+
+ # NOTE(chdent): gabbi requires a port be passed or it will
+ # default to 8001, so we must dance a little dance to get
+ # the right ports. Probably gabbi needs to change.
+ # https://github.com/cdent/gabbi/issues/50
+ port = 443 if parsed_url.scheme == 'https' else 80
+ if parsed_url.port:
+ port = parsed_url.port
+
+ test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
+ return driver.build_tests(test_dir, loader,
+ host=parsed_url.hostname,
+ port=port,
+ prefix=prefix)
+ elif os.getenv("GABBI_LIVE"):
+ raise RuntimeError('"GNOCCHI_ENDPOINT" is not set')
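+
+# These tests use the standard unittest load_tests protocol, so a plain
+# discovery run works once the endpoint is exported. A minimal
+# invocation (the endpoint URL is only an example) might look like:
+#
+#   export GNOCCHI_ENDPOINT=http://localhost:8041
+#   python -m unittest discover -s gnocchi/tests/functional_live
+#
+# Setting GABBI_LIVE makes a missing GNOCCHI_ENDPOINT a hard error
+# rather than quietly building no tests.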
diff --git a/gnocchi/tests/indexer/__init__.py b/gnocchi/tests/indexer/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/tests/indexer/sqlalchemy/__init__.py b/gnocchi/tests/indexer/sqlalchemy/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py
new file mode 100644
index 00000000..781236fd
--- /dev/null
+++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py
@@ -0,0 +1,92 @@
+# Copyright 2015 eNovance
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import abc
+
+import fixtures
+import mock
+import oslo_db.exception
+from oslo_db.sqlalchemy import test_migrations
+import six
+import sqlalchemy as sa
+import sqlalchemy_utils
+
+from gnocchi import indexer
+from gnocchi.indexer import sqlalchemy
+from gnocchi.indexer import sqlalchemy_base
+from gnocchi.tests import base
+
+
+class ABCSkip(base.SkipNotImplementedMeta, abc.ABCMeta):
+ pass
+
+
+class ModelsMigrationsSync(
+ six.with_metaclass(ABCSkip,
+ base.TestCase,
+ test_migrations.ModelsMigrationsSync)):
+
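+    # oslo.db's ModelsMigrationsSync compares the metadata returned by
+    # get_metadata() against a database brought up to date through
+    # db_sync(); any difference not filtered out by
+    # filter_metadata_diff() fails the test.
+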
+ def _set_timeout(self):
+ self.useFixture(fixtures.Timeout(120, gentle=True))
+
+ def setUp(self):
+ super(ModelsMigrationsSync, self).setUp()
+ self.db = mock.Mock()
+ self.conf.set_override(
+ 'url',
+ sqlalchemy.SQLAlchemyIndexer._create_new_database(
+ self.conf.indexer.url),
+ 'indexer')
+ self.index = indexer.get_driver(self.conf)
+ self.index.connect()
+ self.index.upgrade(nocreate=True)
+ self.addCleanup(self._drop_database)
+
+ def _drop_database(self):
+ try:
+ sqlalchemy_utils.drop_database(self.conf.indexer.url)
+ except oslo_db.exception.DBNonExistentDatabase:
+ # NOTE(sileht): oslo db >= 4.15.0 cleanup this for us
+ pass
+
+ @staticmethod
+ def get_metadata():
+ return sqlalchemy_base.Base.metadata
+
+ def get_engine(self):
+ return self.index.get_engine()
+
+ def db_sync(self, engine):
+        # NOTE(sileht): we ensure all resource-type sqlalchemy models
+        # are loaded in this process
+ for rt in self.index.list_resource_types():
+ if rt.state == "active":
+ self.index._RESOURCE_TYPE_MANAGER.get_classes(rt)
+
+ def filter_metadata_diff(self, diff):
+ tables_to_keep = []
+ for rt in self.index.list_resource_types():
+ if rt.name.startswith("indexer_test"):
+ tables_to_keep.extend([rt.tablename,
+ "%s_history" % rt.tablename])
+ new_diff = []
+ for line in diff:
+ if len(line) >= 2:
+ item = line[1]
+ # NOTE(sileht): skip resource types created for tests
+ if (isinstance(item, sa.Table)
+ and item.name in tables_to_keep):
+ continue
+ new_diff.append(line)
+ return new_diff
diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py
new file mode 100644
index 00000000..d5d4e900
--- /dev/null
+++ b/gnocchi/tests/test_aggregates.py
@@ -0,0 +1,116 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2014-2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import uuid
+
+import pandas
+from stevedore import extension
+
+from gnocchi import aggregates
+from gnocchi.aggregates import moving_stats
+from gnocchi import storage
+from gnocchi.tests import base as tests_base
+from gnocchi.tests import utils as tests_utils
+from gnocchi import utils
+
+
+class TestAggregates(tests_base.TestCase):
+
+ def setUp(self):
+ super(TestAggregates, self).setUp()
+ mgr = extension.ExtensionManager('gnocchi.aggregates',
+ invoke_on_load=True)
+ self.custom_agg = dict((x.name, x.obj) for x in mgr)
+
+ def test_extension_dict(self):
+ self.assertIsInstance(self.custom_agg['moving-average'],
+ moving_stats.MovingAverage)
+
+ def test_check_window_valid(self):
+ for agg_method in self.custom_agg:
+ window = '60s'
+ agg_obj = self.custom_agg[agg_method]
+ result = agg_obj.check_window_valid(window)
+ self.assertEqual(60.0, result)
+
+ window = '60'
+ agg_obj = self.custom_agg[agg_method]
+ result = agg_obj.check_window_valid(window)
+ self.assertEqual(60.0, result)
+
+ def _test_create_metric_and_data(self, data, spacing):
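+        # Create a metric under the 'medium' policy, feed it evenly
+        # spaced measures, and process them synchronously so the
+        # aggregates are immediately queryable.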
+ metric = storage.Metric(
+ uuid.uuid4(), self.archive_policies['medium'])
+ start_time = utils.datetime_utc(2014, 1, 1, 12)
+ incr = datetime.timedelta(seconds=spacing)
+ measures = [storage.Measure(
+ utils.dt_in_unix_ns(start_time + incr * n), val)
+ for n, val in enumerate(data)]
+ self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium')
+ self.storage.incoming.add_measures(metric, measures)
+ metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming)
+ self.storage.process_background_tasks(self.index, metrics, sync=True)
+
+ return metric
+
+ def test_retrieve_data(self):
+ metric = self._test_create_metric_and_data([69, 42, 6, 44, 7],
+ spacing=20)
+ for agg_method in self.custom_agg:
+ agg_obj = self.custom_agg[agg_method]
+ window = 90.0
+ self.assertRaises(aggregates.CustomAggFailure,
+ agg_obj.retrieve_data,
+ self.storage, metric,
+ start=None, stop=None,
+ window=window)
+
+ window = 120.0
+ result = pandas.Series()
+ grain, result = agg_obj.retrieve_data(self.storage, metric,
+ start=None, stop=None,
+ window=window)
+ self.assertEqual(60.0, grain)
+ self.assertEqual(39.0, result[datetime.datetime(2014, 1, 1, 12)])
+ self.assertEqual(25.5,
+ result[datetime.datetime(2014, 1, 1, 12, 1)])
+ self.storage.delete_metric(metric)
+
+ def test_compute_moving_average(self):
+ metric = self._test_create_metric_and_data([69, 42, 6, 44, 7],
+ spacing=20)
+ agg_obj = self.custom_agg['moving-average']
+ window = '120s'
+
+ center = 'False'
+ result = agg_obj.compute(self.storage, metric,
+ start=None, stop=None,
+ window=window, center=center)
+ expected = [(utils.datetime_utc(2014, 1, 1, 12), 120.0, 32.25)]
+ self.assertEqual(expected, result)
+
+ center = 'True'
+ result = agg_obj.compute(self.storage, metric,
+ start=None, stop=None,
+ window=window, center=center)
+
+ expected = [(utils.datetime_utc(2014, 1, 1, 12, 1), 120.0, 28.875)]
+ self.assertEqual(expected, result)
+        # FIXME(atmalagon): doing a centered average when there are
+        # only two points in the retrieved data seems weird; better to
+        # raise an error or return NaN in this case?
+
+ self.storage.delete_metric(metric)
diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py
new file mode 100644
index 00000000..3b2afb08
--- /dev/null
+++ b/gnocchi/tests/test_archive_policy.py
@@ -0,0 +1,98 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslotest import base
+
+from gnocchi import archive_policy
+from gnocchi import service
+
+
+class TestArchivePolicy(base.BaseTestCase):
+
+ def test_several_equal_granularities(self):
+ self.assertRaises(ValueError,
+ archive_policy.ArchivePolicy,
+ "foobar",
+ 0,
+ [(10, 12), (20, 30), (20, 30)],
+ ["*"])
+
+ def test_aggregation_methods(self):
+ conf = service.prepare_service([],
+ default_config_files=[])
+
+ ap = archive_policy.ArchivePolicy("foobar",
+ 0,
+ [],
+ ["*"])
+ self.assertEqual(
+ archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS,
+ ap.aggregation_methods)
+
+ ap = archive_policy.ArchivePolicy("foobar",
+ 0,
+ [],
+ ["last"])
+ self.assertEqual(
+ set(["last"]),
+ ap.aggregation_methods)
+
+ ap = archive_policy.ArchivePolicy("foobar",
+ 0,
+ [],
+ ["*", "-mean"])
+ self.assertEqual(
+ (archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS
+ - set(["mean"])),
+ ap.aggregation_methods)
+
+ ap = archive_policy.ArchivePolicy("foobar",
+ 0,
+ [],
+ ["-mean", "-last"])
+ self.assertEqual(
+ (set(conf.archive_policy.default_aggregation_methods)
+ - set(["mean", "last"])),
+ ap.aggregation_methods)
+
+ ap = archive_policy.ArchivePolicy("foobar",
+ 0,
+ [],
+ ["+12pct"])
+ self.assertEqual(
+ (set(conf.archive_policy.default_aggregation_methods)
+ .union(set(["12pct"]))),
+ ap.aggregation_methods)
+
+ def test_max_block_size(self):
+ ap = archive_policy.ArchivePolicy("foobar",
+ 0,
+ [(20, 60), (10, 300), (10, 5)],
+ ["-mean", "-last"])
+ self.assertEqual(ap.max_block_size, 300)
+
+
+class TestArchivePolicyItem(base.BaseTestCase):
+ def test_zero_size(self):
+ self.assertRaises(ValueError,
+ archive_policy.ArchivePolicyItem,
+ 0, 1)
+ self.assertRaises(ValueError,
+ archive_policy.ArchivePolicyItem,
+ 1, 0)
+ self.assertRaises(ValueError,
+ archive_policy.ArchivePolicyItem,
+ -1, 1)
+ self.assertRaises(ValueError,
+ archive_policy.ArchivePolicyItem,
+ 1, -1)
diff --git a/gnocchi/tests/test_bin.py b/gnocchi/tests/test_bin.py
new file mode 100644
index 00000000..e70bb865
--- /dev/null
+++ b/gnocchi/tests/test_bin.py
@@ -0,0 +1,24 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import subprocess
+
+from oslotest import base
+
+
+class BinTestCase(base.BaseTestCase):
+ def test_gnocchi_config_generator_run(self):
+ subp = subprocess.Popen(['gnocchi-config-generator'])
+ self.assertEqual(0, subp.wait())
diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py
new file mode 100644
index 00000000..82ec819a
--- /dev/null
+++ b/gnocchi/tests/test_carbonara.py
@@ -0,0 +1,1292 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2016 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import functools
+import math
+
+import fixtures
+import iso8601
+from oslotest import base
+import pandas
+import six
+
+from gnocchi import carbonara
+
+
+class TestBoundTimeSerie(base.BaseTestCase):
+ def test_benchmark(self):
+ self.useFixture(fixtures.Timeout(300, gentle=True))
+ carbonara.BoundTimeSerie.benchmark()
+
+ @staticmethod
+ def test_base():
+ carbonara.BoundTimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5, 6])
+
+ def test_block_size(self):
+ ts = carbonara.BoundTimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5, 6],
+ block_size='5s')
+ self.assertEqual(1, len(ts))
+ ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 10), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 11), 4)])
+ self.assertEqual(2, len(ts))
+
+ def test_block_size_back_window(self):
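+        # With block_size='5s' only the most recent block survives
+        # truncation; back_window=1 retains one extra block, so all
+        # three initial points are kept here.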
+ ts = carbonara.BoundTimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5, 6],
+ block_size='5s',
+ back_window=1)
+ self.assertEqual(3, len(ts))
+ ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 10), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 11), 4)])
+ self.assertEqual(3, len(ts))
+
+ def test_block_size_unordered(self):
+ ts = carbonara.BoundTimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 5),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [10, 5, 23],
+ block_size='5s')
+ self.assertEqual(2, len(ts))
+ ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 11), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 10), 4)])
+ self.assertEqual(2, len(ts))
+
+ def test_duplicate_timestamps(self):
+ ts = carbonara.BoundTimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [10, 23])
+ self.assertEqual(2, len(ts))
+ self.assertEqual(10.0, ts[0])
+ self.assertEqual(23.0, ts[1])
+
+ ts.set_values([(datetime.datetime(2014, 1, 1, 13, 0, 10), 3),
+ (datetime.datetime(2014, 1, 1, 13, 0, 11), 9),
+ (datetime.datetime(2014, 1, 1, 13, 0, 11), 8),
+ (datetime.datetime(2014, 1, 1, 13, 0, 11), 7),
+ (datetime.datetime(2014, 1, 1, 13, 0, 11), 4)])
+ self.assertEqual(4, len(ts))
+ self.assertEqual(10.0, ts[0])
+ self.assertEqual(23.0, ts[1])
+ self.assertEqual(3.0, ts[2])
+ self.assertEqual(4.0, ts[3])
+
+
+class TestAggregatedTimeSerie(base.BaseTestCase):
+ @staticmethod
+ def test_base():
+ carbonara.AggregatedTimeSerie.from_data(
+ 3, 'mean',
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5, 6])
+ carbonara.AggregatedTimeSerie.from_data(
+ "4s", 'mean',
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5, 6])
+
+ def test_benchmark(self):
+ self.useFixture(fixtures.Timeout(300, gentle=True))
+ carbonara.AggregatedTimeSerie.benchmark()
+
+ def test_fetch_basic(self):
+ ts = carbonara.AggregatedTimeSerie.from_data(
+ timestamps=[datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ aggregation_method='mean',
+ values=[3, 5, 6],
+ sampling="1s")
+ self.assertEqual(
+ [(datetime.datetime(2014, 1, 1, 12), 1, 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
+ ts.fetch())
+ self.assertEqual(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
+ ts.fetch(from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 4)))
+ self.assertEqual(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
+ ts.fetch(
+ from_timestamp=iso8601.parse_date(
+ "2014-01-01 12:00:04")))
+ self.assertEqual(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)],
+ ts.fetch(
+ from_timestamp=iso8601.parse_date(
+ "2014-01-01 13:00:04+01:00")))
+
+ def test_before_epoch(self):
+ ts = carbonara.TimeSerie.from_tuples(
+ [(datetime.datetime(1950, 1, 1, 12), 3),
+ (datetime.datetime(2014, 1, 1, 12), 5),
+ (datetime.datetime(2014, 1, 1, 12), 6)])
+
+ self.assertRaises(carbonara.BeforeEpochError,
+ ts.group_serie, 60)
+
+ @staticmethod
+ def _resample(ts, sampling, agg, max_size=None):
+ grouped = ts.group_serie(sampling)
+ return carbonara.AggregatedTimeSerie.from_grouped_serie(
+ grouped, sampling, agg, max_size=max_size)
+
+ def test_74_percentile_serialized(self):
+ ts = carbonara.TimeSerie.from_tuples(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 4), 5),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])
+ ts = self._resample(ts, 60, '74pct')
+
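+        # With values [3, 5, 6] the 74th percentile interpolates
+        # between 5 (the 50% rank) and 6 (the 100% rank):
+        # 5 + (0.74 - 0.50) / 0.50 * (6 - 5) = 5.48.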
+ self.assertEqual(1, len(ts))
+ self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])
+
+ # Serialize and unserialize
+ key = ts.get_split_key()
+ o, s = ts.serialize(key)
+ saved_ts = carbonara.AggregatedTimeSerie.unserialize(
+ s, key, '74pct', ts.sampling)
+
+ ts = carbonara.TimeSerie.from_tuples(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 4), 5),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])
+ ts = self._resample(ts, 60, '74pct')
+ ts.merge(saved_ts)
+
+ self.assertEqual(1, len(ts))
+ self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])
+
+ def test_95_percentile(self):
+ ts = carbonara.TimeSerie.from_tuples(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 4), 5),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])
+ ts = self._resample(ts, 60, '95pct')
+
+ self.assertEqual(1, len(ts))
+ self.assertEqual(5.9000000000000004,
+ ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])
+
+ def _do_test_aggregation(self, name, v1, v2):
+ ts = carbonara.TimeSerie.from_tuples(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 4), 6),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 5),
+ (datetime.datetime(2014, 1, 1, 12, 1, 4), 8),
+ (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)])
+ ts = self._resample(ts, 60, name)
+
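+        # Five points spread over two one-minute buckets (three in the
+        # first, two in the second); each aggregation method reduces a
+        # bucket to a single value.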
+ self.assertEqual(2, len(ts))
+ self.assertEqual(v1, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])
+ self.assertEqual(v2, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)])
+
+ def test_aggregation_first(self):
+ self._do_test_aggregation('first', 3, 8)
+
+ def test_aggregation_last(self):
+ self._do_test_aggregation('last', 5, 9)
+
+ def test_aggregation_count(self):
+ self._do_test_aggregation('count', 3, 2)
+
+ def test_aggregation_sum(self):
+ self._do_test_aggregation('sum', 14, 17)
+
+ def test_aggregation_mean(self):
+ self._do_test_aggregation('mean', 4.666666666666667, 8.5)
+
+ def test_aggregation_median(self):
+ self._do_test_aggregation('median', 5.0, 8.5)
+
+ def test_aggregation_min(self):
+ self._do_test_aggregation('min', 3, 8)
+
+ def test_aggregation_max(self):
+ self._do_test_aggregation('max', 6, 9)
+
+ def test_aggregation_std(self):
+ self._do_test_aggregation('std', 1.5275252316519465,
+ 0.70710678118654757)
+
+ def test_aggregation_std_with_unique(self):
+ ts = carbonara.TimeSerie.from_tuples(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3)])
+ ts = self._resample(ts, 60, 'std')
+ self.assertEqual(0, len(ts), ts.ts.values)
+
+ ts = carbonara.TimeSerie.from_tuples(
+ [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 4), 6),
+ (datetime.datetime(2014, 1, 1, 12, 0, 9), 5),
+ (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)])
+ ts = self._resample(ts, 60, "std")
+
+ self.assertEqual(1, len(ts))
+ self.assertEqual(1.5275252316519465,
+ ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])
+
+ def test_different_length_in_timestamps_and_data(self):
+ self.assertRaises(ValueError,
+ carbonara.AggregatedTimeSerie.from_data,
+ 3, 'mean',
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5])
+
+ def test_max_size(self):
+ ts = carbonara.TimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5, 6])
+ ts = self._resample(ts, 1, 'mean', max_size=2)
+
+ self.assertEqual(2, len(ts))
+ self.assertEqual(5, ts[0])
+ self.assertEqual(6, ts[1])
+
+ def test_down_sampling(self):
+ ts = carbonara.TimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9)],
+ [3, 5, 7])
+ ts = self._resample(ts, 300, 'mean')
+
+ self.assertEqual(1, len(ts))
+ self.assertEqual(5, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])
+
+ def test_down_sampling_with_max_size(self):
+ ts = carbonara.TimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 1, 4),
+ datetime.datetime(2014, 1, 1, 12, 1, 9),
+ datetime.datetime(2014, 1, 1, 12, 2, 12)],
+ [3, 5, 7, 1])
+ ts = self._resample(ts, 60, 'mean', max_size=2)
+
+ self.assertEqual(2, len(ts))
+ self.assertEqual(6, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)])
+ self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)])
+
+ def test_down_sampling_with_max_size_and_method_max(self):
+ ts = carbonara.TimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 1, 4),
+ datetime.datetime(2014, 1, 1, 12, 1, 9),
+ datetime.datetime(2014, 1, 1, 12, 2, 12)],
+ [3, 5, 70, 1])
+ ts = self._resample(ts, 60, 'max', max_size=2)
+
+ self.assertEqual(2, len(ts))
+ self.assertEqual(70, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)])
+ self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)])
+
+ @staticmethod
+ def _resample_and_merge(ts, agg_dict):
+ """Helper method that mimics _add_measures workflow."""
+ grouped = ts.group_serie(agg_dict['sampling'])
+ existing = agg_dict.get('return')
+ agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
+ grouped, agg_dict['sampling'], agg_dict['agg'],
+ max_size=agg_dict.get('size'))
+ if existing:
+ agg_dict['return'].merge(existing)
+
+ def test_aggregated_different_archive_no_overlap(self):
+ tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 60, 'size': 50, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
+ tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+ tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+ dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0)
+ self.assertRaises(carbonara.UnAggregableTimeseries,
+ carbonara.AggregatedTimeSerie.aggregated,
+ [tsc1['return'], tsc2['return']],
+ from_timestamp=dtfrom, aggregation='mean')
+
+ def test_aggregated_different_archive_no_overlap2(self):
+ tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50,
+ aggregation_method='mean')
+
+ tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+ self.assertRaises(carbonara.UnAggregableTimeseries,
+ carbonara.AggregatedTimeSerie.aggregated,
+ [tsc1['return'], tsc2], aggregation='mean')
+
+ def test_aggregated_different_archive_overlap(self):
+ tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
+        # NOTE(sileht): minute 8 is missing in both series and
+        # minute 7 in tsc2 too, but it looks like we have
+        # enough points to do the aggregation
+ tsb1.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 0, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 10),
+ (datetime.datetime(2014, 1, 1, 12, 9, 0), 2),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+
+ tsb2.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 6),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 6),
+ (datetime.datetime(2014, 1, 1, 12, 9, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 11, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 12, 0), 2),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+ dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0)
+ dtto = datetime.datetime(2014, 1, 1, 12, 10, 0)
+
+        # By default we require 100% of the points to overlap,
+        # so this fails
+ self.assertRaises(carbonara.UnAggregableTimeseries,
+ carbonara.AggregatedTimeSerie.aggregated,
+ [tsc1['return'], tsc2['return']],
+ from_timestamp=dtfrom,
+ to_timestamp=dtto, aggregation='mean')
+
+ # Retry with 80% and it works
+ output = carbonara.AggregatedTimeSerie.aggregated([
+ tsc1['return'], tsc2['return']],
+ from_timestamp=dtfrom, to_timestamp=dtto,
+ aggregation='mean', needed_percent_of_overlap=80.0)
+
+ self.assertEqual([
+ (datetime.datetime(
+ 2014, 1, 1, 12, 1, 0
+ ), 60.0, 3.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 2, 0
+ ), 60.0, 3.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 3, 0
+ ), 60.0, 4.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 4, 0
+ ), 60.0, 4.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 5, 0
+ ), 60.0, 3.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 6, 0
+ ), 60.0, 5.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 7, 0
+ ), 60.0, 10.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 9, 0
+ ), 60.0, 2.0),
+ ], output)
+
+ def test_aggregated_different_archive_overlap_edge_missing1(self):
+ tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
+ tsb1.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 9),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 1),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 7),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 5),
+ (datetime.datetime(2014, 1, 1, 12, 8, 0), 3),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+
+ tsb2.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 0, 0), 6),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 13),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 24),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 16),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 12),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+        # By default we require 100% of the points to overlap,
+        # but we allow the last datapoint of the finest
+        # granularity to be missing
+ output = carbonara.AggregatedTimeSerie.aggregated([
+ tsc1['return'], tsc2['return']], aggregation='sum')
+
+ self.assertEqual([
+ (datetime.datetime(
+ 2014, 1, 1, 12, 3, 0
+ ), 60.0, 33.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 4, 0
+ ), 60.0, 5.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 5, 0
+ ), 60.0, 18.0),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 6, 0
+ ), 60.0, 19.0),
+ ], output)
+
+ def test_aggregated_different_archive_overlap_edge_missing2(self):
+ tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
+ tsb1.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 4),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+
+ tsb2.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 0, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 4),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+ output = carbonara.AggregatedTimeSerie.aggregated(
+ [tsc1['return'], tsc2['return']], aggregation='mean')
+ self.assertEqual([
+ (datetime.datetime(
+ 2014, 1, 1, 12, 3, 0
+ ), 60.0, 4.0),
+ ], output)
+
+ def test_fetch(self):
+ ts = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4),
+ (datetime.datetime(2014, 1, 1, 11, 47, 34), 8),
+ (datetime.datetime(2014, 1, 1, 11, 50, 54), 50),
+ (datetime.datetime(2014, 1, 1, 11, 54, 45), 4),
+ (datetime.datetime(2014, 1, 1, 11, 56, 49), 4),
+ (datetime.datetime(2014, 1, 1, 11, 57, 22), 6),
+ (datetime.datetime(2014, 1, 1, 11, 58, 22), 5),
+ (datetime.datetime(2014, 1, 1, 12, 1, 4), 4),
+ (datetime.datetime(2014, 1, 1, 12, 1, 9), 7),
+ (datetime.datetime(2014, 1, 1, 12, 2, 1), 15),
+ (datetime.datetime(2014, 1, 1, 12, 2, 12), 1),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 4, 9), 7),
+ (datetime.datetime(2014, 1, 1, 12, 5, 1), 15),
+ (datetime.datetime(2014, 1, 1, 12, 5, 12), 1),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0, 2), 3),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 6), 5),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ self.assertEqual([
+ (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.0),
+ (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.0),
+ (datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.0),
+ (datetime.datetime(2014, 1, 1, 11, 58), 60.0, 5.0),
+ (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5),
+ (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0),
+ (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0),
+ (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0),
+ (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0),
+ (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0)
+ ], ts['return'].fetch())
+
+ self.assertEqual([
+ (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5),
+ (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0),
+ (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0),
+ (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0),
+ (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0),
+ (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0)
+ ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
+
+ def test_aggregated_some_overlap_with_fill_zero(self):
+ tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
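+        # fill=0 substitutes 0 for timestamps missing from one of the
+        # series before aggregating, so the usual overlap requirement
+        # does not apply; compare the fill='null' variant below, which
+        # ignores missing values instead.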
+ tsb1.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 9),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 1),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 7),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 5),
+ (datetime.datetime(2014, 1, 1, 12, 8, 0), 3),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+
+ tsb2.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 0), 6),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 13),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 24),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 16),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 12),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+ output = carbonara.AggregatedTimeSerie.aggregated([
+ tsc1['return'], tsc2['return']], aggregation='mean', fill=0)
+
+ self.assertEqual([
+ (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5),
+ (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5),
+ ], output)
+
+ def test_aggregated_some_overlap_with_fill_null(self):
+ tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
+ tsb1.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 9),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 1),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 7),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 5),
+ (datetime.datetime(2014, 1, 1, 12, 8, 0), 3),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+
+ tsb2.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 0), 6),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 13),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 24),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 4),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 16),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 12),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+ output = carbonara.AggregatedTimeSerie.aggregated([
+ tsc1['return'], tsc2['return']], aggregation='mean', fill='null')
+
+ self.assertEqual([
+ (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 6.0),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 2.0),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 13.0),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5),
+ (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 5.0),
+ (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 3.0),
+ ], output)
+
+ def test_aggregate_no_points_with_fill_zero(self):
+ tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
+ tsb1.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 9),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 1),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 5),
+ (datetime.datetime(2014, 1, 1, 12, 8, 0), 3),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+
+ tsb2.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 0), 6),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 2),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 13),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 24),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 4),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+ output = carbonara.AggregatedTimeSerie.aggregated([
+ tsc1['return'], tsc2['return']], aggregation='mean', fill=0)
+
+ self.assertEqual([
+ (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0),
+ (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0),
+ (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5),
+ (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5),
+ (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5),
+ (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5),
+ ], output)
+
+ def test_fetch_agg_pct(self):
+ ts = {'sampling': 1, 'size': 3600 * 24, 'agg': '90pct'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 0, 123), 4),
+ (datetime.datetime(2014, 1, 1, 12, 0, 2), 4)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))
+ reference = [
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 0
+ ), 1.0, 3.9),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 2
+ ), 1.0, 4)
+ ]
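+        # The 12:00:00 bucket holds [3, 4]; with linear interpolation the
+        # 90th percentile is 3 + 0.9 * (4 - 3) == 3.9.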
+
+ self.assertEqual(len(reference), len(result))
+
+ for ref, res in zip(reference, result):
+ self.assertEqual(ref[0], res[0])
+ self.assertEqual(ref[1], res[1])
+            # Percentile values are float-rounded, hence assertAlmostEqual
+ self.assertAlmostEqual(ref[2], res[2])
+
+ tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 2, 113), 110)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))
+ reference = [
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 0
+ ), 1.0, 3.9),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 2
+ ), 1.0, 99.4)
+ ]
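+        # The 12:00:02 bucket now holds [4, 110], so the 90th percentile
+        # becomes 4 + 0.9 * (110 - 4) == 99.4.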
+
+ self.assertEqual(len(reference), len(result))
+
+ for ref, res in zip(reference, result):
+ self.assertEqual(ref[0], res[0])
+ self.assertEqual(ref[1], res[1])
+            # Percentile values are float-rounded, hence assertAlmostEqual
+ self.assertAlmostEqual(ref[2], res[2])
+
+ def test_fetch_nano(self):
+ ts = {'sampling': 0.2, 'size': 10, 'agg': 'mean'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 46, 0, 200123), 4),
+ (datetime.datetime(2014, 1, 1, 11, 46, 0, 340000), 8),
+ (datetime.datetime(2014, 1, 1, 11, 47, 0, 323154), 50),
+ (datetime.datetime(2014, 1, 1, 11, 48, 0, 590903), 4),
+ (datetime.datetime(2014, 1, 1, 11, 48, 0, 903291), 4),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 48, 0, 821312), 5),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
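+        # With 0.2s sampling, timestamps are floored to 200ms buckets:
+        # .200123 and .340000 share the .200000 bucket (mean 6.0), and
+        # .821312 joins .903291 in the .800000 bucket (mean 4.5).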
+ self.assertEqual([
+ (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), 0.2, 6.0),
+ (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0),
+ (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0),
+ (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5)
+ ], ts['return'].fetch())
+
+ def test_fetch_agg_std(self):
+        # NOTE(gordc): this is a good test to ensure we drop NaN entries;
+        # 2014-01-01 12:00:00 would appear if we didn't dropna()
+ ts = {'sampling': 60, 'size': 60, 'agg': 'std'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 1, 4), 4),
+ (datetime.datetime(2014, 1, 1, 12, 1, 9), 7),
+ (datetime.datetime(2014, 1, 1, 12, 2, 1), 15),
+ (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
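+        # The std is the sample standard deviation (ddof=1): the 12:01
+        # bucket holds [4, 7] giving sqrt(4.5), the 12:02 bucket holds
+        # [15, 1] giving sqrt(98); the lone point at 12:00 yields NaN and
+        # is dropped.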
+ self.assertEqual([
+ (datetime.datetime(
+ 2014, 1, 1, 12, 1, 0
+ ), 60.0, 2.1213203435596424),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 2, 0
+ ), 60.0, 9.8994949366116654),
+ ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
+
+ tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
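+        # The 12:02 bucket now holds [15, 1, 110]: mean 42, squared
+        # deviations 729 + 1681 + 4624 = 7034, std sqrt(7034 / 2) ~= 59.30.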
+ self.assertEqual([
+ (datetime.datetime(
+ 2014, 1, 1, 12, 1, 0
+ ), 60.0, 2.1213203435596424),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 2, 0
+ ), 60.0, 59.304300012730948),
+ ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
+
+ def test_fetch_agg_max(self):
+ ts = {'sampling': 60, 'size': 60, 'agg': 'max'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 1, 4), 4),
+ (datetime.datetime(2014, 1, 1, 12, 1, 9), 7),
+ (datetime.datetime(2014, 1, 1, 12, 2, 1), 15),
+ (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ self.assertEqual([
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 0
+ ), 60.0, 3),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 1, 0
+ ), 60.0, 7),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 2, 0
+ ), 60.0, 15),
+ ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
+
+ tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)],
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ self.assertEqual([
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 0
+ ), 60.0, 3),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 1, 0
+ ), 60.0, 7),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 2, 0
+ ), 60.0, 110),
+ ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
+
+ def test_serialize(self):
+ ts = {'sampling': 0.5, 'agg': 'mean'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 0, 1234), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 0, 321), 6),
+ (datetime.datetime(2014, 1, 1, 12, 1, 4, 234), 5),
+ (datetime.datetime(2014, 1, 1, 12, 1, 9, 32), 7),
+ (datetime.datetime(2014, 1, 1, 12, 2, 12, 532), 1),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ key = ts['return'].get_split_key()
+ o, s = ts['return'].serialize(key)
+ self.assertEqual(ts['return'],
+ carbonara.AggregatedTimeSerie.unserialize(
+ s, key,
+ 'mean', 0.5))
+
+ def test_no_truncation(self):
+ ts = {'sampling': 60, 'agg': 'mean'}
+ tsb = carbonara.BoundTimeSerie()
+
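+        # No block_size is set, so the bound serie never truncates: after
+        # iteration i, all i minute buckets must still be present.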
+ for i in six.moves.range(1, 11):
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, i, i), float(i))
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, i, i + 1), float(i + 1))
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+ self.assertEqual(i, len(ts['return'].fetch()))
+
+ def test_back_window(self):
+ """Back window testing.
+
+        Test that the back window on an archive is not longer than the
+        window we aggregate on.
+ """
+ ts = {'sampling': 1, 'size': 60, 'agg': 'mean'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1),
+ (datetime.datetime(2014, 1, 1, 12, 0, 1, 4600), 2),
+ (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4),
+ (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ self.assertEqual(
+ [
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 1
+ ), 1.0, 1.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 2
+ ), 1.0, 3.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 3
+ ), 1.0, 2.5),
+ ],
+ ts['return'].fetch())
+
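+        # With a 1s block size and no back window, the serie's first valid
+        # timestamp has advanced to 12:00:03, so an older point must be
+        # rejected.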
+ try:
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9),
+ ])
+ except carbonara.NoDeloreanAvailable as e:
+ self.assertEqual(
+ six.text_type(e),
+ u"2014-01-01 12:00:02.000099 is before 2014-01-01 12:00:03")
+ self.assertEqual(datetime.datetime(2014, 1, 1, 12, 0, 2, 99),
+ e.bad_timestamp)
+ self.assertEqual(datetime.datetime(2014, 1, 1, 12, 0, 3),
+ e.first_timestamp)
+ else:
+ self.fail("No exception raised")
+
+ def test_back_window_ignore(self):
+ """Back window testing.
+
+        Test that timestamps older than the back window are silently
+        ignored when ignore_too_old_timestamps is set.
+ """
+ ts = {'sampling': 1, 'size': 60, 'agg': 'mean'}
+ tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1),
+ (datetime.datetime(2014, 1, 1, 12, 0, 1, 4600), 2),
+ (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3),
+ (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4),
+ (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ self.assertEqual(
+ [
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 1
+ ), 1.0, 1.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 2
+ ), 1.0, 3.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 3
+ ), 1.0, 2.5),
+ ],
+ ts['return'].fetch())
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9),
+ ], ignore_too_old_timestamps=True,
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ self.assertEqual(
+ [
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 1
+ ), 1.0, 1.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 2
+ ), 1.0, 3.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 3
+ ), 1.0, 2.5),
+ ],
+ ts['return'].fetch())
+
+ tsb.set_values([
+ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9),
+ (datetime.datetime(2014, 1, 1, 12, 0, 3, 9), 4.5),
+ ], ignore_too_old_timestamps=True,
+ before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=ts))
+
+ self.assertEqual(
+ [
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 1
+ ), 1.0, 1.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 2
+ ), 1.0, 3.5),
+ (datetime.datetime(
+ 2014, 1, 1, 12, 0, 3
+ ), 1.0, 3.5),
+ ],
+ ts['return'].fetch())
+
+ def test_aggregated_nominal(self):
+ tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsc12 = {'sampling': 300, 'size': 6, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling'])
+ tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
+ tsc22 = {'sampling': 300, 'size': 6, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling'])
+
+ def ts1_update(ts):
+ grouped = ts.group_serie(tsc1['sampling'])
+ existing = tsc1.get('return')
+ tsc1['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
+ grouped, tsc1['sampling'], tsc1['agg'],
+ max_size=tsc1['size'])
+ if existing:
+ tsc1['return'].merge(existing)
+ grouped = ts.group_serie(tsc12['sampling'])
+ existing = tsc12.get('return')
+ tsc12['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
+ grouped, tsc12['sampling'], tsc12['agg'],
+ max_size=tsc12['size'])
+ if existing:
+ tsc12['return'].merge(existing)
+
+ def ts2_update(ts):
+ grouped = ts.group_serie(tsc2['sampling'])
+ existing = tsc2.get('return')
+ tsc2['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
+ grouped, tsc2['sampling'], tsc2['agg'],
+ max_size=tsc2['size'])
+ if existing:
+ tsc2['return'].merge(existing)
+ grouped = ts.group_serie(tsc22['sampling'])
+ existing = tsc22.get('return')
+ tsc22['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
+ grouped, tsc22['sampling'], tsc22['agg'],
+ max_size=tsc22['size'])
+ if existing:
+ tsc22['return'].merge(existing)
+
+ tsb1.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4),
+ (datetime.datetime(2014, 1, 1, 11, 47, 34), 8),
+ (datetime.datetime(2014, 1, 1, 11, 50, 54), 50),
+ (datetime.datetime(2014, 1, 1, 11, 54, 45), 4),
+ (datetime.datetime(2014, 1, 1, 11, 56, 49), 4),
+ (datetime.datetime(2014, 1, 1, 11, 57, 22), 6),
+ (datetime.datetime(2014, 1, 1, 11, 58, 22), 5),
+ (datetime.datetime(2014, 1, 1, 12, 1, 4), 4),
+ (datetime.datetime(2014, 1, 1, 12, 1, 9), 7),
+ (datetime.datetime(2014, 1, 1, 12, 2, 1), 15),
+ (datetime.datetime(2014, 1, 1, 12, 2, 12), 1),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 3),
+ (datetime.datetime(2014, 1, 1, 12, 4, 9), 7),
+ (datetime.datetime(2014, 1, 1, 12, 5, 1), 15),
+ (datetime.datetime(2014, 1, 1, 12, 5, 12), 1),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 3),
+ ], before_truncate_callback=ts1_update)
+
+ tsb2.set_values([
+ (datetime.datetime(2014, 1, 1, 11, 46, 4), 6),
+ (datetime.datetime(2014, 1, 1, 11, 47, 34), 5),
+ (datetime.datetime(2014, 1, 1, 11, 50, 54), 51),
+ (datetime.datetime(2014, 1, 1, 11, 54, 45), 5),
+ (datetime.datetime(2014, 1, 1, 11, 56, 49), 5),
+ (datetime.datetime(2014, 1, 1, 11, 57, 22), 7),
+ (datetime.datetime(2014, 1, 1, 11, 58, 22), 5),
+ (datetime.datetime(2014, 1, 1, 12, 1, 4), 5),
+ (datetime.datetime(2014, 1, 1, 12, 1, 9), 8),
+ (datetime.datetime(2014, 1, 1, 12, 2, 1), 10),
+ (datetime.datetime(2014, 1, 1, 12, 2, 12), 2),
+ (datetime.datetime(2014, 1, 1, 12, 3, 0), 6),
+ (datetime.datetime(2014, 1, 1, 12, 4, 9), 4),
+ (datetime.datetime(2014, 1, 1, 12, 5, 1), 10),
+ (datetime.datetime(2014, 1, 1, 12, 5, 12), 1),
+ (datetime.datetime(2014, 1, 1, 12, 6, 0), 1),
+ ], before_truncate_callback=ts2_update)
+
+ output = carbonara.AggregatedTimeSerie.aggregated(
+ [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']],
+ 'mean')
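+        # aggregated() computes the cross-series mean separately for each
+        # granularity, hence the mix of 300s and 60s rows below.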
+ self.assertEqual([
+ (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 5.75),
+ (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.5),
+ (datetime.datetime(2014, 1, 1, 11, 55), 300.0, 5.3333333333333339),
+ (datetime.datetime(2014, 1, 1, 12, 0), 300.0, 6.0),
+ (datetime.datetime(2014, 1, 1, 12, 5), 300.0, 5.1666666666666661),
+ (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.5),
+ (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.5),
+ (datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.5),
+ (datetime.datetime(2014, 1, 1, 11, 58), 60.0, 5.0),
+ (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 6.0),
+ (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 7.0),
+ (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 4.5),
+ (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 5.5),
+ (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 6.75),
+ (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 2.0),
+ ], output)
+
+ def test_aggregated_partial_overlap(self):
+ tsc1 = {'sampling': 1, 'size': 86400, 'agg': 'mean'}
+ tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
+ tsc2 = {'sampling': 1, 'size': 60, 'agg': 'mean'}
+ tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])
+
+ tsb1.set_values([
+ (datetime.datetime(2015, 12, 3, 13, 19, 15), 1),
+ (datetime.datetime(2015, 12, 3, 13, 20, 15), 1),
+ (datetime.datetime(2015, 12, 3, 13, 21, 15), 1),
+ (datetime.datetime(2015, 12, 3, 13, 22, 15), 1),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc1))
+
+ tsb2.set_values([
+ (datetime.datetime(2015, 12, 3, 13, 21, 15), 10),
+ (datetime.datetime(2015, 12, 3, 13, 22, 15), 10),
+ (datetime.datetime(2015, 12, 3, 13, 23, 15), 10),
+ (datetime.datetime(2015, 12, 3, 13, 24, 15), 10),
+ ], before_truncate_callback=functools.partial(
+ self._resample_and_merge, agg_dict=tsc2))
+
+ output = carbonara.AggregatedTimeSerie.aggregated(
+ [tsc1['return'], tsc2['return']], aggregation="sum")
+
+ self.assertEqual([
+ (datetime.datetime(
+ 2015, 12, 3, 13, 21, 15
+ ), 1.0, 11.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 22, 15
+ ), 1.0, 11.0),
+ ], output)
+
+ dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0)
+ dtto = datetime.datetime(2015, 12, 3, 13, 25, 0)
+
+ output = carbonara.AggregatedTimeSerie.aggregated(
+ [tsc1['return'], tsc2['return']],
+ from_timestamp=dtfrom, to_timestamp=dtto,
+ aggregation="sum", needed_percent_of_overlap=0)
+
+ self.assertEqual([
+ (datetime.datetime(
+ 2015, 12, 3, 13, 19, 15
+ ), 1.0, 1.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 20, 15
+ ), 1.0, 1.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 21, 15
+ ), 1.0, 11.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 22, 15
+ ), 1.0, 11.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 23, 15
+ ), 1.0, 10.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 24, 15
+ ), 1.0, 10.0),
+ ], output)
+
+        # By default we require 100% of the points to overlap, so the
+        # aggregation fails when from or to extends past the common window
+ self.assertRaises(carbonara.UnAggregableTimeseries,
+ carbonara.AggregatedTimeSerie.aggregated,
+ [tsc1['return'], tsc2['return']],
+ to_timestamp=dtto, aggregation='mean')
+ self.assertRaises(carbonara.UnAggregableTimeseries,
+ carbonara.AggregatedTimeSerie.aggregated,
+ [tsc1['return'], tsc2['return']],
+ from_timestamp=dtfrom, aggregation='mean')
+
+        # Retry with only 50% overlap required and it works
+ output = carbonara.AggregatedTimeSerie.aggregated(
+ [tsc1['return'], tsc2['return']], from_timestamp=dtfrom,
+ aggregation="sum",
+ needed_percent_of_overlap=50.0)
+ self.assertEqual([
+ (datetime.datetime(
+ 2015, 12, 3, 13, 19, 15
+ ), 1.0, 1.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 20, 15
+ ), 1.0, 1.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 21, 15
+ ), 1.0, 11.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 22, 15
+ ), 1.0, 11.0),
+ ], output)
+
+ output = carbonara.AggregatedTimeSerie.aggregated(
+ [tsc1['return'], tsc2['return']], to_timestamp=dtto,
+ aggregation="sum",
+ needed_percent_of_overlap=50.0)
+ self.assertEqual([
+ (datetime.datetime(
+ 2015, 12, 3, 13, 21, 15
+ ), 1.0, 11.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 22, 15
+ ), 1.0, 11.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 23, 15
+ ), 1.0, 10.0),
+ (datetime.datetime(
+ 2015, 12, 3, 13, 24, 15
+ ), 1.0, 10.0),
+ ], output)
+
+ def test_split_key(self):
+ self.assertEqual(
+ datetime.datetime(2014, 10, 7),
+ carbonara.SplitKey.from_timestamp_and_sampling(
+ datetime.datetime(2015, 1, 1, 15, 3), 3600).as_datetime())
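+        # A split spans sampling * SplitKey.POINTS_PER_SPLIT seconds and is
+        # aligned on multiples of that span since the epoch: 3600 * 3600s
+        # is 150 days, whose boundary before 2015-01-01 is 2014-10-07.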
+ self.assertEqual(
+ datetime.datetime(2014, 12, 31, 18),
+ carbonara.SplitKey.from_timestamp_and_sampling(
+ datetime.datetime(2015, 1, 1, 15, 3), 58).as_datetime())
+ self.assertEqual(
+ 1420048800.0,
+ float(carbonara.SplitKey.from_timestamp_and_sampling(
+ datetime.datetime(2015, 1, 1, 15, 3), 58)))
+
+ key = carbonara.SplitKey.from_timestamp_and_sampling(
+ datetime.datetime(2015, 1, 1, 15, 3), 3600)
+
+ self.assertGreater(key, pandas.Timestamp(0))
+
+ self.assertGreaterEqual(key, pandas.Timestamp(0))
+
+ def test_split_key_next(self):
+ self.assertEqual(
+ datetime.datetime(2015, 3, 6),
+ next(carbonara.SplitKey.from_timestamp_and_sampling(
+ datetime.datetime(2015, 1, 1, 15, 3), 3600)).as_datetime())
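+        # next() advances a key by one split span, here 3600 * 3600s,
+        # i.e. 150 days: 2014-10-07 -> 2015-03-06 -> 2015-08-03.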
+ self.assertEqual(
+ datetime.datetime(2015, 8, 3),
+ next(next(carbonara.SplitKey.from_timestamp_and_sampling(
+ datetime.datetime(2015, 1, 1, 15, 3), 3600))).as_datetime())
+ self.assertEqual(
+ 113529600000.0,
+ float(next(carbonara.SplitKey.from_timestamp_and_sampling(
+ datetime.datetime(2015, 1, 1, 15, 3), 3600 * 24 * 365))))
+
+ def test_split(self):
+ sampling = 5
+ points = 100000
+ ts = carbonara.TimeSerie.from_data(
+ timestamps=map(datetime.datetime.utcfromtimestamp,
+ six.moves.range(points)),
+ values=six.moves.range(points))
+ agg = self._resample(ts, sampling, 'mean')
+
+ grouped_points = list(agg.split())
+
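+        # 100000 points resampled at 5s yield 20000 aggregated points,
+        # i.e. ceil(20000 / 3600) == 6 splits of at most
+        # SplitKey.POINTS_PER_SPLIT points each.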
+ self.assertEqual(
+ math.ceil((points / float(sampling))
+ / carbonara.SplitKey.POINTS_PER_SPLIT),
+ len(grouped_points))
+ self.assertEqual("0.0",
+ str(carbonara.SplitKey(grouped_points[0][0], 0)))
+        # one split holds 3600 points × 5s = 5 hours, so the second split
+        # starts at 05:00
+ self.assertEqual(datetime.datetime(1970, 1, 1, 5),
+ grouped_points[1][0].as_datetime())
+ self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT,
+ len(grouped_points[0][1]))
+
+ def test_from_timeseries(self):
+ sampling = 5
+ points = 100000
+ ts = carbonara.TimeSerie.from_data(
+ timestamps=map(datetime.datetime.utcfromtimestamp,
+ six.moves.range(points)),
+ values=six.moves.range(points))
+ agg = self._resample(ts, sampling, 'mean')
+
+ split = [t[1] for t in list(agg.split())]
+
+ self.assertEqual(agg,
+ carbonara.AggregatedTimeSerie.from_timeseries(
+ split,
+ sampling=agg.sampling,
+ max_size=agg.max_size,
+ aggregation_method=agg.aggregation_method))
+
+ def test_resample(self):
+ ts = carbonara.TimeSerie.from_data(
+ [datetime.datetime(2014, 1, 1, 12, 0, 0),
+ datetime.datetime(2014, 1, 1, 12, 0, 4),
+ datetime.datetime(2014, 1, 1, 12, 0, 9),
+ datetime.datetime(2014, 1, 1, 12, 0, 11),
+ datetime.datetime(2014, 1, 1, 12, 0, 12)],
+ [3, 5, 6, 2, 4])
+ agg_ts = self._resample(ts, 5, 'mean')
+ self.assertEqual(3, len(agg_ts))
+
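+        # 5s buckets: [3, 5] -> 4, [6] -> 6, [2, 4] -> 3; resampling those
+        # means to 10s gives [4, 6] -> 5 and [3] -> 3.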
+ agg_ts = agg_ts.resample(10)
+ self.assertEqual(2, len(agg_ts))
+ self.assertEqual(5, agg_ts[0])
+ self.assertEqual(3, agg_ts[1])
diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py
new file mode 100644
index 00000000..f6a29263
--- /dev/null
+++ b/gnocchi/tests/test_indexer.py
@@ -0,0 +1,1245 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import operator
+import uuid
+
+import mock
+
+from gnocchi import archive_policy
+from gnocchi import indexer
+from gnocchi.tests import base as tests_base
+from gnocchi import utils
+
+
+class MockException(Exception):
+ pass
+
+
+class TestIndexer(tests_base.TestCase):
+ def test_get_driver(self):
+ driver = indexer.get_driver(self.conf)
+ self.assertIsInstance(driver, indexer.IndexerDriver)
+
+
+class TestIndexerDriver(tests_base.TestCase):
+
+ def test_create_archive_policy_already_exists(self):
+ # NOTE(jd) This archive policy
+ # is created by gnocchi.tests on setUp() :)
+ self.assertRaises(indexer.ArchivePolicyAlreadyExists,
+ self.index.create_archive_policy,
+ archive_policy.ArchivePolicy("high", 0, {}))
+
+ def test_get_archive_policy(self):
+ ap = self.index.get_archive_policy("low")
+ self.assertEqual({
+ 'back_window': 0,
+ 'aggregation_methods':
+ set(self.conf.archive_policy.default_aggregation_methods),
+ 'definition': [
+ {u'granularity': 300, u'points': 12, u'timespan': 3600},
+ {u'granularity': 3600, u'points': 24, u'timespan': 86400},
+ {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
+ 'name': u'low'}, dict(ap))
+
+ def test_update_archive_policy(self):
+ self.assertRaises(indexer.UnsupportedArchivePolicyChange,
+ self.index.update_archive_policy, "low",
+ [archive_policy.ArchivePolicyItem(granularity=300,
+ points=10)])
+ self.assertRaises(indexer.UnsupportedArchivePolicyChange,
+ self.index.update_archive_policy, "low",
+ [archive_policy.ArchivePolicyItem(granularity=300,
+ points=12),
+ archive_policy.ArchivePolicyItem(granularity=3600,
+ points=12),
+ archive_policy.ArchivePolicyItem(granularity=5,
+ points=6)])
+ apname = str(uuid.uuid4())
+ self.index.create_archive_policy(archive_policy.ArchivePolicy(
+ apname, 0, [(12, 300), (24, 3600), (30, 86400)]))
+ ap = self.index.update_archive_policy(
+ apname, [archive_policy.ArchivePolicyItem(granularity=300,
+ points=6),
+ archive_policy.ArchivePolicyItem(granularity=3600,
+ points=24),
+ archive_policy.ArchivePolicyItem(granularity=86400,
+ points=30)])
+ self.assertEqual({
+ 'back_window': 0,
+ 'aggregation_methods':
+ set(self.conf.archive_policy.default_aggregation_methods),
+ 'definition': [
+ {u'granularity': 300, u'points': 6, u'timespan': 1800},
+ {u'granularity': 3600, u'points': 24, u'timespan': 86400},
+ {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
+ 'name': apname}, dict(ap))
+ ap = self.index.update_archive_policy(
+ apname, [archive_policy.ArchivePolicyItem(granularity=300,
+ points=12),
+ archive_policy.ArchivePolicyItem(granularity=3600,
+ points=24),
+ archive_policy.ArchivePolicyItem(granularity=86400,
+ points=30)])
+ self.assertEqual({
+ 'back_window': 0,
+ 'aggregation_methods':
+ set(self.conf.archive_policy.default_aggregation_methods),
+ 'definition': [
+ {u'granularity': 300, u'points': 12, u'timespan': 3600},
+ {u'granularity': 3600, u'points': 24, u'timespan': 86400},
+ {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
+ 'name': apname}, dict(ap))
+
+ def test_delete_archive_policy(self):
+ name = str(uuid.uuid4())
+ self.index.create_archive_policy(
+ archive_policy.ArchivePolicy(name, 0, {}))
+ self.index.delete_archive_policy(name)
+ self.assertRaises(indexer.NoSuchArchivePolicy,
+ self.index.delete_archive_policy,
+ name)
+ self.assertRaises(indexer.NoSuchArchivePolicy,
+ self.index.delete_archive_policy,
+ str(uuid.uuid4()))
+ metric_id = uuid.uuid4()
+ self.index.create_metric(metric_id, str(uuid.uuid4()), "low")
+ self.assertRaises(indexer.ArchivePolicyInUse,
+ self.index.delete_archive_policy,
+ "low")
+ self.index.delete_metric(metric_id)
+
+ def test_list_ap_rules_ordered(self):
+ name = str(uuid.uuid4())
+ self.index.create_archive_policy(
+ archive_policy.ArchivePolicy(name, 0, {}))
+ self.index.create_archive_policy_rule('rule1', 'abc.*', name)
+ self.index.create_archive_policy_rule('rule2', 'abc.xyz.*', name)
+ self.index.create_archive_policy_rule('rule3', 'abc.xyz', name)
+ rules = self.index.list_archive_policy_rules()
+ # NOTE(jd) The test is not isolated, there might be more than 3 rules
+ found = 0
+ for r in rules:
+ if r['metric_pattern'] == 'abc.xyz.*':
+ found = 1
+ if found == 1 and r['metric_pattern'] == 'abc.xyz':
+ found = 2
+ if found == 2 and r['metric_pattern'] == 'abc.*':
+ break
+ else:
+ self.fail("Metric patterns are not ordered")
+
+ # Ensure we can't delete the archive policy
+ self.assertRaises(indexer.ArchivePolicyInUse,
+ self.index.delete_archive_policy, name)
+
+ def test_create_metric(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ m = self.index.create_metric(r1, creator, "low")
+ self.assertEqual(r1, m.id)
+ self.assertEqual(m.creator, creator)
+ self.assertIsNone(m.name)
+ self.assertIsNone(m.unit)
+ self.assertIsNone(m.resource_id)
+ m2 = self.index.list_metrics(id=r1)
+ self.assertEqual([m], m2)
+
+ def test_create_named_metric_duplicate(self):
+ m1 = uuid.uuid4()
+ r1 = uuid.uuid4()
+ name = "foobar"
+ creator = str(uuid.uuid4())
+ self.index.create_resource('generic', r1, creator)
+ m = self.index.create_metric(m1, creator, "low",
+ name=name,
+ resource_id=r1)
+ self.assertEqual(m1, m.id)
+ self.assertEqual(m.creator, creator)
+ self.assertEqual(name, m.name)
+ self.assertEqual(r1, m.resource_id)
+ m2 = self.index.list_metrics(id=m1)
+ self.assertEqual([m], m2)
+
+ self.assertRaises(indexer.NamedMetricAlreadyExists,
+ self.index.create_metric, m1, creator, "low",
+ name=name, resource_id=r1)
+
+ def test_expunge_metric(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ m = self.index.create_metric(r1, creator, "low")
+ self.index.delete_metric(m.id)
+ try:
+ self.index.expunge_metric(m.id)
+ except indexer.NoSuchMetric:
+ # It's possible another test process expunged the metric just
+            # before us; in that case we're fine, and we just check that the
+            # next calls really do raise NoSuchMetric anyway
+ pass
+ self.assertRaises(indexer.NoSuchMetric,
+ self.index.delete_metric,
+ m.id)
+ self.assertRaises(indexer.NoSuchMetric,
+ self.index.expunge_metric,
+ m.id)
+
+ def test_create_resource(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ rc = self.index.create_resource('generic', r1, creator)
+ self.assertIsNotNone(rc.started_at)
+ self.assertIsNotNone(rc.revision_start)
+ self.assertEqual({"id": r1,
+ "revision_start": rc.revision_start,
+ "revision_end": None,
+ "creator": creator,
+ "created_by_user_id": creator,
+ "created_by_project_id": "",
+ "user_id": None,
+ "project_id": None,
+ "started_at": rc.started_at,
+ "ended_at": None,
+ "original_resource_id": str(r1),
+ "type": "generic",
+ "metrics": {}},
+ rc.jsonify())
+ rg = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertEqual(rc, rg)
+ self.assertEqual(rc.metrics, rg.metrics)
+
+ def test_create_resource_with_original_resource_id(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ rc = self.index.create_resource('generic', r1, creator,
+ original_resource_id="foobar")
+ self.assertIsNotNone(rc.started_at)
+ self.assertIsNotNone(rc.revision_start)
+ self.assertEqual({"id": r1,
+ "revision_start": rc.revision_start,
+ "revision_end": None,
+ "creator": creator,
+ "created_by_user_id": creator,
+ "created_by_project_id": "",
+ "user_id": None,
+ "project_id": None,
+ "started_at": rc.started_at,
+ "ended_at": None,
+ "original_resource_id": "foobar",
+ "type": "generic",
+ "metrics": {}},
+ rc.jsonify())
+ rg = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertEqual(rc, rg)
+ self.assertEqual(rc.metrics, rg.metrics)
+
+ def test_split_user_project_for_legacy_reasons(self):
+ r1 = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ creator = user + ":" + project
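+        # A creator of the form "user:project" is split into
+        # created_by_user_id and created_by_project_id for backward
+        # compatibility.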
+ rc = self.index.create_resource('generic', r1, creator)
+ self.assertIsNotNone(rc.started_at)
+ self.assertIsNotNone(rc.revision_start)
+ self.assertEqual({"id": r1,
+ "revision_start": rc.revision_start,
+ "revision_end": None,
+ "creator": creator,
+ "created_by_user_id": user,
+ "created_by_project_id": project,
+ "user_id": None,
+ "project_id": None,
+ "started_at": rc.started_at,
+ "ended_at": None,
+ "original_resource_id": str(r1),
+ "type": "generic",
+ "metrics": {}},
+ rc.jsonify())
+ rg = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertEqual(rc, rg)
+ self.assertEqual(rc.metrics, rg.metrics)
+
+ def test_create_non_existent_metric(self):
+ e = uuid.uuid4()
+ try:
+ self.index.create_resource(
+ 'generic', uuid.uuid4(), str(uuid.uuid4()), str(uuid.uuid4()),
+ metrics={"foo": e})
+ except indexer.NoSuchMetric as ex:
+ self.assertEqual(e, ex.metric)
+ else:
+ self.fail("Exception not raised")
+
+ def test_create_resource_already_exists(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_resource('generic', r1, creator)
+ self.assertRaises(indexer.ResourceAlreadyExists,
+ self.index.create_resource,
+ 'generic', r1, creator)
+
+ def test_create_resource_with_new_metrics(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ rc = self.index.create_resource(
+ 'generic', r1, creator,
+ metrics={"foobar": {"archive_policy_name": "low"}})
+ self.assertEqual(1, len(rc.metrics))
+ m = self.index.list_metrics(id=rc.metrics[0].id)
+ self.assertEqual(m[0], rc.metrics[0])
+
+ def test_delete_resource(self):
+ r1 = uuid.uuid4()
+ self.index.create_resource('generic', r1, str(uuid.uuid4()),
+ str(uuid.uuid4()))
+ self.index.delete_resource(r1)
+ self.assertRaises(indexer.NoSuchResource,
+ self.index.delete_resource,
+ r1)
+
+ def test_delete_resource_with_metrics(self):
+ creator = str(uuid.uuid4())
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ self.index.create_metric(e1, creator, archive_policy_name="low")
+ self.index.create_metric(e2, creator, archive_policy_name="low")
+ r1 = uuid.uuid4()
+ self.index.create_resource('generic', r1, creator,
+ metrics={'foo': e1, 'bar': e2})
+ self.index.delete_resource(r1)
+ self.assertRaises(indexer.NoSuchResource,
+ self.index.delete_resource,
+ r1)
+ metrics = self.index.list_metrics(ids=[e1, e2])
+ self.assertEqual([], metrics)
+
+ def test_delete_resource_non_existent(self):
+ r1 = uuid.uuid4()
+ self.assertRaises(indexer.NoSuchResource,
+ self.index.delete_resource,
+ r1)
+
+ def test_create_resource_with_start_timestamp(self):
+ r1 = uuid.uuid4()
+ ts = utils.datetime_utc(2014, 1, 1, 23, 34, 23, 1234)
+ creator = str(uuid.uuid4())
+ rc = self.index.create_resource('generic', r1, creator, started_at=ts)
+ self.assertEqual({"id": r1,
+ "revision_start": rc.revision_start,
+ "revision_end": None,
+ "creator": creator,
+ "created_by_user_id": creator,
+ "created_by_project_id": "",
+ "user_id": None,
+ "project_id": None,
+ "started_at": ts,
+ "ended_at": None,
+ "original_resource_id": str(r1),
+ "type": "generic",
+ "metrics": {}}, rc.jsonify())
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertEqual(rc, r)
+
+ def test_create_resource_with_metrics(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1, creator,
+ archive_policy_name="low")
+ self.index.create_metric(e2, creator,
+ archive_policy_name="low")
+ rc = self.index.create_resource('generic', r1, creator,
+ metrics={'foo': e1, 'bar': e2})
+ self.assertIsNotNone(rc.started_at)
+ self.assertIsNotNone(rc.revision_start)
+ self.assertEqual({"id": r1,
+ "revision_start": rc.revision_start,
+ "revision_end": None,
+ "creator": creator,
+ "created_by_user_id": creator,
+ "created_by_project_id": "",
+ "user_id": None,
+ "project_id": None,
+ "started_at": rc.started_at,
+ "ended_at": None,
+ "original_resource_id": str(r1),
+ "type": "generic",
+ "metrics": {'foo': str(e1), 'bar': str(e2)}},
+ rc.jsonify())
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertIsNotNone(r.started_at)
+ self.assertEqual({"id": r1,
+ "revision_start": r.revision_start,
+ "revision_end": None,
+ "creator": creator,
+ "created_by_user_id": creator,
+ "created_by_project_id": "",
+ "type": "generic",
+ "started_at": rc.started_at,
+ "ended_at": None,
+ "user_id": None,
+ "project_id": None,
+ "original_resource_id": str(r1),
+ "metrics": {'foo': str(e1), 'bar': str(e2)}},
+ r.jsonify())
+
+ def test_update_non_existent_resource_end_timestamp(self):
+ r1 = uuid.uuid4()
+ self.assertRaises(
+ indexer.NoSuchResource,
+ self.index.update_resource,
+ 'generic',
+ r1,
+ ended_at=datetime.datetime(2014, 1, 1, 2, 3, 4))
+
+ def test_update_resource_end_timestamp(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_resource('generic', r1, creator)
+ self.index.update_resource(
+ 'generic',
+ r1,
+ ended_at=utils.datetime_utc(2043, 1, 1, 2, 3, 4))
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertIsNotNone(r.started_at)
+ self.assertIsNone(r.user_id)
+ self.assertIsNone(r.project_id)
+ self.assertIsNone(r.revision_end)
+ self.assertIsNotNone(r.revision_start)
+ self.assertEqual(r1, r.id)
+ self.assertEqual(creator, r.creator)
+ self.assertEqual(utils.datetime_utc(2043, 1, 1, 2, 3, 4), r.ended_at)
+ self.assertEqual("generic", r.type)
+ self.assertEqual(0, len(r.metrics))
+ self.index.update_resource(
+ 'generic',
+ r1,
+ ended_at=None)
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertIsNotNone(r.started_at)
+ self.assertIsNotNone(r.revision_start)
+ self.assertEqual({"id": r1,
+ "revision_start": r.revision_start,
+ "revision_end": None,
+ "ended_at": None,
+ "created_by_project_id": "",
+ "created_by_user_id": creator,
+ "creator": creator,
+ "user_id": None,
+ "project_id": None,
+ "type": "generic",
+ "started_at": r.started_at,
+ "original_resource_id": str(r1),
+ "metrics": {}}, r.jsonify())
+
+ def test_update_resource_metrics(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1, creator, archive_policy_name="low")
+ self.index.create_resource('generic', r1, creator, metrics={'foo': e1})
+ self.index.create_metric(e2, creator, archive_policy_name="low")
+ rc = self.index.update_resource('generic', r1, metrics={'bar': e2})
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertEqual(rc, r)
+
+ def test_update_resource_metrics_append(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1, creator,
+ archive_policy_name="low")
+ self.index.create_metric(e2, creator,
+ archive_policy_name="low")
+ self.index.create_resource('generic', r1, creator,
+ metrics={'foo': e1})
+ rc = self.index.update_resource('generic', r1, metrics={'bar': e2},
+ append_metrics=True)
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertEqual(rc, r)
+ metric_names = [m.name for m in rc.metrics]
+ self.assertIn('foo', metric_names)
+ self.assertIn('bar', metric_names)
+
+ def test_update_resource_metrics_append_fail(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1, creator,
+ archive_policy_name="low")
+ self.index.create_metric(e2, creator,
+ archive_policy_name="low")
+ self.index.create_resource('generic', r1, creator,
+ metrics={'foo': e1})
+
+ self.assertRaises(indexer.NamedMetricAlreadyExists,
+ self.index.update_resource,
+ 'generic', r1, metrics={'foo': e2},
+ append_metrics=True)
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertEqual(e1, r.metrics[0].id)
+
+ def test_update_resource_attribute(self):
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ rtype = mgr.resource_type_from_dict(resource_type, {
+ "col1": {"type": "string", "required": True,
+ "min_length": 2, "max_length": 15}
+ }, 'creating')
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ # Create
+ self.index.create_resource_type(rtype)
+
+ rc = self.index.create_resource(resource_type, r1, creator,
+ col1="foo")
+ rc = self.index.update_resource(resource_type, r1, col1="foo")
+ r = self.index.get_resource(resource_type, r1, with_metrics=True)
+ self.assertEqual(rc, r)
+
+ def test_update_resource_no_change(self):
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ rtype = mgr.resource_type_from_dict(resource_type, {
+ "col1": {"type": "string", "required": True,
+ "min_length": 2, "max_length": 15}
+ }, 'creating')
+ self.index.create_resource_type(rtype)
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ rc = self.index.create_resource(resource_type, r1, creator,
+ col1="foo")
+ updated = self.index.update_resource(resource_type, r1, col1="foo",
+ create_revision=False)
+ r = self.index.list_resources(resource_type,
+ {"=": {"id": r1}},
+ history=True)
+ self.assertEqual(1, len(r))
+ self.assertEqual(dict(rc), dict(r[0]))
+ self.assertEqual(dict(updated), dict(r[0]))
+
+ def test_update_resource_ended_at_fail(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_resource('generic', r1, creator)
+ self.assertRaises(
+ indexer.ResourceValueError,
+ self.index.update_resource,
+ 'generic', r1,
+ ended_at=utils.datetime_utc(2010, 1, 1, 1, 1, 1))
+
+ def test_update_resource_unknown_attribute(self):
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ rtype = mgr.resource_type_from_dict(resource_type, {
+ "col1": {"type": "string", "required": False,
+ "min_length": 1, "max_length": 2},
+ }, 'creating')
+ self.index.create_resource_type(rtype)
+ r1 = uuid.uuid4()
+ self.index.create_resource(resource_type, r1,
+ str(uuid.uuid4()), str(uuid.uuid4()))
+ self.assertRaises(indexer.ResourceAttributeError,
+ self.index.update_resource,
+ resource_type,
+ r1, foo="bar")
+
+ def test_update_non_existent_metric(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ self.index.create_resource('generic', r1, str(uuid.uuid4()),
+ str(uuid.uuid4()))
+ self.assertRaises(indexer.NoSuchMetric,
+ self.index.update_resource,
+ 'generic',
+ r1, metrics={'bar': e1})
+
+ def test_update_non_existent_resource(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ self.index.create_metric(e1, str(uuid.uuid4()),
+ archive_policy_name="low")
+ self.assertRaises(indexer.NoSuchResource,
+ self.index.update_resource,
+ 'generic',
+ r1, metrics={'bar': e1})
+
+ def test_create_resource_with_non_existent_metrics(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ self.assertRaises(indexer.NoSuchMetric,
+ self.index.create_resource,
+ 'generic',
+ r1, str(uuid.uuid4()), str(uuid.uuid4()),
+ metrics={'foo': e1})
+
+ def test_delete_metric_on_resource(self):
+ r1 = uuid.uuid4()
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1, creator,
+ archive_policy_name="low")
+ self.index.create_metric(e2, creator,
+ archive_policy_name="low")
+ rc = self.index.create_resource('generic', r1, creator,
+ metrics={'foo': e1, 'bar': e2})
+ self.index.delete_metric(e1)
+ self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, e1)
+ r = self.index.get_resource('generic', r1, with_metrics=True)
+ self.assertIsNotNone(r.started_at)
+ self.assertIsNotNone(r.revision_start)
+ self.assertEqual({"id": r1,
+ "started_at": r.started_at,
+ "revision_start": rc.revision_start,
+ "revision_end": None,
+ "ended_at": None,
+ "creator": creator,
+ "created_by_project_id": "",
+ "created_by_user_id": creator,
+ "user_id": None,
+ "project_id": None,
+ "original_resource_id": str(r1),
+ "type": "generic",
+ "metrics": {'bar': str(e2)}}, r.jsonify())
+
+ def test_delete_resource_custom(self):
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(resource_type, {
+ "flavor_id": {"type": "string",
+ "min_length": 1,
+ "max_length": 20,
+ "required": True}
+ }, 'creating'))
+ r1 = uuid.uuid4()
+ created = self.index.create_resource(resource_type, r1,
+ str(uuid.uuid4()),
+ str(uuid.uuid4()),
+ flavor_id="foo")
+ got = self.index.get_resource(resource_type, r1, with_metrics=True)
+ self.assertEqual(created, got)
+ self.index.delete_resource(r1)
+ got = self.index.get_resource(resource_type, r1)
+ self.assertIsNone(got)
+
+ def test_list_resources_by_unknown_field(self):
+ self.assertRaises(indexer.ResourceAttributeError,
+ self.index.list_resources,
+ 'generic',
+ attribute_filter={"=": {"fern": "bar"}})
+
+ def test_list_resources_by_user(self):
+ r1 = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ g = self.index.create_resource('generic', r1, user + ":" + project,
+ user, project)
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"user_id": user}})
+ self.assertEqual(1, len(resources))
+ self.assertEqual(g, resources[0])
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"user_id": 'bad-user'}})
+ self.assertEqual(0, len(resources))
+
+ def test_list_resources_by_created_by_user_id(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ g = self.index.create_resource('generic', r1, creator + ":" + creator)
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"created_by_user_id": creator}})
+ self.assertEqual([g], resources)
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"created_by_user_id": 'bad-user'}})
+ self.assertEqual([], resources)
+
+ def test_list_resources_by_creator(self):
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ g = self.index.create_resource('generic', r1, creator)
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"creator": creator}})
+ self.assertEqual(1, len(resources))
+ self.assertEqual(g, resources[0])
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"creator": 'bad-user'}})
+ self.assertEqual(0, len(resources))
+
+ def test_list_resources_by_user_with_details(self):
+ r1 = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ creator = user + ":" + project
+ g = self.index.create_resource('generic', r1, creator,
+ user, project)
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(resource_type, {}, 'creating'))
+ r2 = uuid.uuid4()
+ i = self.index.create_resource(resource_type, r2, creator,
+ user, project)
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"user_id": user}},
+ details=True,
+ )
+ self.assertEqual(2, len(resources))
+ self.assertIn(g, resources)
+ self.assertIn(i, resources)
+
+ def test_list_resources_by_project(self):
+ r1 = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ creator = user + ":" + project
+ g = self.index.create_resource('generic', r1, creator, user, project)
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"project_id": project}})
+ self.assertEqual(1, len(resources))
+ self.assertEqual(g, resources[0])
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"project_id": 'bad-project'}})
+ self.assertEqual(0, len(resources))
+
+ def test_list_resources_by_duration(self):
+ r1 = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ g = self.index.create_resource(
+ 'generic', r1, user + ":" + project, user, project,
+ started_at=utils.datetime_utc(2010, 1, 1, 12, 0),
+ ended_at=utils.datetime_utc(2010, 1, 1, 13, 0))
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"and": [
+ {"=": {"user_id": user}},
+ {">": {"lifespan": 1800}},
+ ]})
+ self.assertEqual(1, len(resources))
+ self.assertEqual(g, resources[0])
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"and": [
+ {"=": {"project_id": project}},
+ {">": {"lifespan": 7200}},
+ ]})
+ self.assertEqual(0, len(resources))
+
+ def test_list_resources(self):
+        # NOTE(jd) So this test is a bit fuzzy right now as we use the same
+ # database for all tests and the tests are running concurrently, but
+ # for now it'll be better than nothing.
+ r1 = uuid.uuid4()
+ g = self.index.create_resource('generic', r1,
+ str(uuid.uuid4()), str(uuid.uuid4()))
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(resource_type, {}, 'creating'))
+ r2 = uuid.uuid4()
+ i = self.index.create_resource(resource_type, r2,
+ str(uuid.uuid4()), str(uuid.uuid4()))
+ resources = self.index.list_resources('generic')
+ self.assertGreaterEqual(len(resources), 2)
+ g_found = False
+ i_found = False
+ for r in resources:
+ if r.id == r1:
+ self.assertEqual(g, r)
+ g_found = True
+ elif r.id == r2:
+ i_found = True
+ if i_found and g_found:
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ resources = self.index.list_resources(resource_type)
+ self.assertGreaterEqual(len(resources), 1)
+ for r in resources:
+ if r.id == r2:
+ self.assertEqual(i, r)
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ def test_list_resource_attribute_type_numeric(self):
+ """Test that we can pass an integer to filter on a string type."""
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(resource_type, {
+ "flavor_id": {"type": "string",
+ "min_length": 1,
+ "max_length": 20,
+ "required": False},
+ }, 'creating'))
+ r = self.index.list_resources(
+ resource_type, attribute_filter={"=": {"flavor_id": 1.0}})
+ self.assertEqual(0, len(r))
+
+ def test_list_resource_weird_date(self):
+ self.assertRaises(
+ indexer.QueryValueError,
+ self.index.list_resources,
+ 'generic',
+ attribute_filter={"=": {"started_at": "f00bar"}})
+
+ def test_list_resources_without_history(self):
+ e = uuid.uuid4()
+ rid = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ new_user = str(uuid.uuid4())
+ new_project = str(uuid.uuid4())
+
+ self.index.create_metric(e, user + ":" + project,
+ archive_policy_name="low")
+
+ self.index.create_resource('generic', rid, user + ":" + project,
+ user, project,
+ metrics={'foo': e})
+ r2 = self.index.update_resource('generic', rid, user_id=new_user,
+ project_id=new_project,
+ append_metrics=True).jsonify()
+
+ self.assertEqual({'foo': str(e)}, r2['metrics'])
+ self.assertEqual(new_user, r2['user_id'])
+ self.assertEqual(new_project, r2['project_id'])
+ resources = self.index.list_resources('generic', history=False,
+ details=True)
+ self.assertGreaterEqual(len(resources), 1)
+ expected_resources = [r.jsonify() for r in resources
+ if r.id == rid]
+ self.assertIn(r2, expected_resources)
+
+ def test_list_resources_with_history(self):
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ rid = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ creator = user + ":" + project
+ new_user = str(uuid.uuid4())
+ new_project = str(uuid.uuid4())
+
+ self.index.create_metric(e1, creator, archive_policy_name="low")
+ self.index.create_metric(e2, creator, archive_policy_name="low")
+ self.index.create_metric(uuid.uuid4(), creator,
+ archive_policy_name="low")
+
+ r1 = self.index.create_resource('generic', rid, creator, user, project,
+ metrics={'foo': e1, 'bar': e2}
+ ).jsonify()
+ r2 = self.index.update_resource('generic', rid, user_id=new_user,
+ project_id=new_project,
+ append_metrics=True).jsonify()
+
+ r1['revision_end'] = r2['revision_start']
+ r2['revision_end'] = None
+ self.assertEqual({'foo': str(e1),
+ 'bar': str(e2)}, r2['metrics'])
+ self.assertEqual(new_user, r2['user_id'])
+ self.assertEqual(new_project, r2['project_id'])
+ resources = self.index.list_resources('generic', history=True,
+ details=False,
+ attribute_filter={
+ "=": {"id": rid}})
+ self.assertGreaterEqual(len(resources), 2)
+ resources = sorted(
+ [r.jsonify() for r in resources],
+ key=operator.itemgetter("revision_start"))
+ self.assertEqual([r1, r2], resources)
+
+ def test_list_resources_custom_with_history(self):
+ e1 = uuid.uuid4()
+ e2 = uuid.uuid4()
+ rid = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ new_user = str(uuid.uuid4())
+ new_project = str(uuid.uuid4())
+
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(resource_type, {
+ "col1": {"type": "string", "required": True,
+ "min_length": 2, "max_length": 15}
+ }, 'creating'))
+
+ self.index.create_metric(e1, creator,
+ archive_policy_name="low")
+ self.index.create_metric(e2, creator,
+ archive_policy_name="low")
+ self.index.create_metric(uuid.uuid4(), creator,
+ archive_policy_name="low")
+
+ r1 = self.index.create_resource(resource_type, rid, creator,
+ user, project,
+ col1="foo",
+ metrics={'foo': e1, 'bar': e2}
+ ).jsonify()
+ r2 = self.index.update_resource(resource_type, rid, user_id=new_user,
+ project_id=new_project,
+ col1="bar",
+ append_metrics=True).jsonify()
+
+ r1['revision_end'] = r2['revision_start']
+ r2['revision_end'] = None
+ self.assertEqual({'foo': str(e1),
+ 'bar': str(e2)}, r2['metrics'])
+ self.assertEqual(new_user, r2['user_id'])
+ self.assertEqual(new_project, r2['project_id'])
+ self.assertEqual('bar', r2['col1'])
+ resources = self.index.list_resources(resource_type, history=True,
+ details=False,
+ attribute_filter={
+ "=": {"id": rid}})
+ self.assertGreaterEqual(len(resources), 2)
+ resources = sorted(
+ [r.jsonify() for r in resources],
+ key=operator.itemgetter("revision_start"))
+ self.assertEqual([r1, r2], resources)
+
+ def test_list_resources_started_after_ended_before(self):
+        # NOTE(jd) So this test is a bit fuzzy right now as we use the same
+ # database for all tests and the tests are running concurrently, but
+ # for now it'll be better than nothing.
+ r1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ g = self.index.create_resource(
+ 'generic', r1, creator,
+ started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
+ ended_at=utils.datetime_utc(2000, 1, 3, 23, 23, 23))
+ r2 = uuid.uuid4()
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(resource_type, {}, 'creating'))
+ i = self.index.create_resource(
+ resource_type, r2, creator,
+ started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
+ ended_at=utils.datetime_utc(2000, 1, 4, 23, 23, 23))
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={
+ "and":
+ [{">=": {"started_at":
+ utils.datetime_utc(2000, 1, 1, 23, 23, 23)}},
+ {"<": {"ended_at":
+ utils.datetime_utc(2000, 1, 5, 23, 23, 23)}}]})
+ self.assertGreaterEqual(len(resources), 2)
+ g_found = False
+ i_found = False
+ for r in resources:
+ if r.id == r1:
+ self.assertEqual(g, r)
+ g_found = True
+ elif r.id == r2:
+ i_found = True
+ if i_found and g_found:
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ resources = self.index.list_resources(
+ resource_type,
+ attribute_filter={
+ ">=": {
+ "started_at": datetime.datetime(2000, 1, 1, 23, 23, 23)
+ },
+ })
+ self.assertGreaterEqual(len(resources), 1)
+ for r in resources:
+ if r.id == r2:
+ self.assertEqual(i, r)
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={
+ "<": {
+ "ended_at": datetime.datetime(1999, 1, 1, 23, 23, 23)
+ },
+ })
+ self.assertEqual(0, len(resources))
+
+ def test_deletes_resources(self):
+ r1 = uuid.uuid4()
+ r2 = uuid.uuid4()
+ user = str(uuid.uuid4())
+ project = str(uuid.uuid4())
+ creator = user + ":" + project
+ metrics = {'foo': {'archive_policy_name': 'medium'}}
+ g1 = self.index.create_resource('generic', r1, creator,
+ user, project, metrics=metrics)
+ g2 = self.index.create_resource('generic', r2, creator,
+ user, project, metrics=metrics)
+
+ metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'],
+ g2['metrics'][0]['id']])
+ self.assertEqual(2, len(metrics))
+ for m in metrics:
+ self.assertEqual('active', m['status'])
+
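+        # Deleting the resources must not drop their metrics outright: they
+        # are flagged 'delete' so they can be expunged later, as checked
+        # below.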
+ deleted = self.index.delete_resources(
+ 'generic',
+ attribute_filter={"=": {"user_id": user}})
+ self.assertEqual(2, deleted)
+
+ resources = self.index.list_resources(
+ 'generic',
+ attribute_filter={"=": {"user_id": user}})
+ self.assertEqual(0, len(resources))
+
+ metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'],
+ g2['metrics'][0]['id']],
+ status='delete')
+ self.assertEqual(2, len(metrics))
+ for m in metrics:
+ self.assertEqual('delete', m['status'])
+
+ def test_get_metric(self):
+ e1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1, creator, archive_policy_name="low")
+
+ metric = self.index.list_metrics(id=e1)
+ self.assertEqual(1, len(metric))
+ metric = metric[0]
+ self.assertEqual(e1, metric.id)
+ self.assertEqual(metric.creator, creator)
+ self.assertIsNone(metric.name)
+ self.assertIsNone(metric.resource_id)
+
+ def test_get_metric_with_details(self):
+ e1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1,
+ creator,
+ archive_policy_name="low")
+
+ metric = self.index.list_metrics(id=e1)
+ self.assertEqual(1, len(metric))
+ metric = metric[0]
+ self.assertEqual(e1, metric.id)
+ self.assertEqual(metric.creator, creator)
+ self.assertIsNone(metric.name)
+ self.assertIsNone(metric.resource_id)
+ self.assertEqual(self.archive_policies['low'], metric.archive_policy)
+
+ def test_get_metric_with_bad_uuid(self):
+ e1 = uuid.uuid4()
+ self.assertEqual([], self.index.list_metrics(id=e1))
+
+ def test_get_metric_empty_list_uuids(self):
+ self.assertEqual([], self.index.list_metrics(ids=[]))
+
+ def test_list_metrics(self):
+ e1 = uuid.uuid4()
+ creator = str(uuid.uuid4())
+ self.index.create_metric(e1, creator, archive_policy_name="low")
+ e2 = uuid.uuid4()
+ self.index.create_metric(e2, creator, archive_policy_name="low")
+ metrics = self.index.list_metrics()
+ id_list = [m.id for m in metrics]
+ self.assertIn(e1, id_list)
+ # Test ordering
+ if e1 < e2:
+ self.assertLess(id_list.index(e1), id_list.index(e2))
+ else:
+ self.assertLess(id_list.index(e2), id_list.index(e1))
+
+ def test_list_metrics_delete_status(self):
+ e1 = uuid.uuid4()
+ self.index.create_metric(e1, str(uuid.uuid4()),
+ archive_policy_name="low")
+ self.index.delete_metric(e1)
+ metrics = self.index.list_metrics()
+ self.assertNotIn(e1, [m.id for m in metrics])
+
+ def test_resource_type_crud(self):
+ mgr = self.index.get_resource_type_schema()
+ rtype = mgr.resource_type_from_dict("indexer_test", {
+ "col1": {"type": "string", "required": True,
+ "min_length": 2, "max_length": 15}
+ }, "creating")
+
+ # Create
+ self.index.create_resource_type(rtype)
+ self.assertRaises(indexer.ResourceTypeAlreadyExists,
+ self.index.create_resource_type,
+ rtype)
+
+ # Get
+ rtype = self.index.get_resource_type("indexer_test")
+ self.assertEqual("indexer_test", rtype.name)
+ self.assertEqual(1, len(rtype.attributes))
+ self.assertEqual("col1", rtype.attributes[0].name)
+ self.assertEqual("string", rtype.attributes[0].typename)
+ self.assertEqual(15, rtype.attributes[0].max_length)
+ self.assertEqual(2, rtype.attributes[0].min_length)
+ self.assertEqual("active", rtype.state)
+
+ # List
+ rtypes = self.index.list_resource_types()
+ for rtype in rtypes:
+ if rtype.name == "indexer_test":
+ break
+ else:
+ self.fail("indexer_test not found")
+
+ # Test resource itself
+ rid = uuid.uuid4()
+ self.index.create_resource("indexer_test", rid,
+ str(uuid.uuid4()),
+ str(uuid.uuid4()),
+ col1="col1_value")
+ r = self.index.get_resource("indexer_test", rid)
+ self.assertEqual("indexer_test", r.type)
+ self.assertEqual("col1_value", r.col1)
+
+ # Update the resource type
+ add_attrs = mgr.resource_type_from_dict("indexer_test", {
+ "col2": {"type": "number", "required": False,
+ "max": 100, "min": 0},
+ "col3": {"type": "number", "required": True,
+ "max": 100, "min": 0, "options": {'fill': 15}}
+ }, "creating").attributes
+ self.index.update_resource_type("indexer_test",
+ add_attributes=add_attrs)
+
+        # Check the new attributes: col2 is optional so it stays None, while
+        # col3 is required and is backfilled with its 'fill' value
+ r = self.index.get_resource("indexer_test", rid)
+ self.assertIsNone(r.col2)
+ self.assertEqual(15, r.col3)
+
+ self.index.update_resource("indexer_test", rid, col2=10)
+
+ rl = self.index.list_resources('indexer_test',
+ {"=": {"id": rid}},
+ history=True,
+ sorts=['revision_start:asc',
+ 'started_at:asc'])
+ self.assertEqual(2, len(rl))
+ self.assertIsNone(rl[0].col2)
+ self.assertEqual(10, rl[1].col2)
+ self.assertEqual(15, rl[0].col3)
+ self.assertEqual(15, rl[1].col3)
+
+ # Deletion
+ self.assertRaises(indexer.ResourceTypeInUse,
+ self.index.delete_resource_type,
+ "indexer_test")
+ self.index.delete_resource(rid)
+ self.index.delete_resource_type("indexer_test")
+
+ # Ensure it's deleted
+ self.assertRaises(indexer.NoSuchResourceType,
+ self.index.get_resource_type,
+ "indexer_test")
+
+ self.assertRaises(indexer.NoSuchResourceType,
+ self.index.delete_resource_type,
+ "indexer_test")
+
+ def _get_rt_state(self, name):
+ return self.index.get_resource_type(name).state
+
+ def test_resource_type_unexpected_creation_error(self):
+ mgr = self.index.get_resource_type_schema()
+ rtype = mgr.resource_type_from_dict("indexer_test_fail", {
+ "col1": {"type": "string", "required": True,
+ "min_length": 2, "max_length": 15}
+ }, "creating")
+
+ states = {'before': None,
+ 'after': None}
+
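+        # Record the resource type's state while the table creation is in
+        # flight ("creating") and again after the injected failure, which
+        # should leave it in "creation_error".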
+ def map_and_create_mock(rt, conn):
+ states['before'] = self._get_rt_state("indexer_test_fail")
+ raise MockException("boom!")
+
+ with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER,
+ "map_and_create_tables",
+ side_effect=map_and_create_mock):
+ self.assertRaises(MockException,
+ self.index.create_resource_type,
+ rtype)
+ states['after'] = self._get_rt_state('indexer_test_fail')
+
+ self.assertEqual([('after', 'creation_error'),
+ ('before', 'creating')],
+ sorted(states.items()))
+
+ def test_resource_type_unexpected_deleting_error(self):
+ mgr = self.index.get_resource_type_schema()
+ rtype = mgr.resource_type_from_dict("indexer_test_fail2", {
+ "col1": {"type": "string", "required": True,
+ "min_length": 2, "max_length": 15}
+ }, "creating")
+ self.index.create_resource_type(rtype)
+
+ states = {'before': None,
+ 'after': None}
+
+        def unmap_and_delete_mock(rt, conn):
+ states['before'] = self._get_rt_state("indexer_test_fail2")
+ raise MockException("boom!")
+
+        with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER,
+                               "unmap_and_delete_tables",
+                               side_effect=unmap_and_delete_mock):
+ self.assertRaises(MockException,
+ self.index.delete_resource_type,
+ rtype.name)
+ states['after'] = self._get_rt_state('indexer_test_fail2')
+
+ self.assertEqual([('after', 'deletion_error'),
+ ('before', 'deleting')],
+ sorted(states.items()))
+
+        # We can clean up the mess!
+ self.index.delete_resource_type("indexer_test_fail2")
+
+ # Ensure it's deleted
+ self.assertRaises(indexer.NoSuchResourceType,
+ self.index.get_resource_type,
+ "indexer_test_fail2")
+
+ self.assertRaises(indexer.NoSuchResourceType,
+ self.index.delete_resource_type,
+ "indexer_test_fail2")
diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py
new file mode 100644
index 00000000..9caf9b39
--- /dev/null
+++ b/gnocchi/tests/test_rest.py
@@ -0,0 +1,1915 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import base64
+import calendar
+import contextlib
+import datetime
+from email import utils as email_utils
+import hashlib
+import json
+import uuid
+
+import iso8601
+from keystonemiddleware import fixture as ksm_fixture
+import mock
+import six
+from stevedore import extension
+import testscenarios
+from testtools import testcase
+import webtest
+
+from gnocchi import archive_policy
+from gnocchi import rest
+from gnocchi.rest import app
+from gnocchi.tests import base as tests_base
+from gnocchi.tests import utils as tests_utils
+from gnocchi import utils
+
+
+load_tests = testscenarios.load_tests_apply_scenarios
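+# testscenarios expands each test case below into one run per scenario
+# declared on RestTest (the basic, keystone and noauth auth modes).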
+
+
+class TestingApp(webtest.TestApp):
+ VALID_TOKEN_ADMIN = str(uuid.uuid4())
+ USER_ID_ADMIN = str(uuid.uuid4())
+ PROJECT_ID_ADMIN = str(uuid.uuid4())
+
+ VALID_TOKEN = str(uuid.uuid4())
+ USER_ID = str(uuid.uuid4())
+ PROJECT_ID = str(uuid.uuid4())
+
+ VALID_TOKEN_2 = str(uuid.uuid4())
+ USER_ID_2 = str(uuid.uuid4())
+ PROJECT_ID_2 = str(uuid.uuid4())
+
+ INVALID_TOKEN = str(uuid.uuid4())
+
+ def __init__(self, *args, **kwargs):
+ self.auth_mode = kwargs.pop('auth_mode')
+ self.storage = kwargs.pop('storage')
+ self.indexer = kwargs.pop('indexer')
+ super(TestingApp, self).__init__(*args, **kwargs)
+        # Set up the Keystone auth_token fake cache
+ self.token = self.VALID_TOKEN
+        # Set up the default user for basic auth
+ self.user = self.USER_ID.encode('ascii')
+
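+    # The use_*() context managers below temporarily swap the credentials
+    # used by do_request() and restore the previous ones on exit.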
+ @contextlib.contextmanager
+ def use_admin_user(self):
+ if self.auth_mode == "keystone":
+ old_token = self.token
+ self.token = self.VALID_TOKEN_ADMIN
+ try:
+ yield
+ finally:
+ self.token = old_token
+ elif self.auth_mode == "basic":
+ old_user = self.user
+ self.user = b"admin"
+ try:
+ yield
+ finally:
+ self.user = old_user
+ elif self.auth_mode == "noauth":
+ raise testcase.TestSkipped("auth mode is noauth")
+ else:
+ raise RuntimeError("Unknown auth_mode")
+
+ @contextlib.contextmanager
+ def use_another_user(self):
+ if self.auth_mode != "keystone":
+ raise testcase.TestSkipped("Auth mode is not Keystone")
+ old_token = self.token
+ self.token = self.VALID_TOKEN_2
+ try:
+ yield
+ finally:
+ self.token = old_token
+
+ @contextlib.contextmanager
+ def use_invalid_token(self):
+ if self.auth_mode != "keystone":
+ raise testcase.TestSkipped("Auth mode is not Keystone")
+ old_token = self.token
+ self.token = self.INVALID_TOKEN
+ try:
+ yield
+ finally:
+ self.token = old_token
+
+ def do_request(self, req, *args, **kwargs):
+ if self.auth_mode in "keystone":
+ if self.token is not None:
+ req.headers['X-Auth-Token'] = self.token
+ elif self.auth_mode == "basic":
+ req.headers['Authorization'] = (
+ b"basic " + base64.b64encode(self.user + b":")
+ )
+ elif self.auth_mode == "noauth":
+ req.headers['X-User-Id'] = self.USER_ID
+ req.headers['X-Project-Id'] = self.PROJECT_ID
+ response = super(TestingApp, self).do_request(req, *args, **kwargs)
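+        # Process the measures posted by this request synchronously so that
+        # follow-up reads in the tests see fully aggregated data.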
+ metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming)
+ self.storage.process_background_tasks(self.indexer, metrics, sync=True)
+ return response
+
+
+class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios):
+
+ scenarios = [
+ ('basic', dict(auth_mode="basic")),
+ ('keystone', dict(auth_mode="keystone")),
+ ('noauth', dict(auth_mode="noauth")),
+ ]
+
+ def setUp(self):
+ super(RestTest, self).setUp()
+
+ if self.auth_mode == "keystone":
+ self.auth_token_fixture = self.useFixture(
+ ksm_fixture.AuthTokenFixture())
+ self.auth_token_fixture.add_token_data(
+ is_v2=True,
+ token_id=TestingApp.VALID_TOKEN_ADMIN,
+ user_id=TestingApp.USER_ID_ADMIN,
+ user_name='adminusername',
+ project_id=TestingApp.PROJECT_ID_ADMIN,
+ role_list=['admin'])
+ self.auth_token_fixture.add_token_data(
+ is_v2=True,
+ token_id=TestingApp.VALID_TOKEN,
+ user_id=TestingApp.USER_ID,
+ user_name='myusername',
+ project_id=TestingApp.PROJECT_ID,
+ role_list=["member"])
+ self.auth_token_fixture.add_token_data(
+ is_v2=True,
+ token_id=TestingApp.VALID_TOKEN_2,
+ user_id=TestingApp.USER_ID_2,
+ user_name='myusername2',
+ project_id=TestingApp.PROJECT_ID_2,
+ role_list=["member"])
+
+ self.conf.set_override("auth_mode", self.auth_mode, group="api")
+
+ self.app = TestingApp(app.load_app(conf=self.conf,
+ indexer=self.index,
+ storage=self.storage,
+ not_implemented_middleware=False),
+ storage=self.storage,
+ indexer=self.index,
+ auth_mode=self.auth_mode)
+
+ # NOTE(jd) Used at least by docs
+ @staticmethod
+ def runTest():
+ pass
+
+
+class RootTest(RestTest):
+ def test_deserialize_force_json(self):
+ with self.app.use_admin_user():
+ self.app.post(
+ "/v1/archive_policy",
+ params="foo",
+ status=415)
+
+ def test_capabilities(self):
+ custom_agg = extension.Extension('test_aggregation', None, None, None)
+ mgr = extension.ExtensionManager.make_test_instance(
+ [custom_agg], 'gnocchi.aggregates')
+ aggregation_methods = set(
+ archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)
+
+ with mock.patch.object(extension, 'ExtensionManager',
+ return_value=mgr):
+ result = self.app.get("/v1/capabilities").json
+ self.assertEqual(
+ sorted(aggregation_methods),
+ sorted(result['aggregation_methods']))
+ self.assertEqual(
+ ['test_aggregation'],
+ result['dynamic_aggregation_methods'])
+
+ def test_status(self):
+ with self.app.use_admin_user():
+ r = self.app.get("/v1/status")
+ status = json.loads(r.text)
+ self.assertIsInstance(status['storage']['measures_to_process'], dict)
+ self.assertIsInstance(status['storage']['summary']['metrics'], int)
+ self.assertIsInstance(status['storage']['summary']['measures'], int)
+
+
+class ArchivePolicyTest(RestTest):
+ """Test the ArchivePolicies REST API.
+
+ See also gnocchi/tests/gabbi/gabbits/archive.yaml
+ """
+
+    # TODO(chdent): The tests left here involve inspecting the
+    # aggregation methods, which gabbi can't currently handle because
+    # the ordering of the results is not predictable.
+
+ def test_post_archive_policy_with_agg_methods(self):
+ name = str(uuid.uuid4())
+ with self.app.use_admin_user():
+ result = self.app.post_json(
+ "/v1/archive_policy",
+ params={"name": name,
+ "aggregation_methods": ["mean"],
+ "definition":
+ [{
+ "granularity": "1 minute",
+ "points": 20,
+ }]},
+ status=201)
+ self.assertEqual("application/json", result.content_type)
+ ap = json.loads(result.text)
+ self.assertEqual(['mean'], ap['aggregation_methods'])
+
+ def test_post_archive_policy_with_agg_methods_minus(self):
+ name = str(uuid.uuid4())
+ with self.app.use_admin_user():
+ result = self.app.post_json(
+ "/v1/archive_policy",
+ params={"name": name,
+ "aggregation_methods": ["-mean"],
+ "definition":
+ [{
+ "granularity": "1 minute",
+ "points": 20,
+ }]},
+ status=201)
+ self.assertEqual("application/json", result.content_type)
+ ap = json.loads(result.text)
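+        # "-mean" means: take the configured default aggregation methods
+        # and remove 'mean', rather than naming an explicit set.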
+ self.assertEqual(
+ (set(self.conf.archive_policy.default_aggregation_methods)
+ - set(['mean'])),
+ set(ap['aggregation_methods']))
+
+ def test_get_archive_policy(self):
+ result = self.app.get("/v1/archive_policy/medium")
+ ap = json.loads(result.text)
+ ap_dict = self.archive_policies['medium'].jsonify()
+ ap_dict['definition'] = [
+ archive_policy.ArchivePolicyItem(**d).jsonify()
+ for d in ap_dict['definition']
+ ]
+ self.assertEqual(set(ap['aggregation_methods']),
+ ap_dict['aggregation_methods'])
+ del ap['aggregation_methods']
+ del ap_dict['aggregation_methods']
+ self.assertEqual(ap_dict, ap)
+
+ def test_list_archive_policy(self):
+ result = self.app.get("/v1/archive_policy")
+ aps = json.loads(result.text)
+        # Transform the aggregation method lists to sets for comparison
+ for ap in aps:
+ ap['aggregation_methods'] = set(ap['aggregation_methods'])
+ for name, ap in six.iteritems(self.archive_policies):
+ apj = ap.jsonify()
+ apj['definition'] = [
+ archive_policy.ArchivePolicyItem(**d).jsonify()
+ for d in ap.definition
+ ]
+ self.assertIn(apj, aps)
+
+
+class MetricTest(RestTest):
+
+ def test_get_metric_with_another_user_linked_resource(self):
+ result = self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-01 02:02:02",
+ "user_id": TestingApp.USER_ID_2,
+ "project_id": TestingApp.PROJECT_ID_2,
+ "metrics": {"foobar": {"archive_policy_name": "low"}},
+ })
+ resource = json.loads(result.text)
+ metric_id = resource["metrics"]["foobar"]
+ with self.app.use_another_user():
+ self.app.get("/v1/metric/%s" % metric_id)
+
+ def test_get_metric_with_another_user(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"},
+ status=201)
+ self.assertEqual("application/json", result.content_type)
+
+ with self.app.use_another_user():
+ self.app.get(result.headers['Location'], status=403)
+
+ def test_post_archive_policy_no_mean(self):
+ """Test that we have a 404 if mean is not in AP."""
+ ap = str(uuid.uuid4())
+ with self.app.use_admin_user():
+ self.app.post_json(
+ "/v1/archive_policy",
+ params={"name": ap,
+ "aggregation_methods": ["max"],
+ "definition": [{
+ "granularity": "10s",
+ "points": 20,
+ }]},
+ status=201)
+ result = self.app.post_json(
+ "/v1/metric",
+ params={"archive_policy_name": ap},
+ status=201)
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 8},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 16}])
+ self.app.get("/v1/metric/%s/measures" % metric['id'],
+ status=404)
+
+ def test_delete_metric_another_user(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric = json.loads(result.text)
+ with self.app.use_another_user():
+ self.app.delete("/v1/metric/" + metric['id'], status=403)
+
+ def test_add_measure_with_another_user(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "high"})
+ metric = json.loads(result.text)
+ with self.app.use_another_user():
+ self.app.post_json(
+ "/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 23:23:23',
+ "value": 1234.2}],
+ status=403)
+
+ def test_add_measures_back_window(self):
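+        # back_window=2 should let this policy accept measures up to two
+        # granularity periods (minutes here) older than the most recent
+        # processed timestamp; older points are accepted (202) but ignored.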
+ ap_name = str(uuid.uuid4())
+ with self.app.use_admin_user():
+ self.app.post_json(
+ "/v1/archive_policy",
+ params={"name": ap_name,
+ "back_window": 2,
+ "definition":
+ [{
+ "granularity": "1 minute",
+ "points": 20,
+ }]},
+ status=201)
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": ap_name})
+ metric = json.loads(result.text)
+ self.app.post_json(
+ "/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 23:30:23',
+ "value": 1234.2}],
+ status=202)
+ self.app.post_json(
+ "/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 23:29:23',
+ "value": 1234.2}],
+ status=202)
+ self.app.post_json(
+ "/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 23:28:23',
+ "value": 1234.2}],
+ status=202)
+ # This one is too old and should not be taken into account
+ self.app.post_json(
+ "/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2012-01-01 23:27:23',
+ "value": 1234.2}],
+ status=202)
+
+ ret = self.app.get("/v1/metric/%s/measures" % metric['id'])
+ result = json.loads(ret.text)
+ self.assertEqual(
+ [[u'2013-01-01T23:28:00+00:00', 60.0, 1234.2],
+ [u'2013-01-01T23:29:00+00:00', 60.0, 1234.2],
+ [u'2013-01-01T23:30:00+00:00', 60.0, 1234.2]],
+ result)
+
+ def test_get_measure_with_another_user(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "low"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 23:23:23',
+ "value": 1234.2}])
+ with self.app.use_another_user():
+ self.app.get("/v1/metric/%s/measures" % metric['id'],
+ status=403)
+
+ @mock.patch.object(utils, 'utcnow')
+ def test_get_measure_start_relative(self, utcnow):
+ """Make sure the timestamps can be relative to now."""
+ utcnow.return_value = datetime.datetime(2014, 1, 1, 10, 23)
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "high"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": utils.utcnow().isoformat(),
+ "value": 1234.2}])
+ ret = self.app.get(
+ "/v1/metric/%s/measures?start=-10 minutes"
+ % metric['id'],
+ status=200)
+ result = json.loads(ret.text)
+ now = utils.datetime_utc(2014, 1, 1, 10, 23)
+ self.assertEqual([
+ ['2014-01-01T10:00:00+00:00', 3600.0, 1234.2],
+ [(now
+ - datetime.timedelta(
+ seconds=now.second,
+ microseconds=now.microsecond)).isoformat(),
+ 60.0, 1234.2],
+ [(now
+ - datetime.timedelta(
+ microseconds=now.microsecond)).isoformat(),
+ 1.0, 1234.2]], result)
+
+ def test_get_measure_stop(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "high"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 12:00:00',
+ "value": 1234.2},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 456}])
+ ret = self.app.get("/v1/metric/%s/measures"
+ "?stop=2013-01-01 12:00:01" % metric['id'],
+ status=200)
+ result = json.loads(ret.text)
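+        # At the 3600 s and 60 s granularities both points fall in the same
+        # period, so their mean (1234.2 + 456) / 2 = 845.1 is returned; at
+        # 1 s only the point before the stop boundary remains.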
+ self.assertEqual(
+ [[u'2013-01-01T12:00:00+00:00', 3600.0, 845.1],
+ [u'2013-01-01T12:00:00+00:00', 60.0, 845.1],
+ [u'2013-01-01T12:00:00+00:00', 1.0, 1234.2]],
+ result)
+
+ def test_get_measure_aggregation(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 123.2},
+ {"timestamp": '2013-01-01 12:00:03',
+ "value": 12345.2},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 1234.2}])
+ ret = self.app.get(
+ "/v1/metric/%s/measures?aggregation=max" % metric['id'],
+ status=200)
+ result = json.loads(ret.text)
+ self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 12345.2],
+ [u'2013-01-01T12:00:00+00:00', 3600.0, 12345.2],
+ [u'2013-01-01T12:00:00+00:00', 60.0, 12345.2]],
+ result)
+
+ def test_get_moving_average(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 12:00:00',
+ "value": 69},
+ {"timestamp": '2013-01-01 12:00:20',
+ "value": 42},
+ {"timestamp": '2013-01-01 12:00:40',
+ "value": 6},
+ {"timestamp": '2013-01-01 12:01:00',
+ "value": 44},
+ {"timestamp": '2013-01-01 12:01:20',
+ "value": 7}])
+
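+        # Assuming the "medium" policy aggregates at a 60 s granularity, the
+        # per-minute means are (69 + 42 + 6) / 3 = 39 and (44 + 7) / 2 = 25.5,
+        # so a 120 s moving average yields (39 + 25.5) / 2 = 32.25.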
+ path = "/v1/metric/%s/measures?aggregation=%s&window=%ds"
+ ret = self.app.get(path % (metric['id'], 'moving-average', 120),
+ status=200)
+ result = json.loads(ret.text)
+ expected = [[u'2013-01-01T12:00:00+00:00', 120.0, 32.25]]
+ self.assertEqual(expected, result)
+ ret = self.app.get(path % (metric['id'], 'moving-average', 90),
+ status=400)
+ self.assertIn('No data available that is either full-res',
+ ret.text)
+ path = "/v1/metric/%s/measures?aggregation=%s"
+ ret = self.app.get(path % (metric['id'], 'moving-average'),
+ status=400)
+ self.assertIn('Moving aggregate must have window specified',
+ ret.text)
+
+ def test_get_moving_average_invalid_window(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 12:00:00',
+ "value": 69},
+ {"timestamp": '2013-01-01 12:00:20',
+ "value": 42},
+ {"timestamp": '2013-01-01 12:00:40',
+ "value": 6},
+ {"timestamp": '2013-01-01 12:01:00',
+ "value": 44},
+ {"timestamp": '2013-01-01 12:01:20',
+ "value": 7}])
+
+ path = "/v1/metric/%s/measures?aggregation=%s&window=foobar"
+ ret = self.app.get(path % (metric['id'], 'moving-average'),
+ status=400)
+ self.assertIn('Invalid value for window', ret.text)
+
+ def test_get_resource_missing_named_metric_measure_aggregation(self):
+ mgr = self.index.get_resource_type_schema()
+ resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(resource_type, {
+ "server_group": {"type": "string",
+ "min_length": 1,
+ "max_length": 40,
+ "required": True}
+ }, 'creating'))
+
+ attributes = {
+ "server_group": str(uuid.uuid4()),
+ }
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric1 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 8},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 16}])
+
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric2 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 0},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 4}])
+
+ attributes['id'] = str(uuid.uuid4())
+ attributes['metrics'] = {'foo': metric1['id']}
+ self.app.post_json("/v1/resource/" + resource_type,
+ params=attributes)
+
+ attributes['id'] = str(uuid.uuid4())
+ attributes['metrics'] = {'bar': metric2['id']}
+ self.app.post_json("/v1/resource/" + resource_type,
+ params=attributes)
+
+ result = self.app.post_json(
+ "/v1/aggregation/resource/%s/metric/foo?aggregation=max"
+ % resource_type,
+ params={"=": {"server_group": attributes['server_group']}})
+
+ measures = json.loads(result.text)
+ self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0],
+ [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0],
+ [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]],
+ measures)
+
+ def test_search_value(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "high"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 12:00:00',
+ "value": 1234.2},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 456}])
+ metric1 = metric['id']
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "high"})
+ metric = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric['id'],
+ params=[{"timestamp": '2013-01-01 12:30:00',
+ "value": 1234.2},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 456}])
+ metric2 = metric['id']
+
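+        # The search below uses the unicode operator aliases u"∧" ("and")
+        # and u"≥" (">="), which the search API accepts as equivalents.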
+ ret = self.app.post_json(
+ "/v1/search/metric?metric_id=%s&metric_id=%s"
+ "&stop=2013-01-01 12:10:00" % (metric1, metric2),
+ params={u"∧": [{u"≥": 1000}]},
+ status=200)
+ result = json.loads(ret.text)
+ self.assertEqual(
+ {metric1: [[u'2013-01-01T12:00:00+00:00', 1.0, 1234.2]],
+ metric2: []},
+ result)
+
+
+class ResourceTest(RestTest):
+ def setUp(self):
+ super(ResourceTest, self).setUp()
+ self.attributes = {
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-03T02:02:02+00:00",
+ "user_id": str(uuid.uuid4()),
+ "project_id": str(uuid.uuid4()),
+ "name": "my-name",
+ }
+ self.patchable_attributes = {
+ "ended_at": "2014-01-03T02:02:02+00:00",
+ "name": "new-name",
+ }
+ self.resource = self.attributes.copy()
+ # Set original_resource_id
+ self.resource['original_resource_id'] = self.resource['id']
+ self.resource['created_by_user_id'] = TestingApp.USER_ID
+ if self.auth_mode in ("keystone", "noauth"):
+ self.resource['created_by_project_id'] = TestingApp.PROJECT_ID
+ self.resource['creator'] = (
+ TestingApp.USER_ID + ":" + TestingApp.PROJECT_ID
+ )
+ elif self.auth_mode == "basic":
+ self.resource['created_by_project_id'] = ""
+ self.resource['creator'] = TestingApp.USER_ID
+ self.resource['ended_at'] = None
+ self.resource['metrics'] = {}
+ if 'user_id' not in self.resource:
+ self.resource['user_id'] = None
+ if 'project_id' not in self.resource:
+ self.resource['project_id'] = None
+
+ mgr = self.index.get_resource_type_schema()
+ self.resource_type = str(uuid.uuid4())
+ self.index.create_resource_type(
+ mgr.resource_type_from_dict(self.resource_type, {
+ "name": {"type": "string",
+ "min_length": 1,
+ "max_length": 40,
+ "required": True}
+ }, "creating"))
+ self.resource['type'] = self.resource_type
+
+ @mock.patch.object(utils, 'utcnow')
+ def test_post_resource(self, utcnow):
+ utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ resource = json.loads(result.text)
+ self.assertEqual("http://localhost/v1/resource/"
+ + self.resource_type + "/" + self.attributes['id'],
+ result.headers['Location'])
+ self.assertIsNone(resource['revision_end'])
+ self.assertEqual(resource['revision_start'],
+ "2014-01-01T10:23:00+00:00")
+ self._check_etag(result, resource)
+ del resource['revision_start']
+ del resource['revision_end']
+ self.assertEqual(self.resource, resource)
+
+ def test_post_resource_with_invalid_metric(self):
+ metric_id = str(uuid.uuid4())
+ self.attributes['metrics'] = {"foo": metric_id}
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=400)
+ self.assertIn("Metric %s does not exist" % metric_id,
+ result.text)
+
+ def test_post_resource_with_metric_from_other_user(self):
+ with self.app.use_another_user():
+ metric = self.app.post_json(
+ "/v1/metric",
+ params={'archive_policy_name': "high"})
+ metric_id = json.loads(metric.text)['id']
+ self.attributes['metrics'] = {"foo": metric_id}
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=400)
+ self.assertIn("Metric %s does not exist" % metric_id,
+ result.text)
+
+ def test_post_resource_already_exist(self):
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=409)
+ self.assertIn("Resource %s already exists" % self.attributes['id'],
+ result.text)
+
+ def test_post_invalid_timestamp(self):
+ self.attributes['started_at'] = "2014-01-01 02:02:02"
+ self.attributes['ended_at'] = "2013-01-01 02:02:02"
+ self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=400)
+
+ @staticmethod
+ def _strtime_to_httpdate(dt):
+ return email_utils.formatdate(calendar.timegm(
+ iso8601.parse_date(dt).timetuple()), usegmt=True)
+
+ def _check_etag(self, response, resource):
+ lastmodified = self._strtime_to_httpdate(resource['revision_start'])
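+        # The ETag is expected to be the SHA-1 of the resource id
+        # concatenated with its revision_start timestamp.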
+ etag = hashlib.sha1()
+ etag.update(resource['id'].encode('utf-8'))
+ etag.update(resource['revision_start'].encode('utf8'))
+ self.assertEqual(response.headers['Last-Modified'], lastmodified)
+ self.assertEqual(response.headers['ETag'], '"%s"' % etag.hexdigest())
+
+ @mock.patch.object(utils, 'utcnow')
+ def test_get_resource(self, utcnow):
+ utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
+ result = self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ result = self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'])
+ resource = json.loads(result.text)
+ self.assertIsNone(resource['revision_end'])
+ self.assertEqual(resource['revision_start'],
+ "2014-01-01T10:23:00+00:00")
+ self._check_etag(result, resource)
+ del resource['revision_start']
+ del resource['revision_end']
+ self.assertEqual(self.resource, resource)
+
+ def test_get_resource_etag(self):
+ result = self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ result = self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'])
+ resource = json.loads(result.text)
+ etag = hashlib.sha1()
+ etag.update(resource['id'].encode('utf-8'))
+ etag.update(resource['revision_start'].encode('utf-8'))
+ etag = etag.hexdigest()
+ lastmodified = self._strtime_to_httpdate(resource['revision_start'])
+ oldlastmodified = self._strtime_to_httpdate("2000-01-01 00:00:00")
+
+ # if-match and if-unmodified-since
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-match': 'fake'},
+ status=412)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-match': etag},
+ status=200)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-unmodified-since': lastmodified},
+ status=200)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-unmodified-since': oldlastmodified},
+ status=412)
+        # Some cases with '*'
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-none-match': '*'},
+ status=304)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/wrongid",
+ headers={'if-none-match': '*'},
+ status=404)
+ # always prefers if-match if both provided
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-match': etag,
+ 'if-unmodified-since': lastmodified},
+ status=200)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-match': etag,
+ 'if-unmodified-since': oldlastmodified},
+ status=200)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-match': '*',
+ 'if-unmodified-since': oldlastmodified},
+ status=200)
+
+ # if-none-match and if-modified-since
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-none-match': etag},
+ status=304)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-none-match': 'fake'},
+ status=200)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-modified-since': lastmodified},
+ status=304)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-modified-since': oldlastmodified},
+ status=200)
+ # always prefers if-none-match if both provided
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-modified-since': oldlastmodified,
+ 'if-none-match': etag},
+ status=304)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-modified-since': oldlastmodified,
+ 'if-none-match': '*'},
+ status=304)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-modified-since': lastmodified,
+ 'if-none-match': '*'},
+ status=304)
+        # Some cases with '*'
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-match': '*'},
+ status=200)
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/wrongid",
+ headers={'if-match': '*'},
+ status=404)
+
+ # if-none-match and if-match
+ self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-none-match': etag,
+ 'if-match': etag},
+ status=304)
+
+        # if-none-match returns 412 instead of 304 for PUT/PATCH/DELETE
+ self.app.patch_json("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-none-match': '*'},
+ status=412)
+ self.app.delete("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-none-match': '*'},
+ status=412)
+
+ # if-modified-since is ignored with PATCH/PUT/DELETE
+ self.app.patch_json("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ params=self.patchable_attributes,
+ headers={'if-modified-since': lastmodified},
+ status=200)
+ self.app.delete("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ headers={'if-modified-since': lastmodified},
+ status=204)
+
+ def test_get_resource_non_admin(self):
+ with self.app.use_another_user():
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'],
+ status=200)
+
+ def test_get_resource_unauthorized(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ with self.app.use_another_user():
+ self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'],
+ status=403)
+
+ def test_get_resource_named_metric(self):
+ self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id']
+ + "/metric/foo/measures",
+ status=200)
+
+ def test_list_resource_metrics_unauthorized(self):
+ self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ with self.app.use_another_user():
+ self.app.get(
+ "/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'] + "/metric",
+ status=403)
+
+ def test_delete_resource_named_metric(self):
+ self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ self.app.delete("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id']
+ + "/metric/foo",
+ status=204)
+ self.app.delete("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id']
+ + "/metric/foo/measures",
+ status=404)
+
+ def test_get_resource_unknown_named_metric(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id']
+ + "/metric/foo",
+ status=404)
+
+ def test_post_append_metrics_already_exists(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ metrics = {'foo': {'archive_policy_name': "high"}}
+ self.app.post_json("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'] + "/metric",
+ params=metrics, status=204)
+ metrics = {'foo': {'archive_policy_name': "low"}}
+ self.app.post_json("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id']
+ + "/metric",
+ params=metrics,
+ status=409)
+
+ result = self.app.get("/v1/resource/"
+ + self.resource_type + "/"
+ + self.attributes['id'])
+ result = json.loads(result.text)
+ self.assertTrue(uuid.UUID(result['metrics']['foo']))
+
+ def test_post_append_metrics(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ metrics = {'foo': {'archive_policy_name': "high"}}
+ self.app.post_json("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'] + "/metric",
+ params=metrics, status=204)
+ result = self.app.get("/v1/resource/"
+ + self.resource_type + "/"
+ + self.attributes['id'])
+ result = json.loads(result.text)
+ self.assertTrue(uuid.UUID(result['metrics']['foo']))
+
+ def test_post_append_metrics_created_by_different_user(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ with self.app.use_another_user():
+ metric = self.app.post_json(
+ "/v1/metric",
+ params={'archive_policy_name': "high"})
+ metric_id = json.loads(metric.text)['id']
+ result = self.app.post_json("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'] + "/metric",
+ params={str(uuid.uuid4()): metric_id},
+ status=400)
+ self.assertIn("Metric %s does not exist" % metric_id, result.text)
+
+ @mock.patch.object(utils, 'utcnow')
+ def test_patch_resource_metrics(self, utcnow):
+ utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
+ result = self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ r = json.loads(result.text)
+
+ utcnow.return_value = utils.datetime_utc(2014, 1, 2, 6, 49)
+ new_metrics = {'foo': {'archive_policy_name': "medium"}}
+ self.app.patch_json(
+ "/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ params={'metrics': new_metrics},
+ status=200)
+ result = self.app.get("/v1/resource/"
+ + self.resource_type + "/"
+ + self.attributes['id'])
+ result = json.loads(result.text)
+ self.assertTrue(uuid.UUID(result['metrics']['foo']))
+ self.assertIsNone(result['revision_end'])
+ self.assertIsNone(r['revision_end'])
+ self.assertEqual(result['revision_start'], "2014-01-01T10:23:00+00:00")
+ self.assertEqual(r['revision_start'], "2014-01-01T10:23:00+00:00")
+
+ del result['metrics']
+ del result['revision_start']
+ del result['revision_end']
+ del r['metrics']
+ del r['revision_start']
+ del r['revision_end']
+ self.assertEqual(r, result)
+
+ def test_patch_resource_existent_metrics_from_another_user(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ with self.app.use_another_user():
+ result = self.app.post_json(
+ "/v1/metric",
+ params={'archive_policy_name': "medium"})
+ metric_id = json.loads(result.text)['id']
+ result = self.app.patch_json(
+ "/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'],
+ params={'metrics': {'foo': metric_id}},
+ status=400)
+ self.assertIn("Metric %s does not exist" % metric_id, result.text)
+ result = self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'])
+ result = json.loads(result.text)
+ self.assertEqual({}, result['metrics'])
+
+ def test_patch_resource_non_existent_metrics(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ e1 = str(uuid.uuid4())
+ result = self.app.patch_json(
+ "/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'],
+ params={'metrics': {'foo': e1}},
+ status=400)
+ self.assertIn("Metric %s does not exist" % e1, result.text)
+ result = self.app.get("/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'])
+ result = json.loads(result.text)
+ self.assertEqual({}, result['metrics'])
+
+ @mock.patch.object(utils, 'utcnow')
+ def test_patch_resource_attributes(self, utcnow):
+ utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ utcnow.return_value = utils.datetime_utc(2014, 1, 2, 6, 48)
+ presponse = self.app.patch_json(
+ "/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ params=self.patchable_attributes,
+ status=200)
+ response = self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'])
+ result = json.loads(response.text)
+ presult = json.loads(presponse.text)
+ self.assertEqual(result, presult)
+ for k, v in six.iteritems(self.patchable_attributes):
+ self.assertEqual(v, result[k])
+ self.assertIsNone(result['revision_end'])
+ self.assertEqual(result['revision_start'],
+ "2014-01-02T06:48:00+00:00")
+ self._check_etag(response, result)
+
+ # Check the history
+ history = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ headers={"Accept": "application/json; history=true"},
+ params={"=": {"id": result['id']}},
+ status=200)
+ history = json.loads(history.text)
+ self.assertGreaterEqual(len(history), 2)
+ self.assertEqual(result, history[1])
+
+ h = history[0]
+ for k, v in six.iteritems(self.attributes):
+ self.assertEqual(v, h[k])
+ self.assertEqual(h['revision_end'],
+ "2014-01-02T06:48:00+00:00")
+ self.assertEqual(h['revision_start'],
+ "2014-01-01T10:23:00+00:00")
+
+ def test_patch_resource_attributes_unauthorized(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ with self.app.use_another_user():
+ self.app.patch_json(
+ "/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'],
+ params=self.patchable_attributes,
+ status=403)
+
+ def test_patch_resource_ended_at_before_started_at(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ self.app.patch_json(
+ "/v1/resource/"
+ + self.resource_type
+ + "/"
+ + self.attributes['id'],
+ params={'ended_at': "2000-05-05 23:23:23"},
+ status=400)
+
+ def test_patch_resource_no_partial_update(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ e1 = str(uuid.uuid4())
+ result = self.app.patch_json(
+ "/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ params={'ended_at': "2044-05-05 23:23:23",
+ 'metrics': {"foo": e1}},
+ status=400)
+ self.assertIn("Metric %s does not exist" % e1, result.text)
+ result = self.app.get("/v1/resource/"
+ + self.resource_type + "/"
+ + self.attributes['id'])
+ result = json.loads(result.text)
+ del result['revision_start']
+ del result['revision_end']
+ self.assertEqual(self.resource, result)
+
+ def test_patch_resource_non_existent(self):
+ self.app.patch_json(
+ "/v1/resource/" + self.resource_type
+ + "/" + str(uuid.uuid4()),
+ params={},
+ status=404)
+
+ def test_patch_resource_non_existent_with_body(self):
+ self.app.patch_json(
+ "/v1/resource/" + self.resource_type
+ + "/" + str(uuid.uuid4()),
+ params=self.patchable_attributes,
+ status=404)
+
+ def test_patch_resource_unknown_field(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ result = self.app.patch_json(
+ "/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ params={'foobar': 123},
+ status=400)
+ self.assertIn(b'Invalid input: extra keys not allowed @ data['
+ + repr(u'foobar').encode('ascii') + b"]",
+ result.body)
+
+ def test_delete_resource(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ self.app.get("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=200)
+ self.app.delete("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=204)
+ self.app.get("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=404)
+
+ def test_delete_resource_with_metrics(self):
+ metric = self.app.post_json(
+ "/v1/metric",
+ params={'archive_policy_name': "high"})
+ metric_id = json.loads(metric.text)['id']
+ metric_name = six.text_type(uuid.uuid4())
+ self.attributes['metrics'] = {metric_name: metric_id}
+ self.app.get("/v1/metric/" + metric_id,
+ status=200)
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ self.app.get("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=200)
+ self.app.delete("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=204)
+ self.app.get("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=404)
+ self.app.get("/v1/metric/" + metric_id,
+ status=404)
+
+ def test_delete_resource_unauthorized(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ with self.app.use_another_user():
+ self.app.delete("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=403)
+
+ def test_delete_resource_non_existent(self):
+ result = self.app.delete("/v1/resource/" + self.resource_type + "/"
+ + self.attributes['id'],
+ status=404)
+ self.assertIn(
+ "Resource %s does not exist" % self.attributes['id'],
+ result.text)
+
+ def test_post_resource_with_metrics(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric = json.loads(result.text)
+ self.attributes['metrics'] = {"foo": metric['id']}
+ result = self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ resource = json.loads(result.text)
+ self.assertEqual("http://localhost/v1/resource/"
+ + self.resource_type + "/"
+ + self.attributes['id'],
+ result.headers['Location'])
+ self.resource['metrics'] = self.attributes['metrics']
+ del resource['revision_start']
+ del resource['revision_end']
+ self.assertEqual(self.resource, resource)
+
+ def test_post_resource_with_null_metrics(self):
+ self.attributes['metrics'] = {"foo": {"archive_policy_name": "low"}}
+ result = self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ resource = json.loads(result.text)
+ self.assertEqual("http://localhost/v1/resource/"
+ + self.resource_type + "/"
+ + self.attributes['id'],
+ result.headers['Location'])
+ self.assertEqual(self.attributes['id'], resource["id"])
+ metric_id = uuid.UUID(resource['metrics']['foo'])
+ result = self.app.get("/v1/metric/" + str(metric_id) + "/measures",
+ status=200)
+
+ def test_search_datetime(self):
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes,
+ status=201)
+ result = self.app.get("/v1/resource/" + self.resource_type
+ + "/" + self.attributes['id'])
+ result = json.loads(result.text)
+
+ resources = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ params={"and": [{"=": {"id": result['id']}},
+ {"=": {"ended_at": None}}]},
+ status=200)
+ resources = json.loads(resources.text)
+ self.assertGreaterEqual(len(resources), 1)
+ self.assertEqual(result, resources[0])
+
+ resources = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ headers={"Accept": "application/json; history=true"},
+ params={"and": [
+ {"=": {"id": result['id']}},
+ {"or": [{">=": {"revision_end": '2014-01-03T02:02:02'}},
+ {"=": {"revision_end": None}}]}
+ ]},
+ status=200)
+ resources = json.loads(resources.text)
+ self.assertGreaterEqual(len(resources), 1)
+ self.assertEqual(result, resources[0])
+
+ def test_search_resource_by_original_resource_id(self):
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ created_resource = json.loads(result.text)
+ original_id = created_resource['original_resource_id']
+ result = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ params={"eq": {"original_resource_id": original_id}},
+ status=200)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 1)
+ self.assertEqual(created_resource, resources[0])
+
+ def test_search_resources_by_user(self):
+ u1 = str(uuid.uuid4())
+ self.attributes['user_id'] = u1
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ created_resource = json.loads(result.text)
+ result = self.app.post_json("/v1/search/resource/generic",
+ params={"eq": {"user_id": u1}},
+ status=200)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 1)
+ result = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ params={"=": {"user_id": u1}},
+ status=200)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 1)
+ self.assertEqual(created_resource, resources[0])
+
+ def test_search_resources_with_another_project_id(self):
+ u1 = str(uuid.uuid4())
+ result = self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-01 02:02:02",
+ "user_id": u1,
+ "project_id": TestingApp.PROJECT_ID_2,
+ })
+ g = json.loads(result.text)
+
+ with self.app.use_another_user():
+ result = self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-01 03:03:03",
+ "user_id": u1,
+ "project_id": str(uuid.uuid4()),
+ })
+ j = json.loads(result.text)
+ g_found = False
+ j_found = False
+
+ result = self.app.post_json(
+ "/v1/search/resource/generic",
+ params={"=": {"user_id": u1}},
+ status=200)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 2)
+ for r in resources:
+ if r['id'] == str(g['id']):
+ self.assertEqual(g, r)
+ g_found = True
+ elif r['id'] == str(j['id']):
+ self.assertEqual(j, r)
+ j_found = True
+ if g_found and j_found:
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ def test_search_resources_by_unknown_field(self):
+ result = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ params={"=": {"foobar": "baz"}},
+ status=400)
+ self.assertIn("Resource type " + self.resource_type
+ + " has no foobar attribute",
+ result.text)
+
+ def test_search_resources_started_after(self):
+        # NOTE(jd) So this test is a bit fuzzy right now as we use the same
+        # database for all tests and the tests are running concurrently, but
+        # for now it'll be better than nothing.
+ result = self.app.post_json(
+ "/v1/resource/generic/",
+ params={
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-01 02:02:02",
+ "user_id": str(uuid.uuid4()),
+ "project_id": str(uuid.uuid4()),
+ })
+ g = json.loads(result.text)
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ i = json.loads(result.text)
+ result = self.app.post_json(
+ "/v1/search/resource/generic",
+ params={"≥": {"started_at": "2014-01-01"}},
+ status=200)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 2)
+
+ i_found = False
+ g_found = False
+ for r in resources:
+ if r['id'] == str(g['id']):
+ self.assertEqual(g, r)
+ g_found = True
+ elif r['id'] == str(i['id']):
+ i_found = True
+ if i_found and g_found:
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ result = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ params={">=": {"started_at": "2014-01-03"}})
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 1)
+ for r in resources:
+ if r['id'] == str(i['id']):
+ self.assertEqual(i, r)
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ def test_list_resources_with_bad_details(self):
+ result = self.app.get("/v1/resource/generic?details=awesome",
+ status=400)
+ self.assertIn(
+ b"Unable to parse `details': invalid truth value",
+ result.body)
+
+ def test_list_resources_with_bad_details_in_accept(self):
+ result = self.app.get("/v1/resource/generic",
+ headers={
+ "Accept": "application/json; details=foo",
+ },
+ status=400)
+ self.assertIn(
+ b"Unable to parse `Accept header': invalid truth value",
+ result.body)
+
+ def _do_test_list_resources_with_detail(self, request):
+        # NOTE(jd) So this test is a bit fuzzy right now as we use the same
+        # database for all tests and the tests are running concurrently, but
+        # for now it'll be better than nothing.
+ result = self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-01 02:02:02",
+ "user_id": str(uuid.uuid4()),
+ "project_id": str(uuid.uuid4()),
+ })
+ g = json.loads(result.text)
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ i = json.loads(result.text)
+ result = request()
+ self.assertEqual(200, result.status_code)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 2)
+
+ i_found = False
+ g_found = False
+ for r in resources:
+ if r['id'] == str(g['id']):
+ self.assertEqual(g, r)
+ g_found = True
+ elif r['id'] == str(i['id']):
+ i_found = True
+ # Check we got all the details
+ self.assertEqual(i, r)
+ if i_found and g_found:
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ result = self.app.get("/v1/resource/" + self.resource_type)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 1)
+ for r in resources:
+ if r['id'] == str(i['id']):
+ self.assertEqual(i, r)
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ def test_list_resources_with_another_project_id(self):
+ result = self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-01 02:02:02",
+ "user_id": TestingApp.USER_ID_2,
+ "project_id": TestingApp.PROJECT_ID_2,
+ })
+ g = json.loads(result.text)
+
+ with self.app.use_another_user():
+ result = self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": str(uuid.uuid4()),
+ "started_at": "2014-01-01 03:03:03",
+ "user_id": str(uuid.uuid4()),
+ "project_id": str(uuid.uuid4()),
+ })
+ j = json.loads(result.text)
+
+ g_found = False
+ j_found = False
+
+ result = self.app.get("/v1/resource/generic")
+ self.assertEqual(200, result.status_code)
+ resources = json.loads(result.text)
+ self.assertGreaterEqual(len(resources), 2)
+ for r in resources:
+ if r['id'] == str(g['id']):
+ self.assertEqual(g, r)
+ g_found = True
+ elif r['id'] == str(j['id']):
+ self.assertEqual(j, r)
+ j_found = True
+ if g_found and j_found:
+ break
+ else:
+ self.fail("Some resources were not found")
+
+ def test_list_resources_with_details(self):
+ self._do_test_list_resources_with_detail(
+ lambda: self.app.get("/v1/resource/generic?details=true"))
+
+ def test_list_resources_with_details_via_accept(self):
+ self._do_test_list_resources_with_detail(
+ lambda: self.app.get(
+ "/v1/resource/generic",
+ headers={"Accept": "application/json; details=true"}))
+
+ def test_search_resources_with_details(self):
+ self._do_test_list_resources_with_detail(
+ lambda: self.app.post("/v1/search/resource/generic?details=true"))
+
+ def test_search_resources_with_details_via_accept(self):
+ self._do_test_list_resources_with_detail(
+ lambda: self.app.post(
+ "/v1/search/resource/generic",
+ headers={"Accept": "application/json; details=true"}))
+
+ def test_get_res_named_metric_measure_aggregated_policies_invalid(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "low"})
+ metric1 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 16}])
+
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name":
+ "no_granularity_match"})
+ metric2 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 4}])
+
+        # NOTE(sileht): because the database is never cleaned between each
+        # test, we must ensure that the query will not match resources from
+        # another test; to achieve this we set a different name on each test.
+ name = str(uuid.uuid4())
+ self.attributes['name'] = name
+
+ self.attributes['metrics'] = {'foo': metric1['id']}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ self.attributes['id'] = str(uuid.uuid4())
+ self.attributes['metrics'] = {'foo': metric2['id']}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ result = self.app.post_json(
+ "/v1/aggregation/resource/"
+ + self.resource_type + "/metric/foo?aggregation=max",
+ params={"=": {"name": name}},
+ status=400)
+ self.assertIn(b"One of the metrics being aggregated doesn't have "
+ b"matching granularity",
+ result.body)
+
+ def test_get_res_named_metric_measure_aggregation_nooverlap(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric1 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 8},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 16}])
+
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric2 = json.loads(result.text)
+
+        # NOTE(sileht): because the database is never cleaned between each
+        # test, we must ensure that the query will not match resources from
+        # another test; to achieve this we set a different name on each test.
+ name = str(uuid.uuid4())
+ self.attributes['name'] = name
+
+ self.attributes['metrics'] = {'foo': metric1['id']}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ self.attributes['id'] = str(uuid.uuid4())
+ self.attributes['metrics'] = {'foo': metric2['id']}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ result = self.app.post_json(
+ "/v1/aggregation/resource/" + self.resource_type
+ + "/metric/foo?aggregation=max",
+ params={"=": {"name": name}},
+ expect_errors=True)
+
+ self.assertEqual(400, result.status_code, result.text)
+ self.assertIn("No overlap", result.text)
+
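+ # Retrying with needed_overlap=0 disables the overlap requirement, so
+ # the aggregation uses whatever points exist (metric2 has no measures).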
+ result = self.app.post_json(
+ "/v1/aggregation/resource/"
+ + self.resource_type + "/metric/foo?aggregation=min"
+ + "&needed_overlap=0",
+ params={"=": {"name": name}},
+ expect_errors=True)
+
+ self.assertEqual(200, result.status_code, result.text)
+ measures = json.loads(result.text)
+ self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0],
+ ['2013-01-01T12:00:00+00:00', 3600.0, 8.0],
+ ['2013-01-01T12:00:00+00:00', 60.0, 8.0]],
+ measures)
+
+ def test_get_res_named_metric_measure_aggregation_nominal(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric1 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 8},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 16}])
+
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric2 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 0},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 4}])
+
+ # NOTE(sileht): because the database is never cleaned between tests, we
+ # must ensure that the query will not match resources from another
+ # test; to achieve this we set a different name in each test.
+ name = str(uuid.uuid4())
+ self.attributes['name'] = name
+
+ self.attributes['metrics'] = {'foo': metric1['id']}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ self.attributes['id'] = str(uuid.uuid4())
+ self.attributes['metrics'] = {'foo': metric2['id']}
+ self.app.post_json("/v1/resource/" + self.resource_type,
+ params=self.attributes)
+
+ result = self.app.post_json(
+ "/v1/aggregation/resource/" + self.resource_type
+ + "/metric/foo?aggregation=max",
+ params={"=": {"name": name}},
+ expect_errors=True)
+
+ self.assertEqual(200, result.status_code, result.text)
+ measures = json.loads(result.text)
+ self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0],
+ [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0],
+ [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]],
+ measures)
+
+ result = self.app.post_json(
+ "/v1/aggregation/resource/"
+ + self.resource_type + "/metric/foo?aggregation=min",
+ params={"=": {"name": name}},
+ expect_errors=True)
+
+ self.assertEqual(200, result.status_code)
+ measures = json.loads(result.text)
+ self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 0],
+ ['2013-01-01T12:00:00+00:00', 3600.0, 0],
+ ['2013-01-01T12:00:00+00:00', 60.0, 0]],
+ measures)
+
+ def test_get_aggregated_measures_across_entities_no_match(self):
+ result = self.app.post_json(
+ "/v1/aggregation/resource/"
+ + self.resource_type + "/metric/foo?aggregation=min",
+ params={"=": {"name": "none!"}},
+ expect_errors=True)
+
+ self.assertEqual(200, result.status_code)
+ measures = json.loads(result.text)
+ self.assertEqual([], measures)
+
+ def test_get_aggregated_measures_across_entities(self):
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric1 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 8},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 16}])
+
+ result = self.app.post_json("/v1/metric",
+ params={"archive_policy_name": "medium"})
+ metric2 = json.loads(result.text)
+ self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
+ params=[{"timestamp": '2013-01-01 12:00:01',
+ "value": 0},
+ {"timestamp": '2013-01-01 12:00:02',
+ "value": 4}])
+ # Check with one metric
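+ # metric2 holds the values 0 and 4, which fall in the same minute, so
+ # every aggregation level averages to 2.0.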
+ result = self.app.get("/v1/aggregation/metric"
+ "?aggregation=mean&metric=%s" % (metric2['id']))
+ measures = json.loads(result.text)
+ self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 2.0],
+ [u'2013-01-01T12:00:00+00:00', 3600.0, 2.0],
+ [u'2013-01-01T12:00:00+00:00', 60.0, 2.0]],
+ measures)
+
+ # Check with two metrics
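+ # The cross-metric mean averages the per-metric means:
+ # ((8+16)/2 + (0+4)/2) / 2 = 7.0 at every level.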
+ result = self.app.get("/v1/aggregation/metric"
+ "?aggregation=mean&metric=%s&metric=%s" %
+ (metric1['id'], metric2['id']))
+ measures = json.loads(result.text)
+ self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 7.0],
+ [u'2013-01-01T12:00:00+00:00', 3600.0, 7.0],
+ [u'2013-01-01T12:00:00+00:00', 60.0, 7.0]],
+ measures)
+
+ def test_search_resources_with_like(self):
+ result = self.app.post_json(
+ "/v1/resource/" + self.resource_type,
+ params=self.attributes)
+ created_resource = json.loads(result.text)
+
+ result = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ params={"like": {"name": "my%"}},
+ status=200)
+
+ resources = json.loads(result.text)
+ self.assertIn(created_resource, resources)
+
+ result = self.app.post_json(
+ "/v1/search/resource/" + self.resource_type,
+ params={"like": {"name": str(uuid.uuid4())}},
+ status=200)
+ resources = json.loads(result.text)
+ self.assertEqual([], resources)
+
+
+class GenericResourceTest(RestTest):
+ def test_list_resources_tied_to_user(self):
+ resource_id = str(uuid.uuid4())
+ self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": resource_id,
+ "started_at": "2014-01-01 02:02:02",
+ "user_id": str(uuid.uuid4()),
+ "project_id": str(uuid.uuid4()),
+ })
+
+ with self.app.use_another_user():
+ result = self.app.get("/v1/resource/generic")
+ resources = json.loads(result.text)
+ for resource in resources:
+ if resource['id'] == resource_id:
+ self.fail("Resource found")
+
+ def test_get_resources_metric_tied_to_user(self):
+ resource_id = str(uuid.uuid4())
+ self.app.post_json(
+ "/v1/resource/generic",
+ params={
+ "id": resource_id,
+ "started_at": "2014-01-01 02:02:02",
+ "user_id": TestingApp.USER_ID_2,
+ "project_id": TestingApp.PROJECT_ID_2,
+ "metrics": {"foobar": {"archive_policy_name": "low"}},
+ })
+
+ # This user created it, so she can access it
+ self.app.get(
+ "/v1/resource/generic/%s/metric/foobar" % resource_id)
+
+ with self.app.use_another_user():
+ # This user "owns" it, so it should be able to access it
+ self.app.get(
+ "/v1/resource/generic/%s/metric/foobar" % resource_id)
+
+ def test_search_resources_invalid_query(self):
+ result = self.app.post_json(
+ "/v1/search/resource/generic",
+ params={"wrongoperator": {"user_id": "bar"}},
+ status=400)
+ self.assertIn(
+ "Invalid input: extra keys not allowed @ data["
+ + repr(u'wrongoperator') + "]",
+ result.text)
+
+
+class QueryStringSearchAttrFilterTest(tests_base.TestCase):
+ def _do_test(self, expr, expected):
+ req = rest.QueryStringSearchAttrFilter.parse(expr)
+ self.assertEqual(expected, req)
+
+ def test_search_query_builder(self):
+ self._do_test('foo=7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C',
+ {"=": {"foo": "7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C"}})
+ self._do_test('foo=7EED6CC3EDC848C98EF68A36B9ACC91C',
+ {"=": {"foo": "7EED6CC3EDC848C98EF68A36B9ACC91C"}})
+ self._do_test('foo=bar', {"=": {"foo": "bar"}})
+ self._do_test('foo!=1', {"!=": {"foo": 1.0}})
+ self._do_test('foo=True', {"=": {"foo": True}})
+ self._do_test('foo=null', {"=": {"foo": None}})
+ self._do_test('foo="null"', {"=": {"foo": "null"}})
+ self._do_test('foo in ["null", "foo"]',
+ {"in": {"foo": ["null", "foo"]}})
+ self._do_test(u'foo="quote" and bar≠1',
+ {"and": [{u"≠": {"bar": 1}},
+ {"=": {"foo": "quote"}}]})
+ self._do_test('foo="quote" or bar like "%%foo"',
+ {"or": [{"like": {"bar": "%%foo"}},
+ {"=": {"foo": "quote"}}]})
+
+ self._do_test('not (foo="quote" or bar like "%%foo" or foo="what!" '
+ 'or bar="who?")',
+ {"not": {"or": [
+ {"=": {"bar": "who?"}},
+ {"=": {"foo": "what!"}},
+ {"like": {"bar": "%%foo"}},
+ {"=": {"foo": "quote"}},
+ ]}})
+
+ self._do_test('(foo="quote" or bar like "%%foo" or not foo="what!" '
+ 'or bar="who?") and cat="meme"',
+ {"and": [
+ {"=": {"cat": "meme"}},
+ {"or": [
+ {"=": {"bar": "who?"}},
+ {"not": {"=": {"foo": "what!"}}},
+ {"like": {"bar": "%%foo"}},
+ {"=": {"foo": "quote"}},
+ ]}
+ ]})
+
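+ # "and" binds tighter than "or": in the mixed expressions below the
+ # and-ed terms are grouped first, then or-ed together.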
+ self._do_test('foo="quote" or bar like "%%foo" or foo="what!" '
+ 'or bar="who?" and cat="meme"',
+ {"or": [
+ {"and": [
+ {"=": {"cat": "meme"}},
+ {"=": {"bar": "who?"}},
+ ]},
+ {"=": {"foo": "what!"}},
+ {"like": {"bar": "%%foo"}},
+ {"=": {"foo": "quote"}},
+ ]})
+
+ self._do_test('foo="quote" or bar like "%%foo" and foo="what!" '
+ 'or bar="who?" or cat="meme"',
+ {"or": [
+ {"=": {"cat": "meme"}},
+ {"=": {"bar": "who?"}},
+ {"and": [
+ {"=": {"foo": "what!"}},
+ {"like": {"bar": "%%foo"}},
+ ]},
+ {"=": {"foo": "quote"}},
+ ]})
diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py
new file mode 100644
index 00000000..fc0713d6
--- /dev/null
+++ b/gnocchi/tests/test_statsd.py
@@ -0,0 +1,160 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2016 Red Hat, Inc.
+# Copyright © 2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import uuid
+
+import mock
+
+from gnocchi import indexer
+from gnocchi import statsd
+from gnocchi.tests import base as tests_base
+from gnocchi import utils
+
+
+class TestStatsd(tests_base.TestCase):
+
+ STATSD_USER_ID = str(uuid.uuid4())
+ STATSD_PROJECT_ID = str(uuid.uuid4())
+ STATSD_ARCHIVE_POLICY_NAME = "medium"
+
+ def setUp(self):
+ super(TestStatsd, self).setUp()
+
+ self.conf.set_override("resource_id",
+ str(uuid.uuid4()), "statsd")
+ self.conf.set_override("creator",
+ self.STATSD_USER_ID, "statsd")
+ self.conf.set_override("archive_policy_name",
+ self.STATSD_ARCHIVE_POLICY_NAME, "statsd")
+
+ self.stats = statsd.Stats(self.conf)
+ # Replace the storage/indexer with the test ones, which have been
+ # upgraded
+ self.stats.storage = self.storage
+ self.stats.indexer = self.index
+ self.server = statsd.StatsdServer(self.stats)
+
+ def test_flush_empty(self):
+ self.server.stats.flush()
+
+ @mock.patch.object(utils, 'utcnow')
+ def _test_gauge_or_ms(self, metric_type, utcnow):
+ metric_name = "test_gauge_or_ms"
+ metric_key = metric_name + "|" + metric_type
+ utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
+ self.server.datagram_received(
+ ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'),
+ ("127.0.0.1", 12345))
+ self.stats.flush()
+
+ r = self.stats.indexer.get_resource('generic',
+ self.conf.statsd.resource_id,
+ with_metrics=True)
+
+ metric = r.get_metric(metric_key)
+
+ self.stats.storage.process_background_tasks(
+ self.stats.indexer, [str(metric.id)], sync=True)
+
+ measures = self.stats.storage.get_measures(metric)
+ self.assertEqual([
+ (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0),
+ (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0),
+ (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)
+ ], measures)
+
+ utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
+ # This value is going to be ignored: the 2 sent just below overwrites
+ # it before the flush
+ self.server.datagram_received(
+ ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'),
+ ("127.0.0.1", 12345))
+ self.server.datagram_received(
+ ("%s:2|%s" % (metric_name, metric_type)).encode('ascii'),
+ ("127.0.0.1", 12345))
+ self.stats.flush()
+
+ self.stats.storage.process_background_tasks(
+ self.stats.indexer, [str(metric.id)], sync=True)
+
+ measures = self.stats.storage.get_measures(metric)
+ self.assertEqual([
+ (utils.datetime_utc(2015, 1, 7), 86400.0, 1.5),
+ (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.5),
+ (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0),
+ (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 2.0)
+ ], measures)
+
+ def test_gauge(self):
+ self._test_gauge_or_ms("g")
+
+ def test_ms(self):
+ self._test_gauge_or_ms("ms")
+
+ @mock.patch.object(utils, 'utcnow')
+ def test_counter(self, utcnow):
+ metric_name = "test_counter"
+ metric_key = metric_name + "|c"
+ utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
+ self.server.datagram_received(
+ ("%s:1|c" % metric_name).encode('ascii'),
+ ("127.0.0.1", 12345))
+ self.stats.flush()
+
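+ # By flush time the metric exists under the configured statsd
+ # resource.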
+ r = self.stats.indexer.get_resource('generic',
+ self.conf.statsd.resource_id,
+ with_metrics=True)
+ metric = r.get_metric(metric_key)
+ self.assertIsNotNone(metric)
+
+ self.stats.storage.process_background_tasks(
+ self.stats.indexer, [str(metric.id)], sync=True)
+
+ measures = self.stats.storage.get_measures(metric)
+ self.assertEqual([
+ (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0),
+ (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0),
+ (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)], measures)
+
+ utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
+ self.server.datagram_received(
+ ("%s:45|c" % metric_name).encode('ascii'),
+ ("127.0.0.1", 12345))
+ self.server.datagram_received(
+ ("%s:2|c|@0.2" % metric_name).encode('ascii'),
+ ("127.0.0.1", 12345))
+ self.stats.flush()
+
+ self.stats.storage.process_background_tasks(
+ self.stats.indexer, [str(metric.id)], sync=True)
+
+ measures = self.stats.storage.get_measures(metric)
+ self.assertEqual([
+ (utils.datetime_utc(2015, 1, 7), 86400.0, 28),
+ (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 28),
+ (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0),
+ (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 55.0)], measures)
+
+
+class TestStatsdArchivePolicyRule(TestStatsd):
+ STATSD_ARCHIVE_POLICY_NAME = ""
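+ # An empty archive policy name makes metric creation fall back to the
+ # archive policy rules, exercising the rule created in setUp().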
+
+ def setUp(self):
+ super(TestStatsdArchivePolicyRule, self).setUp()
+ try:
+ self.stats.indexer.create_archive_policy_rule(
+ "statsd", "*", "medium")
+ except indexer.ArchivePolicyRuleAlreadyExists:
+ # Created by another test run
+ pass
diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py
new file mode 100644
index 00000000..7047f44d
--- /dev/null
+++ b/gnocchi/tests/test_storage.py
@@ -0,0 +1,1001 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2014-2015 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import uuid
+
+import iso8601
+import mock
+from oslotest import base
+import six.moves
+
+from gnocchi import archive_policy
+from gnocchi import carbonara
+from gnocchi import indexer
+from gnocchi import storage
+from gnocchi.storage import _carbonara
+from gnocchi.tests import base as tests_base
+from gnocchi.tests import utils as tests_utils
+from gnocchi import utils
+
+
+class TestStorageDriver(tests_base.TestCase):
+ def setUp(self):
+ super(TestStorageDriver, self).setUp()
+ # A lot of tests want a metric, so create one
+ self.metric, __ = self._create_metric()
+
+ def _create_metric(self, archive_policy_name="low"):
+ m = storage.Metric(uuid.uuid4(),
+ self.archive_policies[archive_policy_name])
+ m_sql = self.index.create_metric(m.id, str(uuid.uuid4()),
+ archive_policy_name)
+ return m, m_sql
+
+ def trigger_processing(self, metrics=None):
+ if metrics is None:
+ metrics = [str(self.metric.id)]
+ self.storage.process_background_tasks(self.index, metrics, sync=True)
+
+ def test_get_driver(self):
+ driver = storage.get_driver(self.conf)
+ self.assertIsInstance(driver, storage.StorageDriver)
+
+ def test_corrupted_data(self):
+ if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage):
+ self.skipTest("This driver is not based on Carbonara")
+
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ ])
+ self.trigger_processing()
+
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 13, 0, 1), 1),
+ ])
+
+ with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
+ side_effect=carbonara.InvalidData()):
+ with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize',
+ side_effect=carbonara.InvalidData()):
+ self.trigger_processing()
+
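+ # With the stored series unreadable, processing restarts from the new
+ # measure alone, so only the value 1 appears below.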
+ m = self.storage.get_measures(self.metric)
+ self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 1), m)
+ self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), m)
+ self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m)
+
+ def test_aborted_initial_processing(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 5),
+ ])
+ with mock.patch.object(self.storage, '_store_unaggregated_timeserie',
+ side_effect=Exception):
+ try:
+ self.trigger_processing()
+ except Exception:
+ pass
+
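+ # Reprocessing after the aborted attempt must succeed without logging
+ # a corruption error.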
+ with mock.patch('gnocchi.storage._carbonara.LOG') as LOG:
+ self.trigger_processing()
+ self.assertFalse(LOG.error.called)
+
+ m = self.storage.get_measures(self.metric)
+ self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 5.0), m)
+ self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 3600.0, 5.0), m)
+ self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 300.0, 5.0), m)
+
+ def test_list_metric_with_measures_to_process(self):
+ metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming)
+ self.assertEqual(set(), metrics)
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ ])
+ metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming)
+ self.assertEqual(set([str(self.metric.id)]), metrics)
+ self.trigger_processing()
+ metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming)
+ self.assertEqual(set([]), metrics)
+
+ def test_delete_nonempty_metric(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ ])
+ self.trigger_processing()
+ self.storage.delete_metric(self.metric, sync=True)
+ self.trigger_processing()
+ self.assertEqual([], self.storage.get_measures(self.metric))
+ self.assertRaises(storage.MetricDoesNotExist,
+ self.storage._get_unaggregated_timeserie,
+ self.metric)
+
+ def test_delete_nonempty_metric_unprocessed(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ ])
+ self.index.delete_metric(self.metric.id)
+ self.trigger_processing()
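+ # The unprocessed-measure report still references the deleted metric
+ # until its measures are expunged.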
+ __, __, details = self.storage.incoming._build_report(True)
+ self.assertIn(str(self.metric.id), details)
+ self.storage.expunge_metrics(self.index, sync=True)
+ __, __, details = self.storage.incoming._build_report(True)
+ self.assertNotIn(str(self.metric.id), details)
+
+ def test_delete_expunge_metric(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ ])
+ self.trigger_processing()
+ self.index.delete_metric(self.metric.id)
+ self.storage.expunge_metrics(self.index, sync=True)
+ self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric,
+ self.metric.id)
+
+ def test_measures_reporting(self):
+ report = self.storage.incoming.measures_report(True)
+ self.assertIsInstance(report, dict)
+ self.assertIn('summary', report)
+ self.assertIn('metrics', report['summary'])
+ self.assertIn('measures', report['summary'])
+ self.assertIn('details', report)
+ self.assertIsInstance(report['details'], dict)
+ report = self.storage.incoming.measures_report(False)
+ self.assertIsInstance(report, dict)
+ self.assertIn('summary', report)
+ self.assertIn('metrics', report['summary'])
+ self.assertIn('measures', report['summary'])
+ self.assertNotIn('details', report)
+
+ def test_add_measures_big(self):
+ m, __ = self._create_metric('high')
+ self.storage.incoming.add_measures(m, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, i, j), 100)
+ for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)])
+ self.trigger_processing([str(m.id)])
+
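+ # With the "high" policy's second/minute/hour granularities, one hour
+ # of data yields 3600 + 60 + 1 = 3661 aggregates.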
+ self.assertEqual(3661, len(self.storage.get_measures(m)))
+
+ @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48)
+ def test_add_measures_update_subset_split(self):
+ m, m_sql = self._create_metric('medium')
+ measures = [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100)
+ for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)]
+ self.storage.incoming.add_measures(m, measures)
+ self.trigger_processing([str(m.id)])
+
+ # add measure to end, in same aggregate time as last point.
+ self.storage.incoming.add_measures(m, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1), 100)])
+
+ with mock.patch.object(self.storage, '_store_metric_measures') as c:
+ # should only resample last aggregate
+ self.trigger_processing([str(m.id)])
+ count = 0
+ for call in c.mock_calls:
+ # The policy holds 60 points and each split holds 48, so only the
+ # second split should be updated
+ args = call[1]
+ if args[0] == m_sql and args[2] == 'mean' and args[3] == 60.0:
+ count += 1
+ self.assertEqual(1, count)
+
+ def test_add_measures_update_subset(self):
+ m, m_sql = self._create_metric('medium')
+ measures = [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100)
+ for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)]
+ self.storage.incoming.add_measures(m, measures)
+ self.trigger_processing([str(m.id)])
+
+ # add measure to end, in same aggregate time as last point.
+ new_point = utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1)
+ self.storage.incoming.add_measures(
+ m, [storage.Measure(new_point, 100)])
+
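+ # Timestamps are in nanoseconds, hence the granularity * 10e8 (i.e.
+ # 1e9 ns/s) factor in the rounding check below.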
+ with mock.patch.object(self.storage.incoming, 'add_measures') as c:
+ self.trigger_processing([str(m.id)])
+ for __, args, __ in c.mock_calls:
+ self.assertEqual(
+ list(args[3])[0][0], carbonara.round_timestamp(
+ new_point, args[1].granularity * 10e8))
+
+ def test_delete_old_measures(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.trigger_processing()
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
+ ], self.storage.get_measures(self.metric))
+
+ # One year later…
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2015, 1, 1, 12, 0, 1), 69),
+ ])
+ self.trigger_processing()
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2015, 1, 1), 86400.0, 69),
+ (utils.datetime_utc(2015, 1, 1, 12), 3600.0, 69),
+ (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69),
+ ], self.storage.get_measures(self.metric))
+
+ self.assertEqual({"1244160000.0"},
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 86400.0))
+ self.assertEqual({"1412640000.0"},
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 3600.0))
+ self.assertEqual({"1419120000.0"},
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 300.0))
+
+ def test_rewrite_measures(self):
+ # Create an archive policy that spans several splits. Each split
+ # holds 3600 points, so 36k points gives us 10 splits.
+ apname = str(uuid.uuid4())
+ ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
+ self.index.create_archive_policy(ap)
+ self.metric = storage.Metric(uuid.uuid4(), ap)
+ self.index.create_metric(self.metric.id, str(uuid.uuid4()),
+ apname)
+
+ # First store some points scattered across different splits
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
+ ])
+ self.trigger_processing()
+
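+ # Split keys are the epoch timestamps starting each split; at 3600
+ # points per split and 60s granularity they are 216000s apart.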
+ splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
+ self.assertEqual(splits,
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 60.0))
+
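+ # WRITE_FULL drivers rewrite a whole split on every update and store
+ # it compressed right away; other drivers append uncompressed data
+ # until the split is rewritten.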
+ if self.storage.WRITE_FULL:
+ assertCompressedIfWriteFull = self.assertTrue
+ else:
+ assertCompressedIfWriteFull = self.assertFalse
+
+ data = self.storage._get_measures(
+ self.metric, '1451520000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451736000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451952000.0', "mean", 60.0)
+ assertCompressedIfWriteFull(
+ carbonara.AggregatedTimeSerie.is_compressed(data))
+
+ self.assertEqual([
+ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+ (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+ (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+ (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+ ], self.storage.get_measures(self.metric, granularity=60.0))
+
+ # Now store brand new points that should force a rewrite of one of
+ # the splits (keep in mind the back window size is one hour here). We
+ # move the BoundTimeSerie processing timeserie far away from its
+ # current range.
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46),
+ ])
+ self.trigger_processing()
+
+ self.assertEqual({'1452384000.0', '1451736000.0',
+ '1451520000.0', '1451952000.0'},
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 60.0))
+ data = self.storage._get_measures(
+ self.metric, '1451520000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451736000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451952000.0', "mean", 60.0)
+ # Now this one is compressed because it has been rewritten!
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1452384000.0', "mean", 60.0)
+ assertCompressedIfWriteFull(
+ carbonara.AggregatedTimeSerie.is_compressed(data))
+
+ self.assertEqual([
+ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+ (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+ (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+ (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+ (utils.datetime_utc(2016, 1, 10, 16, 18), 60.0, 45),
+ (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46),
+ ], self.storage.get_measures(self.metric, granularity=60.0))
+
+ def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self):
+ """See LP#1655422"""
+ # Create an archive policy that spans several splits. Each split
+ # holds 3600 points, so 36k points gives us 10 splits.
+ apname = str(uuid.uuid4())
+ ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
+ self.index.create_archive_policy(ap)
+ self.metric = storage.Metric(uuid.uuid4(), ap)
+ self.index.create_metric(self.metric.id, str(uuid.uuid4()),
+ apname)
+
+ # First store some points scattered across different splits
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
+ ])
+ self.trigger_processing()
+
+ splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
+ self.assertEqual(splits,
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 60.0))
+
+ if self.storage.WRITE_FULL:
+ assertCompressedIfWriteFull = self.assertTrue
+ else:
+ assertCompressedIfWriteFull = self.assertFalse
+
+ data = self.storage._get_measures(
+ self.metric, '1451520000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451736000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451952000.0', "mean", 60.0)
+ assertCompressedIfWriteFull(
+ carbonara.AggregatedTimeSerie.is_compressed(data))
+
+ self.assertEqual([
+ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+ (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+ (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+ (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+ ], self.storage.get_measures(self.metric, granularity=60.0))
+
+ # Now store brand new points that should force a rewrite of one of
+ # the splits (keep in mind the back window size is one hour here). We
+ # move the BoundTimeSerie processing timeserie far away from its
+ # current range.
+
+ # Here we test a special case where the oldest_mutable_timestamp will
+ # be 2016-01-10T00:00:00 = 1452384000.0, our new split key.
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 0, 12), 45),
+ ])
+ self.trigger_processing()
+
+ self.assertEqual({'1452384000.0', '1451736000.0',
+ '1451520000.0', '1451952000.0'},
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 60.0))
+ data = self.storage._get_measures(
+ self.metric, '1451520000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451736000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451952000.0', "mean", 60.0)
+ # Now this one is compressed because it has been rewritten!
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1452384000.0', "mean", 60.0)
+ assertCompressedIfWriteFull(
+ carbonara.AggregatedTimeSerie.is_compressed(data))
+
+ self.assertEqual([
+ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+ (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+ (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+ (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+ (utils.datetime_utc(2016, 1, 10, 0, 12), 60.0, 45),
+ ], self.storage.get_measures(self.metric, granularity=60.0))
+
+ def test_rewrite_measures_corruption_missing_file(self):
+ # Create an archive policy that spans several splits. Each split
+ # holds 3600 points, so 36k points gives us 10 splits.
+ apname = str(uuid.uuid4())
+ ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
+ self.index.create_archive_policy(ap)
+ self.metric = storage.Metric(uuid.uuid4(), ap)
+ self.index.create_metric(self.metric.id, str(uuid.uuid4()),
+ apname)
+
+ # First store some points scattered across different splits
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
+ ])
+ self.trigger_processing()
+
+ splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
+ self.assertEqual(splits,
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 60.0))
+
+ if self.storage.WRITE_FULL:
+ assertCompressedIfWriteFull = self.assertTrue
+ else:
+ assertCompressedIfWriteFull = self.assertFalse
+
+ data = self.storage._get_measures(
+ self.metric, '1451520000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451736000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451952000.0', "mean", 60.0)
+ assertCompressedIfWriteFull(
+ carbonara.AggregatedTimeSerie.is_compressed(data))
+
+ self.assertEqual([
+ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+ (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+ (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+ (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+ ], self.storage.get_measures(self.metric, granularity=60.0))
+
+ # Test what happens if we delete the latest split and then need to
+ # compress it!
+ self.storage._delete_metric_measures(self.metric,
+ '1451952000.0',
+ 'mean', 60.0)
+
+ # Now store brand new points that should force a rewrite of one of
+ # the splits (keep in mind the back window size is one hour here). We
+ # move the BoundTimeSerie processing timeserie far away from its
+ # current range.
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46),
+ ])
+ self.trigger_processing()
+
+ def test_rewrite_measures_corruption_bad_data(self):
+ # Create an archive policy that spans several splits. Each split
+ # holds 3600 points, so 36k points gives us 10 splits.
+ apname = str(uuid.uuid4())
+ ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
+ self.index.create_archive_policy(ap)
+ self.metric = storage.Metric(uuid.uuid4(), ap)
+ self.index.create_metric(self.metric.id, str(uuid.uuid4()),
+ apname)
+
+ # First store some points scattered across different splits
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
+ ])
+ self.trigger_processing()
+
+ splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
+ self.assertEqual(splits,
+ self.storage._list_split_keys_for_metric(
+ self.metric, "mean", 60.0))
+
+ if self.storage.WRITE_FULL:
+ assertCompressedIfWriteFull = self.assertTrue
+ else:
+ assertCompressedIfWriteFull = self.assertFalse
+
+ data = self.storage._get_measures(
+ self.metric, '1451520000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451736000.0', "mean", 60.0)
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+ data = self.storage._get_measures(
+ self.metric, '1451952000.0', "mean", 60.0)
+ assertCompressedIfWriteFull(
+ carbonara.AggregatedTimeSerie.is_compressed(data))
+
+ self.assertEqual([
+ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+ (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+ (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+ (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+ ], self.storage.get_measures(self.metric, granularity=60.0))
+
+ # Test what happens if we write garbage
+ self.storage._store_metric_measures(
+ self.metric, '1451952000.0', "mean", 60.0, b"oh really?")
+
+ # Now store brand new points that should force a rewrite of one of
+ # the splits (keep in mind the back window size is one hour here). We
+ # move the BoundTimeSerie processing timeserie far away from its
+ # current range.
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45),
+ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46),
+ ])
+ self.trigger_processing()
+
+ def test_updated_measures(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ ])
+ self.trigger_processing()
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 55.5),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 55.5),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69),
+ (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0),
+ ], self.storage.get_measures(self.metric))
+
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.trigger_processing()
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
+ ], self.storage.get_measures(self.metric))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 69),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 69.0),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
+ ], self.storage.get_measures(self.metric, aggregation='max'))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 4),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 4),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 4.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
+ ], self.storage.get_measures(self.metric, aggregation='min'))
+
+ def test_add_and_get_measures(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.trigger_processing()
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
+ ], self.storage.get_measures(self.metric))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
+ ], self.storage.get_measures(
+ self.metric,
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0)))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
+ ], self.storage.get_measures(
+ self.metric,
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 6, 0)))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
+ ], self.storage.get_measures(
+ self.metric,
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10),
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10)))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ ], self.storage.get_measures(
+ self.metric,
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2)))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ ], self.storage.get_measures(
+ self.metric,
+ from_timestamp=iso8601.parse_date("2014-1-1 13:00:00+01:00"),
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2)))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
+ ], self.storage.get_measures(
+ self.metric,
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2),
+ granularity=3600.0))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
+ ], self.storage.get_measures(
+ self.metric,
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2),
+ granularity=300.0))
+
+ self.assertRaises(storage.GranularityDoesNotExist,
+ self.storage.get_measures,
+ self.metric,
+ granularity=42)
+
+ def test_get_cross_metric_measures_unknown_metric(self):
+ self.assertEqual([],
+ self.storage.get_cross_metric_measures(
+ [storage.Metric(uuid.uuid4(),
+ self.archive_policies['low']),
+ storage.Metric(uuid.uuid4(),
+ self.archive_policies['low'])]))
+
+ def test_get_measure_unknown_aggregation(self):
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.assertRaises(storage.AggregationDoesNotExist,
+ self.storage.get_measures,
+ self.metric, aggregation='last')
+
+ def test_get_cross_metric_measures_unknown_aggregation(self):
+ metric2 = storage.Metric(uuid.uuid4(),
+ self.archive_policies['low'])
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.storage.incoming.add_measures(metric2, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.assertRaises(storage.AggregationDoesNotExist,
+ self.storage.get_cross_metric_measures,
+ [self.metric, metric2],
+ aggregation='last')
+
+ def test_get_cross_metric_measures_unknown_granularity(self):
+ metric2 = storage.Metric(uuid.uuid4(),
+ self.archive_policies['low'])
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.storage.incoming.add_measures(metric2, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.assertRaises(storage.GranularityDoesNotExist,
+ self.storage.get_cross_metric_measures,
+ [self.metric, metric2],
+ granularity=12345.456)
+
+ def test_add_and_get_cross_metric_measures_different_archives(self):
+ metric2 = storage.Metric(uuid.uuid4(),
+ self.archive_policies['no_granularity_match'])
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.storage.incoming.add_measures(metric2, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+
+ self.assertRaises(storage.MetricUnaggregatable,
+ self.storage.get_cross_metric_measures,
+ [self.metric, metric2])
+
+ def test_add_and_get_cross_metric_measures(self):
+ metric2, __ = self._create_metric()
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
+ ])
+ self.storage.incoming.add_measures(metric2, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 41), 2),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 10, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 4),
+ ])
+ self.trigger_processing([str(self.metric.id), str(metric2.id)])
+
+ values = self.storage.get_cross_metric_measures([self.metric, metric2])
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 12.5),
+ (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0)
+ ], values)
+
+ values = self.storage.get_cross_metric_measures([self.metric, metric2],
+ reaggregation='max')
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 39.75),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 69),
+ (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 23),
+ (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 44)
+ ], values)
+
+ values = self.storage.get_cross_metric_measures(
+ [self.metric, metric2],
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0))
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0),
+ ], values)
+
+ values = self.storage.get_cross_metric_measures(
+ [self.metric, metric2],
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 5, 0))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
+ ], values)
+
+ values = self.storage.get_cross_metric_measures(
+ [self.metric, metric2],
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10),
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10))
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 24.0),
+ ], values)
+
+ values = self.storage.get_cross_metric_measures(
+ [self.metric, metric2],
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1))
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
+ ], values)
+
+ values = self.storage.get_cross_metric_measures(
+ [self.metric, metric2],
+ from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
+ to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1),
+ granularity=300.0)
+
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
+ ], values)
+
+ def test_add_and_get_cross_metric_measures_with_holes(self):
+ metric2, __ = self._create_metric()
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42),
+ ])
+ self.storage.incoming.add_measures(metric2, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2),
+ ])
+ self.trigger_processing([str(self.metric.id), str(metric2.id)])
+
+ values = self.storage.get_cross_metric_measures([self.metric, metric2])
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 18.875),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 18.875),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 11.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 22.0)
+ ], values)
+
+ def test_search_value(self):
+ metric2, __ = self._create_metric()
+ self.storage.incoming.add_measures(self.metric, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42),
+ ])
+
+ self.storage.incoming.add_measures(metric2, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2),
+ ])
+ self.trigger_processing([str(self.metric.id), str(metric2.id)])
+
+ self.assertEqual(
+ {metric2: [],
+ self.metric: [
+ (utils.datetime_utc(2014, 1, 1), 86400, 33),
+ (utils.datetime_utc(2014, 1, 1, 12), 3600, 33),
+ (utils.datetime_utc(2014, 1, 1, 12), 300, 69),
+ (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42)]},
+ self.storage.search_value(
+ [metric2, self.metric],
+ {u"≥": 30}))
+
+ self.assertEqual(
+ {metric2: [], self.metric: []},
+ self.storage.search_value(
+ [metric2, self.metric],
+ {u"∧": [
+ {u"eq": 100},
+ {u"≠": 50}]}))
+
+ def test_resize_policy(self):
+ name = str(uuid.uuid4())
+ ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
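+ # The policy keeps 3 points at 5-second granularity.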
+ self.index.create_archive_policy(ap)
+ m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name)
+ m = self.index.list_metrics(ids=[m.id])[0]
+ self.storage.incoming.add_measures(m, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 0), 1),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 1),
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 10), 1),
+ ])
+ self.trigger_processing([str(m.id)])
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
+ ], self.storage.get_measures(m))
+ # expand to more points
+ self.index.update_archive_policy(
+ name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
+ m = self.index.list_metrics(ids=[m.id])[0]
+ self.storage.incoming.add_measures(m, [
+ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 15), 1),
+ ])
+ self.trigger_processing([str(m.id)])
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
+ ], self.storage.get_measures(m))
+ # shrink timespan
+ self.index.update_archive_policy(
+ name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
+ m = self.index.list_metrics(ids=[m.id])[0]
+ self.assertEqual([
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
+ (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
+ ], self.storage.get_measures(m))
+
+
+class TestMeasureQuery(base.BaseTestCase):
+ def test_equal(self):
+ q = storage.MeasureQuery({"=": 4})
+ self.assertTrue(q(4))
+ self.assertFalse(q(40))
+
+ def test_gt(self):
+ q = storage.MeasureQuery({">": 4})
+ self.assertTrue(q(40))
+ self.assertFalse(q(4))
+
+ def test_and(self):
+ q = storage.MeasureQuery({"and": [{">": 4}, {"<": 10}]})
+ self.assertTrue(q(5))
+ self.assertFalse(q(40))
+ self.assertFalse(q(1))
+
+ def test_or(self):
+ q = storage.MeasureQuery({"or": [{"=": 4}, {"=": 10}]})
+ self.assertTrue(q(4))
+ self.assertTrue(q(10))
+ self.assertFalse(q(-1))
+
+ def test_modulo(self):
+ q = storage.MeasureQuery({"=": [{"%": 5}, 0]})
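+ # The query matches values v where v % 5 == 0.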
+ self.assertTrue(q(5))
+ self.assertTrue(q(10))
+ self.assertFalse(q(-1))
+ self.assertFalse(q(6))
+
+ def test_math(self):
+ q = storage.MeasureQuery(
+ {
+ u"and": [
+ # v+5 is bigger than 0
+ {u"≥": [{u"+": 5}, 0]},
+ # v-6 is not 5
+ {u"≠": [5, {u"-": 6}]},
+ ],
+ }
+ )
+ self.assertTrue(q(5))
+ self.assertTrue(q(10))
+ self.assertFalse(q(11))
+
+ def test_empty(self):
+ q = storage.MeasureQuery({})
+ self.assertFalse(q(5))
+ self.assertFalse(q(10))
+
+ def test_bad_format(self):
+ self.assertRaises(storage.InvalidQuery,
+ storage.MeasureQuery,
+ {"foo": [{"=": 4}, {"=": 10}]})
+
+ self.assertRaises(storage.InvalidQuery,
+ storage.MeasureQuery,
+ {"=": [1, 2, 3]})
diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py
new file mode 100644
index 00000000..d90bc287
--- /dev/null
+++ b/gnocchi/tests/test_utils.py
@@ -0,0 +1,105 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import os
+import uuid
+
+import iso8601
+import mock
+
+from gnocchi.tests import base as tests_base
+from gnocchi import utils
+
+
+class TestUtils(tests_base.TestCase):
+ def _do_test_datetime_to_unix_timezone_change(self, expected, dt):
+ self.assertEqual(expected, utils.datetime_to_unix(dt))
+ with mock.patch.dict(os.environ, {'TZ': 'UTC'}):
+ self.assertEqual(expected, utils.datetime_to_unix(dt))
+ with mock.patch.dict(os.environ, {'TZ': 'Europe/Paris'}):
+ self.assertEqual(expected, utils.datetime_to_unix(dt))
+ with mock.patch.dict(os.environ, {'TZ': 'US/Eastern'}):
+ self.assertEqual(expected, utils.datetime_to_unix(dt))
+
+ def test_datetime_to_unix_timezone_change_utc(self):
+ dt = datetime.datetime(2015, 1, 1, 10, 0, tzinfo=iso8601.iso8601.UTC)
+ self._do_test_datetime_to_unix_timezone_change(1420106400.0, dt)
+
+ def test_datetime_to_unix_timezone_change_offset(self):
+ dt = datetime.datetime(2015, 1, 1, 15, 0,
+ tzinfo=iso8601.iso8601.FixedOffset(5, 0, '+5h'))
+ self._do_test_datetime_to_unix_timezone_change(1420106400.0, dt)
+
+ def test_to_timestamps_epoch(self):
+ self.assertEqual(
+ utils.to_datetime("1425652440"),
+ datetime.datetime(2015, 3, 6, 14, 34,
+ tzinfo=iso8601.iso8601.UTC))
+ self.assertEqual(
+ utils.to_datetime("1425652440.4"),
+ datetime.datetime(2015, 3, 6, 14, 34, 0, 400000,
+ tzinfo=iso8601.iso8601.UTC))
+ self.assertEqual(
+ utils.to_datetime(1425652440),
+ datetime.datetime(2015, 3, 6, 14, 34,
+ tzinfo=iso8601.iso8601.UTC))
+ self.assertEqual(
+ utils.to_datetime(utils.to_timestamp(1425652440.4)),
+ datetime.datetime(2015, 3, 6, 14, 34, 0, 400000,
+ tzinfo=iso8601.iso8601.UTC))
+
+
+class TestResourceUUID(tests_base.TestCase):
+ def test_conversion(self):
+ self.assertEqual(
+ uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'),
+ utils.ResourceUUID(
+ uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'),
+ "bar"))
+ self.assertEqual(
+ uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'),
+ utils.ResourceUUID("foo", "bar"))
+ self.assertEqual(
+ uuid.UUID('4efb21f6-3d19-5fe3-910b-be8f0f727846'),
+ utils.ResourceUUID("foo", None))
+ self.assertEqual(
+ uuid.UUID('853e5c64-f45e-58b2-999c-96df856fbe3d'),
+ utils.ResourceUUID("foo", ""))
+
+
+class StopWatchTest(tests_base.TestCase):
+ def test_no_states(self):
+ watch = utils.StopWatch()
+ self.assertRaises(RuntimeError, watch.stop)
+
+ def test_start_stop(self):
+ watch = utils.StopWatch()
+ watch.start()
+ watch.stop()
+
+ def test_no_elapsed(self):
+ watch = utils.StopWatch()
+ self.assertRaises(RuntimeError, watch.elapsed)
+
+ def test_elapsed(self):
+ watch = utils.StopWatch()
+ watch.start()
+ watch.stop()
+ elapsed = watch.elapsed()
+ self.assertAlmostEqual(elapsed, watch.elapsed())
+
+ def test_context_manager(self):
+ with utils.StopWatch() as watch:
+ pass
+ self.assertGreater(watch.elapsed(), 0)
diff --git a/gnocchi/tests/utils.py b/gnocchi/tests/utils.py
new file mode 100644
index 00000000..e9b0b339
--- /dev/null
+++ b/gnocchi/tests/utils.py
@@ -0,0 +1,19 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import six
+
+
+def list_all_incoming_metrics(incoming):
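+ """Return the union of metric ids with unprocessed measures across all sacks."""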
+ return set.union(*[incoming.list_metric_with_measures_to_process(i)
+ for i in six.moves.range(incoming.NUM_SACKS)])
diff --git a/gnocchi/utils.py b/gnocchi/utils.py
new file mode 100644
index 00000000..b7e92263
--- /dev/null
+++ b/gnocchi/utils.py
@@ -0,0 +1,299 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2015-2017 Red Hat, Inc.
+# Copyright © 2015-2016 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import distutils.util
+import errno
+import itertools
+import multiprocessing
+import numbers
+import os
+import uuid
+
+import iso8601
+import monotonic
+import numpy
+from oslo_log import log
+import pandas as pd
+import six
+import tenacity
+from tooz import coordination
+
+LOG = log.getLogger(__name__)
+
+# uuid5 namespace for id transformation.
+# NOTE(chdent): This UUID must stay the same, forever, across all
+# of gnocchi to preserve its value as a URN namespace.
+RESOURCE_ID_NAMESPACE = uuid.UUID('0a7a15ff-aa13-4ac2-897c-9bdf30ce175b')
+
+
+def ResourceUUID(value, creator):
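+ """Convert value to a UUID, scoping non-UUID values by creator.
+
+ UUID instances are returned unchanged and valid UUID strings are simply
+ parsed. Any other string of at most 255 characters is hashed with uuid5,
+ so the result is stable and unique per creator: e.g.
+ ResourceUUID("foo", "bar") always yields
+ ba571521-1de6-5aff-b183-1535fd6eb5d0 (see TestResourceUUID).
+ """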
+ if isinstance(value, uuid.UUID):
+ return value
+ if '/' in value:
+ raise ValueError("'/' is not supported in resource id")
+ try:
+ return uuid.UUID(value)
+ except ValueError:
+ if len(value) <= 255:
+ if creator is None:
+ creator = "\x00"
+ # value/creator must be str (unicode) in Python 3 and str (bytes)
+ # in Python 2. It's not logical, I know.
+ if six.PY2:
+ value = value.encode('utf-8')
+ creator = creator.encode('utf-8')
+ return uuid.uuid5(RESOURCE_ID_NAMESPACE,
+ value + "\x00" + creator)
+ raise ValueError(
+ 'transformable resource id must not exceed 255 characters')
+
+
+def UUID(value):
+ try:
+ return uuid.UUID(value)
+ except Exception as e:
+ raise ValueError(e)
+
+
+# Retry with exponential backoff, waiting at most 60 seconds between attempts
+retry = tenacity.retry(
+ wait=tenacity.wait_exponential(multiplier=0.5, max=60),
+ # Never retry except when explicitly asked by raising TryAgain
+ retry=tenacity.retry_never,
+ reraise=True)
+
+
+# TODO(jd) Move this to tooz?
+@retry
+def _enable_coordination(coord):
+ try:
+ coord.start(start_heart=True)
+ except Exception as e:
+ LOG.error("Unable to start coordinator: %s", e)
+ raise tenacity.TryAgain(e)
+
+
+def get_coordinator_and_start(url):
+ my_id = uuid.uuid4().bytes
+ coord = coordination.get_coordinator(url, my_id)
+ _enable_coordination(coord)
+ return coord, my_id
+
+
+unix_universal_start64 = numpy.datetime64("1970")
+
+
+def to_timestamps(values):
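+ # Each value may be an epoch number, a datetime object or an ISO8601
+ # string, or an offset parsable by pandas (e.g. "5 minutes"), which is
+ # interpreted relative to utcnow(). All values must use the same form.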
+ try:
+ values = list(values)
+ if isinstance(values[0], numbers.Real):
+ times = pd.to_datetime(values, utc=True, box=False, unit='s')
+ elif (isinstance(values[0], datetime.datetime) or
+ is_valid_timestamp(values[0])):
+ times = pd.to_datetime(values, utc=True, box=False)
+ else:
+ try:
+ float(values[0])
+ except ValueError:
+ times = (utcnow() + pd.to_timedelta(values)).values
+ else:
+ times = pd.to_datetime(list(map(float, values)),
+ utc=True, box=False, unit='s')
+ except ValueError:
+ raise ValueError("Unable to convert timestamps")
+
+ if (times < unix_universal_start64).any():
+ raise ValueError('Timestamp must be after Epoch')
+
+ return times
+
+
+def is_valid_timestamp(value):
+ try:
+ pd.to_datetime(value)
+ except Exception:
+ return False
+ return True
+
+
+def to_timestamp(value):
+ return to_timestamps((value,))[0]
+
+
+def to_datetime(value):
+ return timestamp_to_datetime(to_timestamp(value))
+
+
+def timestamp_to_datetime(v):
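+ # v is a numpy datetime64 in nanoseconds since the epoch;
+ # 10e8 == 1e9, so this converts nanoseconds to seconds.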
+ return datetime.datetime.utcfromtimestamp(
+ v.astype(float) / 10e8).replace(tzinfo=iso8601.iso8601.UTC)
+
+
+def to_timespan(value):
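+ # Accepts a number of seconds or any string pandas can parse as a
+ # timedelta, e.g. to_timespan("1 hour") == datetime.timedelta(seconds=3600).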
+ if value is None:
+ raise ValueError("Invalid timespan")
+ try:
+ seconds = float(value)
+ except Exception:
+ try:
+ seconds = pd.to_timedelta(value).total_seconds()
+ except Exception:
+ raise ValueError("Unable to parse timespan")
+ if seconds <= 0:
+ raise ValueError("Timespan must be positive")
+ return datetime.timedelta(seconds=seconds)
+
+
+def utcnow():
+ """Version of utcnow() that returns utcnow with a correct TZ."""
+ return datetime.datetime.now(tz=iso8601.iso8601.UTC)
+
+
+def normalize_time(timestamp):
+ """Normalize time in arbitrary timezone to UTC naive object."""
+ offset = timestamp.utcoffset()
+ if offset is None:
+ return timestamp
+ return timestamp.replace(tzinfo=None) - offset
+
+
+def datetime_utc(*args):
+ return datetime.datetime(*args, tzinfo=iso8601.iso8601.UTC)
+
+
+unix_universal_start = datetime_utc(1970, 1, 1)
+
+
+def datetime_to_unix(timestamp):
+ return (timestamp - unix_universal_start).total_seconds()
+
+
+def dt_to_unix_ns(*args):
+ return int(datetime_to_unix(datetime.datetime(
+ *args, tzinfo=iso8601.iso8601.UTC)) * int(10e8))
+
+
+def dt_in_unix_ns(timestamp):
+ return int(datetime_to_unix(timestamp) * int(10e8))
+
+
+def get_default_workers():
+ try:
+ default_workers = multiprocessing.cpu_count() or 1
+ except NotImplementedError:
+ default_workers = 1
+ return default_workers
+
+
+def grouper(iterable, n):
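+ # grouper("ABCDEFG", 3) yields ("A", "B", "C"), ("D", "E", "F"), ("G",)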
+ it = iter(iterable)
+ while True:
+ chunk = tuple(itertools.islice(it, n))
+ if not chunk:
+ return
+ yield chunk
+
+
+def ensure_paths(paths):
+ for p in paths:
+ try:
+ os.makedirs(p)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def strtobool(v):
+ if isinstance(v, bool):
+ return v
+ return bool(distutils.util.strtobool(v))
+
+
+class StopWatch(object):
+ """A simple timer/stopwatch helper class.
+
+ Inspired by: apache-commons-lang java stopwatch.
+
+ Not thread-safe: a single watch must not be mutated by multiple threads
+ at the same time unless those operations are wrapped with locks. Each
+ thread using its own watch is safe.
+
+ It will use the `monotonic`_ pypi library to find an appropriate
+ monotonically increasing time providing function (which typically varies
+ depending on operating system and python version).
+
+ .. _monotonic: https://pypi.python.org/pypi/monotonic/
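+
+ Example::
+
+ with StopWatch() as watch:
+ do_something() # placeholder for the code being timed
+ print(watch.elapsed())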
+ """
+ _STARTED = object()
+ _STOPPED = object()
+
+ def __init__(self):
+ self._started_at = None
+ self._stopped_at = None
+ self._state = None
+
+ def start(self):
+ """Starts the watch (if not already started).
+
+ NOTE(harlowja): resets any splits previously captured (if any).
+ """
+ if self._state == self._STARTED:
+ return self
+ self._started_at = monotonic.monotonic()
+ self._state = self._STARTED
+ return self
+
+ @staticmethod
+ def _delta_seconds(earlier, later):
+ # Uses max to avoid the delta/time going backwards (and thus negative).
+ return max(0.0, later - earlier)
+
+ def elapsed(self):
+ """Returns how many seconds have elapsed."""
+ if self._state not in (self._STARTED, self._STOPPED):
+ raise RuntimeError("Can not get the elapsed time of a stopwatch"
+ " if it has not been started/stopped")
+ if self._state == self._STOPPED:
+ elapsed = self._delta_seconds(self._started_at, self._stopped_at)
+ else:
+ elapsed = self._delta_seconds(
+ self._started_at, monotonic.monotonic())
+ return elapsed
+
+ def __enter__(self):
+ """Starts the watch."""
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Stops the watch (ignoring errors if stop fails)."""
+ try:
+ self.stop()
+ except RuntimeError:
+ pass
+
+ def stop(self):
+ """Stops the watch."""
+ if self._state == self._STOPPED:
+ return self
+ if self._state != self._STARTED:
+ raise RuntimeError("Can not stop a stopwatch that has not been"
+ " started")
+ self._stopped_at = monotonic.monotonic()
+ self._state = self._STOPPED
+ return self
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
new file mode 100644
index 00000000..e69de29b
diff --git a/releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml b/releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml
new file mode 100644
index 00000000..2f833808
--- /dev/null
+++ b/releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Allow searching for values in metrics using
+ one or more granularities.
diff --git a/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml b/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml
new file mode 100644
index 00000000..682a4e4c
--- /dev/null
+++ b/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - >-
+ A new archive policy named *bool* is provided by default. It provides a
+ cheap and easy way to store boolean measures (0 and 1).
diff --git a/releasenotes/notes/auth_type_option-c335b219afba5569.yaml b/releasenotes/notes/auth_type_option-c335b219afba5569.yaml
new file mode 100644
index 00000000..53727864
--- /dev/null
+++ b/releasenotes/notes/auth_type_option-c335b219afba5569.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - >-
+ The new `auth_type` option specifies which authentication system to use for
+ the REST API. Its default is still `noauth`.
diff --git a/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml b/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml
new file mode 100644
index 00000000..f198eb8a
--- /dev/null
+++ b/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - >-
+ The REST API authentication mechanism is now pluggable. You can write your
+ own plugin to specify how segregation and policy should be enforced.
diff --git a/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml b/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml
new file mode 100644
index 00000000..cdfeee45
--- /dev/null
+++ b/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Add support for backfilling timestamps with missing points in a subset of
+ timeseries when computing aggregation across multiple metrics. Users can
+ specify a `fill` value that is either a float or `null`. A granularity
+ must be specified in addition to `fill`.
diff --git a/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml b/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml
new file mode 100644
index 00000000..afccc58b
--- /dev/null
+++ b/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - "When sending measures in batch for resources, it is now possible to pass
+ `create_metric=true` to the query parameters so missing metrics are created.
+ This only works if an archive policy rule matching those named metrics matches."
diff --git a/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml b/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml
new file mode 100644
index 00000000..d053330b
--- /dev/null
+++ b/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - The Ceph driver now stores measures metadata using the omap API
+ instead of xattr. Already created measures are migrated when
+ gnocchi-upgrade is run.
diff --git a/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml b/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml
new file mode 100644
index 00000000..2dfe37de
--- /dev/null
+++ b/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml
@@ -0,0 +1,4 @@
+---
+other:
+ - The Ceph driver now uses the RADOS asynchronous API to retrieve
+ measures to process in parallel.
diff --git a/releasenotes/notes/creator_field-6b715c917f6afc93.yaml b/releasenotes/notes/creator_field-6b715c917f6afc93.yaml
new file mode 100644
index 00000000..e9b3bfd1
--- /dev/null
+++ b/releasenotes/notes/creator_field-6b715c917f6afc93.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - >-
+ The `created_by_user_id` and `created_by_project_id` fields are now
+ deprecated and merged into a single `creator` field. The old fields
+ are still returned and managed by the API for now.
diff --git a/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml b/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml
new file mode 100644
index 00000000..0f6b0421
--- /dev/null
+++ b/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - A new REST API call is provided to delete multiple resources at once using a search filter.
diff --git a/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml b/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml
new file mode 100644
index 00000000..635097c6
--- /dev/null
+++ b/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+ - The `noauth` authentication mechanism is deprecated and will be removed in
+ a future version.
diff --git a/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml b/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml
new file mode 100644
index 00000000..b2c5167b
--- /dev/null
+++ b/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Add a `resample` parameter to support resampling stored time series to
+ another granularity, not necessarily one defined in the existing archive
+ policy. If both the resampling and reaggregation parameters are specified,
+ resampling occurs prior to reaggregation.
diff --git a/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml b/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml
new file mode 100644
index 00000000..bab5e73a
--- /dev/null
+++ b/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - |
+ A workaround for a Python 2.7 bug in `fnmatch` has been removed. Make sure
+ you use at least Python 2.7.9 to avoid running into it.
diff --git a/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml b/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml
new file mode 100644
index 00000000..5999cb7f
--- /dev/null
+++ b/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - The '/' character in resource ids and metric names was accepted by
+ mistake, because such ids could be POSTed but not GETed/PATCHed/DELETEd.
+ This character is now forbidden in resource ids and metric names, and the
+ REST API returns 400 when it is present. Metric names and resource ids
+ that already contain '/' have it replaced by '_'.
diff --git a/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml b/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml
new file mode 100644
index 00000000..73af05f2
--- /dev/null
+++ b/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - >-
+ The `gnocchi-config-generator` program can now generate a default
+ configuration file, usable as a template for custom tweaking.
diff --git a/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml b/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml
new file mode 100644
index 00000000..5e28af9c
--- /dev/null
+++ b/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - A healthcheck endpoint is provided by default at /healthcheck. It leverages
+ the oslo.middleware healthcheck middleware and allows retrieving information
+ about the health of the API service.
diff --git a/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml b/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml
new file mode 100644
index 00000000..c2cf17ff
--- /dev/null
+++ b/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ New measures are now sharded into sacks to better distribute data across
+ the storage driver, as well as to allow improved scheduling of the
+ aggregation workload.
+upgrade:
+ - |
+ The storage driver needs to be upgraded. The number of sacks to distribute
+ across can be configured by passing the ``num-storage-sacks`` value on
+ upgrade. A default number of sacks is created if it is not set. This can
+ also be reconfigured post-upgrade by using the ``gnocchi-change-sack-size``
+ CLI. See the documentation for hints on the number of sacks to set for
+ your environment, and the upgrade notes.
diff --git a/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml b/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml
new file mode 100644
index 00000000..a213d3e3
--- /dev/null
+++ b/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - The default archive policies "low" and "medium" are now storing less data
+ than they used to be. They are only using respectively 1 and 2 definition
+ of archiving policy, which speeds up by 66% and 33% their computing speed.
diff --git a/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml b/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml
new file mode 100644
index 00000000..579c835d
--- /dev/null
+++ b/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml
@@ -0,0 +1,4 @@
+---
+other:
+ - Gnocchi now leverages the microsecond timestamps available since MySQL
+ 5.6.4, which is therefore now the minimum required version of MySQL.
diff --git a/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml b/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml
new file mode 100644
index 00000000..004ef170
--- /dev/null
+++ b/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - >-
+ The `noauth` authentication mode now requires the `X-User-Id` and/or
+ `X-Project-Id` headers to be present.
diff --git a/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml b/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml
new file mode 100644
index 00000000..0aaffc38
--- /dev/null
+++ b/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - >-
+ The `auth_type` option has a new default value set to "basic". This mode
+ does not do any segregation and uses the standard HTTP `Authorization`
+ header for authentication. The old "noauth" authentication mechanism based
+ on the Keystone headers (`X-User-Id`, `X-Creator-Id` and `X-Roles`) and the
+ Keystone segregation rules, which was the default up to Gnocchi 3.0, is
+ still available.
diff --git a/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml b/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml
new file mode 100644
index 00000000..9098b81f
--- /dev/null
+++ b/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml
@@ -0,0 +1,3 @@
+---
+upgrade:
+ - The `api.pecan_debug` option has been removed.
diff --git a/releasenotes/notes/redis-driver-299dc443170364bc.yaml b/releasenotes/notes/redis-driver-299dc443170364bc.yaml
new file mode 100644
index 00000000..b8214f27
--- /dev/null
+++ b/releasenotes/notes/redis-driver-299dc443170364bc.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ A Redis driver has been introduced for storing incoming measures and
+ computed timeseries.
diff --git a/releasenotes/notes/reloading-734a639a667c93ee.yaml b/releasenotes/notes/reloading-734a639a667c93ee.yaml
new file mode 100644
index 00000000..0cf2eb73
--- /dev/null
+++ b/releasenotes/notes/reloading-734a639a667c93ee.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - gnocchi-metricd now uses the cotyledon/oslo.config helper to handle
+ configuration file reloading. You can dynamically change the number
+ of workers by changing the configuration file and sending SIGHUP to the
+ metricd master process.
diff --git a/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml b/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml
new file mode 100644
index 00000000..4d6e0f87
--- /dev/null
+++ b/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml
@@ -0,0 +1,3 @@
+---
+deprecations:
+ - The creation of the legacy Ceilometer resource types has been removed.
diff --git a/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml b/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml
new file mode 100644
index 00000000..75ff241a
--- /dev/null
+++ b/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - The default archive policies list no longer contains the 95pct and median
+ aggregation methods. These are the least used methods, and removing them
+ should make gnocchi-metricd faster by more than 25% in the default scenario.
diff --git a/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml b/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml
new file mode 100644
index 00000000..a837c72d
--- /dev/null
+++ b/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |-
+ A new REST API endpoint has been added to allow updating a
+ resource type: "PATCH /v1/resource-type/foobar". The expected payload is in
+ RFC6902 format. Some examples can be found in the documentation.
diff --git a/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml b/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml
new file mode 100644
index 00000000..a91c8176
--- /dev/null
+++ b/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - When updating a resource type, it is now possible to pass the option
+ 'fill' for each new attribute to fill existing resources.
+ - required=True is now supported when updating a resource type. This
+ requires the option 'fill' to be set.
diff --git a/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml b/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml
new file mode 100644
index 00000000..1dba0232
--- /dev/null
+++ b/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Previously, the s3 storage driver stored aggregates in one bucket per
+ metric. This would quickly run into the bucket limit set by s3. The s3
+ storage driver has been fixed so it stores all aggregates for all metrics
+ in a single bucket. Buckets previously created by Gnocchi will need to be
+ deleted, as they are no longer handled.
diff --git a/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml b/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml
new file mode 100644
index 00000000..5b5426ee
--- /dev/null
+++ b/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ The S3 driver now checks for data consistency by default. S3 does not
+ guarantee read-after-write consistency when overwriting data. Gnocchi now
+ waits up to `s3_check_consistency_timeout` seconds before returning and
+ unlocking a metric for new processing. This makes sure that the data that
+ will be read by the next workers will be consistent and that no data will
+ be lost. This feature can be disabled by setting the value to 0.
diff --git a/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml b/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml
new file mode 100644
index 00000000..535c6d1e
--- /dev/null
+++ b/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - New storage driver for AWS S3.
+ This new driver works in the same way as the Swift driver, except that it
+ leverages the Amazon Web Services S3 object storage API.
diff --git a/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml b/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml
new file mode 100644
index 00000000..cb2ef22a
--- /dev/null
+++ b/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - The Carbonara based storage engine has been updated and greatly improved.
+ It now features fast writes for Ceph (no change for the file and Swift
+ based drivers) by using an append method.
+ It also features on-the-fly data compression (using LZ4) of the aggregated
+ time series, reducing the data space usage by at least 50%.
+upgrade:
+ - gnocchi-upgrade must be run before running the new version of
+ gnocchi-metricd and the HTTP REST API in order to upgrade from version 2 of
+ the Carbonara storage engine to version 3. It will read all metrics and
+ convert them to the new version 3 serialization format (compressing the
+ data), which might take some time.
diff --git a/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml b/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml
new file mode 100644
index 00000000..f1d63bb6
--- /dev/null
+++ b/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - The storage of new measures that ought to be processed by *metricd* can
+ now use a different storage driver. By default, the regular configured
+ storage driver is still used. See the `[incoming]` section in the
+ configuration file.
diff --git a/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml b/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml
new file mode 100644
index 00000000..9a52e062
--- /dev/null
+++ b/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Swift now supports authentication with Keystone v3 API.
diff --git a/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml b/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml
new file mode 100644
index 00000000..bd0480ca
--- /dev/null
+++ b/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ The storage upgrade is only supported from version 3.1.
diff --git a/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml b/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml
new file mode 100644
index 00000000..ec6b6c51
--- /dev/null
+++ b/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml
@@ -0,0 +1,12 @@
+---
+issues:
+ - >-
+ The conversion mechanism provided by the API to convert non-UUID resource
+ ids to UUIDs is now also based on the user creating or accessing the
+ resource. This makes sure that the conversion generates a UUID unique to
+ the user and that several users can use the same string as
+ `original_resource_id`.
+upgrade:
+ - >-
+ Since `original_resource_id` is now unique per creator, users cannot
+ refer to a resource by its `original_resource_id` if the resource was not
+ created by them.
diff --git a/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml b/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml
new file mode 100644
index 00000000..d2739ec7
--- /dev/null
+++ b/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - |
+ The custom gnocchi/rest/app.wsgi is now deprecated; the gnocchi-api binary
+ should be used as the WSGI script file. For example, with uwsgi,
+ "--wsgi-file /usr/lib/python2.7/gnocchi/rest/app.wsgi" should be replaced
+ by "--wsgi-file /usr/bin/gnocchi-api".
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..e06a0ecf
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,24 @@
+pbr
+numpy>=1.9.0
+iso8601
+oslo.config>=3.22.0
+oslo.log>=2.3.0
+oslo.policy>=0.3.0
+oslo.middleware>=3.22.0
+pandas>=0.18.0
+scipy>=0.18.1 # BSD
+pecan>=0.9
+futures
+jsonpatch
+cotyledon>=1.5.0
+six
+stevedore
+ujson
+voluptuous
+werkzeug
+trollius; python_version < '3.4'
+tenacity>=3.1.0 # Apache-2.0
+WebOb>=1.4.1
+Paste
+PasteDeploy
+monotonic
diff --git a/run-func-tests.sh b/run-func-tests.sh
new file mode 100755
index 00000000..cf28931d
--- /dev/null
+++ b/run-func-tests.sh
@@ -0,0 +1,52 @@
+#!/bin/bash -x
+set -e
+
+cleanup(){
+ type -t indexer_stop >/dev/null && indexer_stop || true
+ type -t storage_stop >/dev/null && storage_stop || true
+}
+trap cleanup EXIT
+
+GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file}
+GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql}
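+# The backends under test can be overridden from the environment, e.g.:
+#   GNOCCHI_TEST_STORAGE_DRIVERS="ceph" GNOCCHI_TEST_INDEXER_DRIVERS="mysql" ./run-func-tests.sh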
+for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do
+ for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS}; do
+ case $storage in
+ ceph)
+ eval $(pifpaf -e STORAGE run ceph)
+ rados -c $STORAGE_CEPH_CONF mkpool gnocchi
+ STORAGE_URL=ceph://$STORAGE_CEPH_CONF
+ ;;
+ s3)
+ if ! which s3rver >/dev/null 2>&1
+ then
+ mkdir -p npm-s3rver
+ export NPM_CONFIG_PREFIX=npm-s3rver
+ npm install s3rver --global
+ export PATH=$PWD/npm-s3rver/bin:$PATH
+ fi
+ eval $(pifpaf -e STORAGE run s3rver)
+ ;;
+ file)
+ STORAGE_URL=file://
+ ;;
+
+ swift|redis)
+ eval $(pifpaf -e STORAGE run $storage)
+ ;;
+ *)
+ echo "Unsupported storage backend by functional tests: $storage"
+ exit 1
+ ;;
+ esac
+
+ eval $(pifpaf -e INDEXER run $indexer)
+
+ export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy
+ export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # "admin:" in base64
+ export OS_TEST_PATH=gnocchi/tests/functional_live
+ pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL --coordination-driver redis -- ./tools/pretty_tox.sh $*
+
+ cleanup
+ done
+done
diff --git a/run-tests.sh b/run-tests.sh
new file mode 100755
index 00000000..0e6d11f8
--- /dev/null
+++ b/run-tests.sh
@@ -0,0 +1,31 @@
+#!/bin/bash -x
+set -e
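+# The storage and indexer backends under test can be overridden from the
+# environment, e.g.: GNOCCHI_TEST_STORAGE_DRIVERS="file s3" ./run-tests.sh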
+GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file}
+GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql}
+for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}
+do
+ export GNOCCHI_TEST_STORAGE_DRIVER=$storage
+ for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS}
+ do
+ case $GNOCCHI_TEST_STORAGE_DRIVER in
+ ceph|redis)
+ pifpaf run $GNOCCHI_TEST_STORAGE_DRIVER -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $*
+ ;;
+ s3)
+ if ! which s3rver >/dev/null 2>&1
+ then
+ mkdir npm-s3rver
+ export NPM_CONFIG_PREFIX=npm-s3rver
+ npm install s3rver --global
+ export PATH=$PWD/npm-s3rver/bin:$PATH
+ fi
+ pifpaf -e GNOCCHI_STORAGE run s3rver -- \
+ pifpaf -e GNOCCHI_INDEXER run $indexer -- \
+ ./tools/pretty_tox.sh $*
+ ;;
+ *)
+ pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $*
+ ;;
+ esac
+ done
+done
diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh
new file mode 100755
index 00000000..be2d188b
--- /dev/null
+++ b/run-upgrade-tests.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+set -e
+
+export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX)
+
+GDATE=$( (which gdate >/dev/null && echo gdate) || echo date)
+
+old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p')
+
+RESOURCE_IDS=(
+ "5a301761-aaaa-46e2-8900-8b4f6fe6675a"
+ "5a301761-bbbb-46e2-8900-8b4f6fe6675a"
+ "5a301761-cccc-46e2-8900-8b4f6fe6675a"
+ "non-uuid"
+)
+
+dump_data(){
+ dir="$1"
+ mkdir -p $dir
+ echo "* Dumping measures aggregations to $dir"
+ gnocchi resource list -c id -c type -c project_id -c user_id -c original_resource_id -c started_at -c ended_at -c revision_start -c revision_end | tee $dir/resources.list
+ for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do
+ for agg in min max mean sum ; do
+ gnocchi measures show --aggregation $agg --resource-id $resource_id metric > $dir/${agg}.txt
+ done
+ done
+}
+
+inject_data() {
+ echo "* Injecting measures in Gnocchi"
+ # TODO(sileht): Generate better data to ensure we have enough splits to
+ # cover all situations
+
+ for resource_id in ${RESOURCE_IDS[@]}; do
+ gnocchi resource create generic --attribute id:$resource_id -n metric:high > /dev/null
+ done
+
+ {
+ measures_sep=""
+ MEASURES=$(for i in $(seq 0 10 288000); do
+ now=$($GDATE --iso-8601=s -d "-${i}minute") ; value=$((RANDOM % 13 + 52))
+ echo -n "$measures_sep {\"timestamp\": \"$now\", \"value\": $value }"
+ measures_sep=","
+ done)
+ echo -n '{'
+ resource_sep=""
+ for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do
+ echo -n "$resource_sep \"$resource_id\": { \"metric\": [ $MEASURES ] }"
+ resource_sep=","
+ done
+ echo -n '}'
+ } | gnocchi measures batch-resources-metrics -
+
+ echo "* Waiting for measures computation"
+ while [ $(gnocchi status -f value -c "storage/total number of measures to process") -gt 0 ]; do sleep 1 ; done
+}
+
+pifpaf_stop(){
+ :
+}
+
+cleanup(){
+ pifpaf_stop
+ rm -rf $GNOCCHI_DATA
+}
+trap cleanup EXIT
+
+
+if [ "$STORAGE_DAEMON" == "ceph" ]; then
+ rados -c $STORAGE_CEPH_CONF mkpool gnocchi
+ STORAGE_URL=ceph://$STORAGE_CEPH_CONF
+else
+ STORAGE_URL=file://$GNOCCHI_DATA
+fi
+
+eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL)
+export OS_AUTH_TYPE=gnocchi-basic
+export GNOCCHI_USER=$GNOCCHI_USER_ID
+original_statsd_resource_id=$GNOCCHI_STATSD_RESOURCE_ID
+inject_data $GNOCCHI_DATA
+dump_data $GNOCCHI_DATA/old
+pifpaf_stop
+
+new_version=$(python setup.py --version)
+echo "* Upgrading Gnocchi from $old_version to $new_version"
+pip install -q -U .[${GNOCCHI_VARIANT}]
+
+eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL)
+# Gnocchi 3.1 uses basic auth by default
+export OS_AUTH_TYPE=gnocchi-basic
+export GNOCCHI_USER=$GNOCCHI_USER_ID
+
+# pifpaf creates a new statsd resource on each start
+gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID
+
+dump_data $GNOCCHI_DATA/new
+
+echo "* Checking output difference between Gnocchi $old_version and $new_version"
+diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..6675c97b
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,158 @@
+[metadata]
+name = gnocchi
+url = http://launchpad.net/gnocchi
+summary = Metric as a Service
+description-file =
+ README.rst
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://gnocchi.xyz
+classifier =
+ Environment :: OpenStack
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+ Programming Language :: Python :: 3.5
+ Topic :: System :: Monitoring
+
+[extras]
+keystone =
+ keystonemiddleware>=4.0.0
+mysql =
+ pymysql
+ oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0
+ sqlalchemy
+ sqlalchemy-utils
+ alembic>=0.7.6,!=0.8.1,!=0.9.0
+postgresql =
+ psycopg2
+ oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0
+ sqlalchemy
+ sqlalchemy-utils
+ alembic>=0.7.6,!=0.8.1,!=0.9.0
+s3 =
+ boto3
+ botocore>=1.5
+ lz4>=0.9.0
+ tooz>=1.38
+redis =
+ redis>=2.10.0 # MIT
+ lz4>=0.9.0
+ tooz>=1.38
+swift =
+ python-swiftclient>=3.1.0
+ lz4>=0.9.0
+ tooz>=1.38
+ceph =
+ lz4>=0.9.0
+ tooz>=1.38
+ceph_recommended_lib =
+ cradox>=1.0.9
+ceph_alternative_lib =
+ python-rados>=10.1.0 # not available on pypi
+file =
+ lz4>=0.9.0
+ tooz>=1.38
+doc =
+ sphinx<1.6.0
+ sphinx_rtd_theme
+ sphinxcontrib-httpdomain
+ PyYAML
+ Jinja2
+ reno>=1.6.2
+test =
+ pifpaf>=1.0.1
+ gabbi>=1.30.0
+ coverage>=3.6
+ fixtures
+ mock
+ oslotest
+ python-subunit>=0.0.18
+ os-testr
+ testrepository
+ testscenarios
+ testresources>=0.2.4 # Apache-2.0/BSD
+ testtools>=0.9.38
+ WebTest>=2.0.16
+ doc8
+ tooz>=1.38
+ keystonemiddleware>=4.0.0
+ wsgi_intercept>=1.4.1
+test-swift =
+ python-swiftclient
+
+[global]
+setup-hooks =
+ pbr.hooks.setup_hook
+
+[build_py]
+pre-hook.build_config = gnocchi.genconfig.prehook
+
+[files]
+packages =
+ gnocchi
+
+[entry_points]
+gnocchi.indexer.sqlalchemy.resource_type_attribute =
+ string = gnocchi.indexer.sqlalchemy_extension:StringSchema
+ uuid = gnocchi.indexer.sqlalchemy_extension:UUIDSchema
+ number = gnocchi.indexer.sqlalchemy_extension:NumberSchema
+ bool = gnocchi.indexer.sqlalchemy_extension:BoolSchema
+
+gnocchi.storage =
+ swift = gnocchi.storage.swift:SwiftStorage
+ ceph = gnocchi.storage.ceph:CephStorage
+ file = gnocchi.storage.file:FileStorage
+ s3 = gnocchi.storage.s3:S3Storage
+ redis = gnocchi.storage.redis:RedisStorage
+
+gnocchi.incoming =
+ ceph = gnocchi.storage.incoming.ceph:CephStorage
+ file = gnocchi.storage.incoming.file:FileStorage
+ swift = gnocchi.storage.incoming.swift:SwiftStorage
+ s3 = gnocchi.storage.incoming.s3:S3Storage
+ redis = gnocchi.storage.incoming.redis:RedisStorage
+
+gnocchi.indexer =
+ mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer
+ mysql+pymysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer
+ postgresql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer
+
+gnocchi.aggregates =
+ moving-average = gnocchi.aggregates.moving_stats:MovingAverage
+
+gnocchi.rest.auth_helper =
+ noauth = gnocchi.rest.auth_helper:NoAuthHelper
+ keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper
+ basic = gnocchi.rest.auth_helper:BasicAuthHelper
+
+console_scripts =
+ gnocchi-config-generator = gnocchi.cli:config_generator
+ gnocchi-upgrade = gnocchi.cli:upgrade
+ gnocchi-change-sack-size = gnocchi.cli:change_sack_size
+ gnocchi-statsd = gnocchi.cli:statsd
+ gnocchi-metricd = gnocchi.cli:metricd
+
+wsgi_scripts =
+ gnocchi-api = gnocchi.rest.app:build_wsgi_app
+
+oslo.config.opts =
+ gnocchi = gnocchi.opts:list_opts
+
+oslo.config.opts.defaults =
+ gnocchi = gnocchi.opts:set_defaults
+
+tempest.test_plugins =
+ gnocchi_tests = gnocchi.tempest.plugin:GnocchiTempestPlugin
+
+[build_sphinx]
+all_files = 1
+build-dir = doc/build
+source-dir = doc/source
+
+[wheel]
+universal = 1
diff --git a/setup.py b/setup.py
new file mode 100755
index 00000000..b96f524b
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr'],
+ pbr=True)
diff --git a/tools/duration_perf_analyse.py b/tools/duration_perf_analyse.py
new file mode 100644
index 00000000..a6e35ad9
--- /dev/null
+++ b/tools/duration_perf_analyse.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2014 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Tool to analyse the results of multiple calls of duration_perf_test.py:
+#
+# $ clients=10
+# $ parallel --progress -j $clients python duration_perf_test.py \
+# --result myresults/client{} ::: $(seq 0 $clients)
+# $ python duration_perf_analyse.py myresults
+# * get_measures:
+# Time
+# count 1000.000000
+# mean 0.032090
+# std 0.028287
+# ...
+#
+
+
+import argparse
+import os
+
+import pandas
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('result',
+ help=('Path of the results of duration_perf_test.py.'),
+ default='result')
+
+ data = {
+ 'get_measures': [],
+ 'write_measures': [],
+ 'write_metric': [],
+ }
+ args = parser.parse_args()
+ for root, dirs, files in os.walk(args.result):
+ for name in files:
+ for method in data:
+ if name.endswith('_%s.csv' % method):
+ datum = data[method]
+ filepath = os.path.join(root, name)
+ datum.append(pandas.read_csv(filepath))
+ cname = name.replace('_%s.csv' % method, '')
+ datum[-1].rename(columns={'Duration': cname}, inplace=True)
+
+ for method in data:
+ merged = pandas.DataFrame(columns=['Index', 'Duration'])
+ append = pandas.DataFrame(columns=['Duration'])
+ for datum in data[method]:
+ datum.dropna(axis=1, inplace=True)
+ datum.drop('Count', axis=1, inplace=True)
+ merged = merged.merge(datum, on='Index')
+ cname = datum.columns.values[1]
+ datum.rename(columns={cname: 'Duration'}, inplace=True)
+ append = append.append(datum.drop('Index', axis=1))
+ merged.to_csv(os.path.join(args.result, '%s_merged.csv' % method),
+ index=False)
+ print("* %s:" % method)
+ print(append.describe())
+ print("")
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/duration_perf_test.py b/tools/duration_perf_test.py
new file mode 100644
index 00000000..275cb05c
--- /dev/null
+++ b/tools/duration_perf_test.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2014 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Tool to measure the duration of get and write requests; it can be used like:
+#
+# $ python duration_perf_test.py
+#
+# or to simulate multiple clients workload:
+#
+# $ clients=10
+# $ parallel --progress -j $clients python duration_perf_test.py \
+# --result myresults/client{} ::: $(seq 0 $clients)
+# $ python duration_perf_analyse.py myresults
+# * get_measures:
+# Time
+# count 1000.000000
+# mean 0.032090
+# std 0.028287
+# ...
+#
+
+import argparse
+import datetime
+import json
+import os
+import random
+import time
+
+from keystoneclient.v2_0 import client as keystone_client
+import requests
+
+
+def timer(func):
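+ # Measure the wall-clock duration of each decorated method call and
+ # record (index, elapsed, count) in self._timers[method name].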
+ def inner(self, index, *args, **kwargs):
+ start = time.time()
+ count = func(self, index, *args, **kwargs)
+ elapsed = time.time() - start
+ self._timers.setdefault(func.__name__, []).append(
+ (index, elapsed, count)
+ )
+ print(("{name} #{index} processed "
+ "{count} objects in {elapsed} sec").format(
+ name=func.__name__,
+ index=index,
+ count=count or 0,
+ elapsed=elapsed))
+ return count
+ return inner
+
+
+class PerfTools(object):
+ def __init__(self, args):
+ self.args = args
+ self.keystone = keystone_client.Client(
+ username=args.username,
+ password=args.password,
+ tenant_name=args.tenant_name,
+ auth_url=args.auth_url)
+ self.headers = {'X-Auth-Token': self.keystone.auth_token,
+ 'Content-Type': 'application/json'}
+ self._metrics = []
+ self._timers = {}
+ self.timestamp = datetime.datetime.utcnow()
+
+ @timer
+ def write_metric(self, index):
+ data = json.dumps({"archive_policy_name": self.args.archive_policy})
+ resp = requests.post(self.args.gnocchi_url + "/v1/metric",
+ data=data, headers=self.headers)
+ try:
+ self._metrics.append(json.loads(resp.content)["id"])
+ except Exception:
+ raise RuntimeError("Can't continue without all metrics created "
+ "(%s)" % resp.content)
+
+ @timer
+ def write_measures(self, index, metric):
+ data = []
+ for i in range(self.args.batch_size):
+ self.timestamp += datetime.timedelta(minutes=1)
+ data.append({'timestamp': self.timestamp.isoformat(),
+ 'value': 100})
+ resp = requests.post(
+ "%s/v1/metric/%s/measures" % (self.args.gnocchi_url, metric),
+ data=json.dumps(data),
+ headers=self.headers)
+ # Integer division keeps this comparison correct under Python 3 too.
+ if resp.status_code // 100 != 2:
+ print('Failed POST request to measures #%d: %s' % (index,
+ resp.content))
+ return 0
+ return self.args.batch_size
+
+ @timer
+ def get_measures(self, index, metric):
+ resp = requests.get(
+ "%s/v1/metric/%s/measures" % (self.args.gnocchi_url, metric),
+ headers=self.headers)
+ try:
+ return len(json.loads(resp.content))
+ except Exception:
+ print('Failed GET request to measures #%d: %s' % (index,
+ resp.content))
+ return 0
+
+ def _get_random_metric(self):
+ return self._metrics[random.randint(0, len(self._metrics) - 1)]
+
+ def run(self):
+ try:
+ for index in range(self.args.metric_count):
+ self.write_metric(index)
+
+ for index in range(self.args.measure_count):
+ metric = self._get_random_metric()
+ self.write_measures(index, metric)
+ self.get_measures(index, metric)
+ finally:
+ self.dump_logs()
+
+ def dump_logs(self):
+ for name, data in self._timers.items():
+ filepath = "%s_%s.csv" % (self.args.result_path, name)
+ dirpath = os.path.dirname(filepath)
+ if dirpath and not os.path.exists(dirpath):
+ os.makedirs(dirpath)
+ with open(filepath, 'w') as f:
+ f.write("Index,Duration,Count\n")
+ for meter in data:
+ f.write("%s\n" % ",".join("%.2f" % (m if m else 0)
+ for m in meter))
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--metric-count",
+ help=('Number of metrics to be created. '
+ 'metrics are created one by one.'),
+ default=100,
+ type=int)
+ parser.add_argument("--measure-count",
+ help='Number of measures batches to be sent.',
+ default=100,
+ type=int)
+ parser.add_argument("--gnocchi-url",
+ help='Gnocchi API URL to use.',
+ default="http://localhost:8041")
+ parser.add_argument("--archive-policy",
+ help='Archive policy to use.',
+ default="low")
+ parser.add_argument("--os-username",
+ dest='username',
+ help='User name to use for OpenStack service access.',
+ default="admin")
+ parser.add_argument("--os-tenant-name",
+ dest='tenant_name',
+ help=('Tenant name to use for '
+ 'OpenStack service access.'),
+ default="admin")
+ parser.add_argument("--os-password",
+ dest='password',
+ help='Password to use for OpenStack service access.',
+ default="password")
+ parser.add_argument("--os-auth-url",
+ dest='auth_url',
+ help='Auth URL to use for OpenStack service access.',
+ default="http://localhost:5000/v2.0")
+ parser.add_argument("--result",
+ help='path prefix to write results to.',
+ dest='result_path',
+ default="./perf_gnocchi")
+ parser.add_argument("--batch-size",
+ dest='batch_size',
+ help='Number of measurements in the batch.',
+ default=100,
+ type=int)
+ PerfTools(parser.parse_args()).run()
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/gnocchi-archive-policy-size.py b/tools/gnocchi-archive-policy-size.py
new file mode 100755
index 00000000..f3fbe784
--- /dev/null
+++ b/tools/gnocchi-archive-policy-size.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from gnocchi import utils
+
+
+WORST_CASE_BYTES_PER_POINT = 8.04
+
+
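+# Example, for an archive policy with two definitions
+# (granularity/timespan pairs, in any format utils.to_timespan accepts):
+#   python tools/gnocchi-archive-policy-size.py "1 minute" "1 day" "1 hour" "30 days"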
+if (len(sys.argv) - 1) % 2 != 0:
+ print("Usage: %s ... "
+ % sys.argv[0])
+ sys.exit(1)
+
+
+def sizeof_fmt(num, suffix='B'):
+ for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
+ if abs(num) < 1024.0:
+ return "%3.1f%s%s" % (num, unit, suffix)
+ num /= 1024.0
+ return "%.1f%s%s" % (num, 'Yi', suffix)
+
+
+size = 0
+for g, t in utils.grouper(sys.argv[1:], 2):
+ granularity = utils.to_timespan(g)
+ timespan = utils.to_timespan(t)
+ points = timespan.total_seconds() / granularity.total_seconds()
+ cursize = points * WORST_CASE_BYTES_PER_POINT
+ size += cursize
+ print("%s over %s = %d points = %s" % (g, t, points, sizeof_fmt(cursize)))
+
+print("Total: " + sizeof_fmt(size))
diff --git a/tools/measures_injector.py b/tools/measures_injector.py
new file mode 100755
index 00000000..ebaef520
--- /dev/null
+++ b/tools/measures_injector.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
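+#
+# Fill the storage with random measures; the flags map to the CLI options
+# registered below, e.g.:
+#   python tools/measures_injector.py --metrics 10 \
+#       --batch-of-measures 100 --measures-per-batch 50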
+import random
+import uuid
+
+from concurrent import futures
+from oslo_config import cfg
+import six
+
+from gnocchi import indexer
+from gnocchi import service
+from gnocchi import storage
+from gnocchi import utils
+
+
+def injector():
+ conf = cfg.ConfigOpts()
+ conf.register_cli_opts([
+ cfg.IntOpt("metrics", default=1, min=1),
+ cfg.StrOpt("archive-policy-name", default="low"),
+ cfg.StrOpt("creator", default="admin"),
+ cfg.IntOpt("batch-of-measures", default=1000),
+ cfg.IntOpt("measures-per-batch", default=10),
+ ])
+ conf = service.prepare_service(conf=conf)
+ index = indexer.get_driver(conf)
+ index.connect()
+ s = storage.get_driver(conf)
+
+ def todo():
+ metric = index.create_metric(
+ uuid.uuid4(),
+ creator=conf.creator,
+ archive_policy_name=conf.archive_policy_name)
+
+ for _ in six.moves.range(conf.batch_of_measures):
+ measures = [
+ storage.Measure(
+ utils.dt_in_unix_ns(utils.utcnow()), random.random())
+ for __ in six.moves.range(conf.measures_per_batch)]
+ s.incoming.add_measures(metric, measures)
+
+ with futures.ThreadPoolExecutor(max_workers=conf.metrics) as executor:
+ for m in six.moves.range(conf.metrics):
+ executor.submit(todo)
+
+
+if __name__ == '__main__':
+ injector()
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
new file mode 100755
index 00000000..799ac184
--- /dev/null
+++ b/tools/pretty_tox.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -o pipefail
+
+TESTRARGS=$1
+
+# --until-failure is not compatible with --subunit see:
+#
+# https://bugs.launchpad.net/testrepository/+bug/1411804
+#
+# this workaround exists until that is addressed
+if [[ "$TESTARGS" =~ "until-failure" ]]; then
+ python setup.py testr --slowest --testr-args="$TESTRARGS"
+else
+ python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
+fi
diff --git a/tools/travis-ci-setup.dockerfile b/tools/travis-ci-setup.dockerfile
new file mode 100644
index 00000000..be2179bc
--- /dev/null
+++ b/tools/travis-ci-setup.dockerfile
@@ -0,0 +1,41 @@
+FROM ubuntu:16.04
+ENV GNOCCHI_SRC /home/tester/src
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-get update -y && apt-get install -qy \
+ locales \
+ git \
+ wget \
+ nodejs \
+ nodejs-legacy \
+ npm \
+ python \
+ python3 \
+ python-dev \
+ python3-dev \
+ python-pip \
+ redis-server \
+ build-essential \
+ libffi-dev \
+ libpq-dev \
+ postgresql \
+ mysql-client \
+ mysql-server \
+ librados-dev \
+ liberasurecode-dev \
+ ceph \
+ && apt-get clean -y
+
+#NOTE(sileht): really no utf-8 in 2017 !?
+ENV LANG en_US.UTF-8
+RUN update-locale
+RUN locale-gen $LANG
+
+#NOTE(sileht): Upgrade python dev tools
+RUN pip install -U pip tox virtualenv
+
+RUN useradd -ms /bin/bash tester
+RUN mkdir $GNOCCHI_SRC
+RUN chown -R tester: $GNOCCHI_SRC
+USER tester
+WORKDIR $GNOCCHI_SRC
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..415d5e6a
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,139 @@
+[tox]
+minversion = 2.4
+envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8,bashate
+
+[testenv]
+usedevelop = True
+sitepackages = False
+passenv = LANG OS_DEBUG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* AWS_*
+setenv =
+ GNOCCHI_TEST_STORAGE_DRIVER=file
+ GNOCCHI_TEST_INDEXER_DRIVER=postgresql
+ GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph s3 redis
+ GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql
+ file: GNOCCHI_TEST_STORAGE_DRIVERS=file
+ swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift
+ ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph
+ redis: GNOCCHI_TEST_STORAGE_DRIVERS=redis
+ s3: GNOCCHI_TEST_STORAGE_DRIVERS=s3
+ postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql
+ mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql
+
+ GNOCCHI_STORAGE_DEPS=file,swift,test-swift,s3,ceph,ceph_recommended_lib,redis
+ ceph: GNOCCHI_STORAGE_DEPS=ceph,ceph_recommended_lib
+ swift: GNOCCHI_STORAGE_DEPS=swift,test-swift
+ file: GNOCCHI_STORAGE_DEPS=file
+ redis: GNOCCHI_STORAGE_DEPS=redis
+ s3: GNOCCHI_STORAGE_DEPS=s3
+
+ # FIXME(sileht): pbr doesn't support url in setup.cfg extras, so we do this crap
+ GNOCCHI_TEST_TARBALLS=http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift
+ ceph: GNOCCHI_TEST_TARBALLS=
+ swift: GNOCCHI_TEST_TARBALLS=http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift
+ s3: GNOCCHI_TEST_TARBALLS=
+ redis: GNOCCHI_TEST_TARBALLS=
+ file: GNOCCHI_TEST_TARBALLS=
+deps = .[test]
+ postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}]
+ mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}]
+ {env:GNOCCHI_TEST_TARBALLS:}
+# NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt
+commands =
+ doc8 --ignore-path doc/source/rest.rst doc/source
+ gnocchi-config-generator
+ {toxinidir}/run-tests.sh {posargs}
+ {toxinidir}/run-func-tests.sh {posargs}
+
+[testenv:py35-postgresql-file-upgrade-from-3.1]
+# We should always recreate the virtualenv since the script upgrades
+# Gnocchi: we can't reuse the virtualenv.
+# FIXME(sileht): We pin the alembic version until the next Gnocchi 3.1 release
+envdir = upgrade
+recreate = True
+skip_install = True
+usedevelop = False
+setenv = GNOCCHI_VARIANT=test,postgresql,file
+deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2
+ alembic<0.9.0
+ pifpaf>=0.13
+ gnocchiclient>=2.8.0
+commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs}
+
+[testenv:py27-mysql-ceph-upgrade-from-3.1]
+# We should always recreate the virtualenv since the script upgrades
+# Gnocchi: we can't reuse the virtualenv.
+# FIXME(sileht): We pin the alembic version until the next Gnocchi 3.1 release
+envdir = upgrade
+recreate = True
+skip_install = True
+usedevelop = False
+setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib
+deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2
+ alembic<0.9.0
+ gnocchiclient>=2.8.0
+ pifpaf>=0.13
+commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs}
+
+[testenv:bashate]
+deps = bashate
+commands = bashate -v devstack/plugin.sh devstack/gate/gate_hook.sh devstack/gate/post_test_hook.sh
+whitelist_externals = bash
+
+[testenv:pep8]
+deps = hacking>=0.12,<0.13
+commands = flake8
+
+[testenv:py27-gate]
+setenv = OS_TEST_PATH=gnocchi/tests/functional_live
+ GABBI_LIVE=1
+passenv = {[testenv]passenv} GNOCCHI_SERVICE* GNOCCHI_AUTHORIZATION
+sitepackages = True
+basepython = python2.7
+commands = {toxinidir}/tools/pretty_tox.sh '{posargs}'
+
+# This target provides a shortcut to running just the gabbi tests.
+[testenv:py27-gabbi]
+deps = .[test,postgresql,file]
+setenv = OS_TEST_PATH=gnocchi/tests/functional
+basepython = python2.7
+commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {toxinidir}/tools/pretty_tox.sh '{posargs}'
+
+[testenv:py27-cover]
+commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}"
+
+[testenv:venv]
+# This is used by the doc job on the gate
+deps = {[testenv:docs]deps}
+commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {posargs}
+
+[flake8]
+exclude = .tox,.eggs,doc
+show-source = true
+enable-extensions = H904
+
+[testenv:genconfig]
+deps = .[mysql,postgresql,test,file,ceph,swift,s3]
+commands = gnocchi-config-generator
+
+[testenv:docs]
+basepython = python2.7
+## This does not work, see: https://github.com/tox-dev/tox/issues/509
+# deps = {[testenv]deps}
+# .[postgresql,doc]
+# setenv = GNOCCHI_STORAGE_DEPS=file
+deps = .[test,file,postgresql,doc]
+commands = doc8 --ignore-path doc/source/rest.rst doc/source
+ pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W
+
+[testenv:docs-gnocchi.xyz]
+basepython = python2.7
+setenv = GNOCCHI_STORAGE_DEPS=file
+deps = {[testenv:docs]deps}
+ sphinxcontrib-versioning
+# for 2.x doc
+ pytimeparse
+ retrying
+# for 3.x doc
+ oslosphinx
+commands =
+ pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html