Retire panko: the code is no longer maintained.

The panko source code moved to x/panko.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/791905
Change-Id: Ic8a7001e708d8da7cf8951eefd0a96762ea5fa46

parent 053b147b48
commit de5dce4de4
.coveragerc
@@ -1,8 +0,0 @@
[run]
branch = True
source = panko
omit = panko/tests/*

[report]
ignore_errors = True
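For reference, OpenStack's standard cover workflow consumed this file roughly like so (a sketch; stestr launches test workers via ${PYTHON:-python}, so overriding PYTHON routes every worker through coverage, which then honors the source/omit settings above):

    pip install coverage stestr
    PYTHON="coverage run --parallel-mode" stestr run
    coverage combine && coverage report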
.gitignore
@@ -1,17 +0,0 @@
*.egg*
*.mo
*.pyc
.coverage
.stestr/
.tox
AUTHORS
build/*
ChangeLog
cover/*
dist/*
doc/build
doc/source/api/
etc/panko/panko.conf
subunit.log
etc/panko/policy.yaml.sample
doc/source/_static/
.mailmap
@@ -1,33 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
Adam Gandelman <adamg@canonical.com> <adamg@ubuntu.com>
Alan Pevec <alan.pevec@redhat.com> <apevec@redhat.com>
Alexei Kornienko <akornienko@mirantis.com> <alexei.kornienko@gmail.com>
ChangBo Guo(gcb) <eric.guo@easystack.cn> Chang Bo Guo <guochbo@cn.ibm.com>
Chinmaya Bharadwaj <chinmaya-bharadwaj.a@hp.com> chinmay <chinmaya-bharadwaj.a@hp.com>
Clark Boylan <cboylan@sapwetik.org> <clark.boylan@gmail.com>
Doug Hellmann <doug@doughellmann.com> <doug.hellmann@dreamhost.com>
Fei Long Wang <flwang@catalyst.net.nz> <flwang@cn.ibm.com>
Fengqian Gao <fengqian.gao@intel.com> Fengqian <fengqian.gao@intel.com>
Fengqian Gao <fengqian.gao@intel.com> Fengqian.Gao <fengqian.gao@intel.com>
Gordon Chung <gord@live.ca> gordon chung <gord@live.ca>
Gordon Chung <gord@live.ca> Gordon Chung <chungg@ca.ibm.com>
Gordon Chung <gord@live.ca> gordon chung <chungg@ca.ibm.com>
Ildiko Vancsa <ildiko.vancsa@ericsson.com> Ildiko <ildiko.vancsa@ericsson.com>
John H. Tran <jhtran@att.com> John Tran <jhtran@att.com>
Julien Danjou <julien.danjou@enovance.com> <julien@danjou.info>
LiuSheng <liusheng@huawei.com> liu-sheng <liusheng@huawei.com>
Mehdi Abaakouk <mehdi.abaakouk@enovance.com> <sileht@sileht.net>
Nejc Saje <nsaje@redhat.com> <nejc@saje.info>
Nejc Saje <nsaje@redhat.com> <nejc.saje@xlab.si>
Nicolas Barcet (nijaba) <nick@enovance.com> <nick.barcet@canonical.com>
Pádraig Brady <pbrady@redhat.com> <P@draigBrady.com>
Rich Bowen <rbowen@redhat.com> <rbowen@rcbowen.com>
Sandy Walsh <sandy.walsh@rackspace.com> <sandy@sandywalsh.com>
Sascha Peilicke <speilicke@suse.com> <saschpe@gmx.de>
Sean Dague <sean.dague@samsung.com> <sean@dague.net>
Shengjie Min <shengjie_min@dell.com> shengjie-min <shengjie_min@dell.com>
Shuangtai Tian <shuangtai.tian@intel.com> shuangtai <shuangtai.tian@intel.com>
Swann Croiset <swann.croiset@bull.net> <swann@oopss.org>
ZhiQiang Fan <zhiqiang.fan@huawei.com> <aji.zqfan@gmail.com>
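These mappings exist so that per-author statistics collapse duplicate identities; for example (a sketch):

    git shortlog -sne                              # counts fold through .mailmap
    git log --use-mailmap --author='Gordon Chung'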
.stestr.conf
@@ -1,4 +0,0 @@
[DEFAULT]
test_path=${OS_TEST_PATH:-./panko/tests}
top_dir=./
group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_(?:prefix_|)[^_]+)_
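With this file present the runner needs no arguments; a typical session (a sketch — the filter string is just an illustrative example):

    pip install stestr
    stestr run                   # whole suite, gabbi tests grouped per the regex above
    stestr run event_storage     # any regex narrows the selection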
.zuul.yaml
@@ -1,44 +0,0 @@
- project:
    templates:
      - openstack-python3-xena-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - telemetry-dsvm-integration:
            irrelevant-files: &base-irrelevant-files
              - ^(test-|)requirements.txt$
              - ^.*\.rst$
              - ^.git.*$
              - ^doc/.*$
              - ^panko/hacking/.*$
              - ^panko/locale/.*$
              - ^panko/tests/.*$
              - ^releasenotes/.*$
              - ^setup.cfg$
              - ^tools/.*$
              - ^tox.ini$
        - telemetry-dsvm-integration-ipv6-only:
            irrelevant-files: *base-irrelevant-files
        # TripleO jobs that deploy Telemetry.
        # Note we don't use a project-template here, so it's easier
        # to disable voting on one specific job if things go wrong.
        # tripleo-ci-centos-7-scenario00(1|2)-multinode-oooq will only
        # run on stable/pike while the -container will run in Queens
        # and beyond.
        # If you need any support to debug these jobs in case of
        # failures, please reach us on the #tripleo IRC channel.
        - tripleo-ci-centos-7-scenario001-multinode-oooq:
            voting: false
        - tripleo-ci-centos-7-scenario001-standalone:
            voting: false
        - tripleo-ci-centos-7-scenario002-multinode-oooq:
            voting: false
        - tripleo-ci-centos-7-scenario002-standalone:
            voting: false
    gate:
      jobs:
        - telemetry-dsvm-integration:
            irrelevant-files: *base-irrelevant-files
        - telemetry-dsvm-integration-ipv6-only:
            irrelevant-files: *base-irrelevant-files
CONTRIBUTING.rst
@@ -1,16 +0,0 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps documented at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/panko
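In practice the Gerrit workflow referenced above reduces to a few commands (a sketch):

    pip install git-review
    git review -s            # one-time setup: installs the Change-Id commit hook
    git checkout -b my-fix
    git commit -a
    git review               # pushes the change to review.opendev.org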
HACKING.rst
@@ -1,27 +0,0 @@
Panko Style Commandments
========================

- Step 1: Read the OpenStack Style Commandments
  https://docs.openstack.org/hacking/latest/
- Step 2: Read on

Panko Specific Commandments
---------------------------

- [C301] LOG.warn() is not allowed. Use LOG.warning()
- [C302] Deprecated library function os.popen()

Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.

All unittest classes must ultimately inherit from testtools.TestCase.

All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.
Never manually create tempfiles. Always use the tempfile fixtures from
the fixture library to ensure that they are cleaned up.
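The C301/C302 checks ran as flake8 plugins (they lived under panko/hacking/, per the .zuul.yaml filters above); exercising them locally looked roughly like this sketch:

    pip install hacking
    flake8 panko        # pep8/pyflakes plus the hacking checks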
LICENSE
@@ -1,176 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.
MAINTAINERS
@@ -1,16 +0,0 @@
= Generalist Code Reviewers =

The current members of panko-core are listed here:

https://launchpad.net/~panko-drivers/+members#active

This group can +2 and approve patches in Panko. However, they may
choose to seek feedback from the appropriate specialist maintainer before
approving a patch if it is in any way controversial or risky.

= IRC handles of maintainers =
gordc
jd__
liusheng
pradk
sileht
README.rst
@@ -1,37 +1 @@
-panko
-=====
-
-The Panko project is an event storage service that provides the ability
-to store and query event data generated by Ceilometer and, potentially,
-other sources.
-
-Panko is a component of the OpenStack Telemetry project.
-
--------------
-Documentation
--------------
-
-Documentation for the project can be found at:
-https://docs.openstack.org/panko/latest/
-
-Launchpad Projects
-------------------
-- Server: https://launchpad.net/panko
-- Client: https://launchpad.net/python-pankoclient
-
-Code Repository
----------------
-- Server: https://github.com/openstack/panko
-- Client: https://github.com/openstack/python-pankoclient
-
-Bug Tracking
-------------
-- Bugs: https://storyboard.openstack.org/#!/project/openstack/panko
-
-IRC
----
-IRC Channel: #openstack-telemetry on Freenode.
-
-Release notes
--------------
-Release notes: https://docs.openstack.org/releasenotes/panko/
+Panko has moved to x/panko; the code is no longer maintained.
bindep.txt
@@ -1,14 +0,0 @@
mongodb [platform:dpkg]
mongodb-server [platform:rpm]

mysql-server
mysql-client [platform:dpkg]
mysql [platform:rpm]

postgresql
postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm]
postgresql-server [platform:rpm]
libpq-dev [platform:dpkg]

python37 [platform:rpm py37]
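bindep reads this file to report the distro packages a dev environment is missing; typical use (a sketch, Debian/Ubuntu shown):

    pip install bindep
    bindep -b | xargs -r sudo apt-get install -y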
devstack/README.rst
@@ -1,25 +0,0 @@
==========================
Enabling Panko in DevStack
==========================

1. Download DevStack::

      git clone https://opendev.org/openstack/devstack
      cd devstack

2. Add this repo as an external repository in the ``local.conf`` file::

      [[local|localrc]]
      enable_plugin panko https://opendev.org/openstack/panko

   To use stable branches, make sure devstack is on that branch, and specify
   the branch name to enable_plugin, for example::

      enable_plugin panko https://opendev.org/openstack/panko stable/newton

   Options such as PANKO_BACKEND, defined in ``panko/devstack/settings``,
   can be used to configure the installation of Panko. If you don't want to
   use their default value, you can set a new one in ``local.conf``.

3. Run ``stack.sh``.
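A fuller local.conf overriding the default backend might look like this (a sketch; the backend values are the ones handled by devstack/plugin.sh below):

    [[local|localrc]]
    enable_plugin panko https://opendev.org/openstack/panko
    # one of: mysql (default), postgresql, mongodb, es
    PANKO_BACKEND=mongodb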
devstack/apache-panko.template
@@ -1,15 +0,0 @@
Listen %PORT%

<VirtualHost *:%PORT%>
    WSGIDaemonProcess panko-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV%
    WSGIProcessGroup panko-api
    WSGIScriptAlias / %WSGIAPP%
    WSGIApplicationGroup %{GLOBAL}
    <IfVersion >= 2.4>
        ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/%APACHE_NAME%/panko.log
    CustomLog /var/log/%APACHE_NAME%/panko_access.log combined
</VirtualHost>

WSGISocketPrefix /var/run/%APACHE_NAME%
devstack/lib/elasticsearch.sh
@@ -1,152 +0,0 @@
#!/bin/bash -xe

# basic reference point for things like filecache
#
# TODO(sdague): once we have a few of these I imagine the download
# step can probably be factored out to something nicer
TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
FILES=$TOP_DIR/files
source $TOP_DIR/stackrc

# Package source and version, all pkg files are expected to have
# something like this, as well as a way to override them.
ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.7.5}
ELASTICSEARCH_BASEURL=${ELASTICSEARCH_BASEURL:-https://download.elasticsearch.org/elasticsearch/elasticsearch}

# Elastic search actual implementation
function wget_elasticsearch {
    local file=${1}

    if [ ! -f ${FILES}/${file} ]; then
        wget $ELASTICSEARCH_BASEURL/${file} -O ${FILES}/${file}
    fi

    if [ ! -f ${FILES}/${file}.sha1.txt ]; then
        wget $ELASTICSEARCH_BASEURL/${file}.sha1.txt -O ${FILES}/${file}.sha1.txt
    fi

    pushd ${FILES}; sha1sum ${file} > ${file}.sha1.gen; popd

    if ! diff ${FILES}/${file}.sha1.gen ${FILES}/${file}.sha1.txt; then
        echo "Invalid elasticsearch download. Could not install."
        return 1
    fi
    return 0
}

function download_elasticsearch {
    if is_ubuntu; then
        wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.deb
    elif is_fedora || is_suse; then
        wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
    fi
}

function configure_elasticsearch {
    # currently a no op
    :
}

function _check_elasticsearch_ready {
    # poll elasticsearch to see if it's started
    if ! wait_for_service 120 http://localhost:9200; then
        die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
    fi
}

function start_elasticsearch {
    if is_ubuntu; then
        sudo /etc/init.d/elasticsearch start
        _check_elasticsearch_ready
    elif is_fedora; then
        sudo /bin/systemctl start elasticsearch.service
        _check_elasticsearch_ready
    elif is_suse; then
        sudo /usr/bin/systemctl start elasticsearch.service
        _check_elasticsearch_ready
    else
        echo "Unsupported architecture...can not start elasticsearch."
    fi
}

function stop_elasticsearch {
    if is_ubuntu; then
        sudo /etc/init.d/elasticsearch stop
    elif is_fedora; then
        sudo /bin/systemctl stop elasticsearch.service
    elif is_suse ; then
        sudo /usr/bin/systemctl stop elasticsearch.service
    else
        echo "Unsupported architecture...can not stop elasticsearch."
    fi
}

function install_elasticsearch {
    pip_install_gr elasticsearch
    if is_package_installed elasticsearch; then
        echo "Note: elasticsearch was already installed."
        return
    fi
    if is_ubuntu; then
        if [[ ${DISTRO} == "bionic" ]]; then
            is_package_installed openjdk-8-jre-headless || install_package openjdk-8-jre-headless
        else
            is_package_installed default-jre-headless || install_package default-jre-headless
        fi

        sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
        sudo update-rc.d elasticsearch defaults 95 10
    elif is_fedora; then
        is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
        yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
        sudo /bin/systemctl daemon-reload
        sudo /bin/systemctl enable elasticsearch.service
    elif is_suse; then
        is_package_installed java-1_8_0-openjdk-headless || install_package java-1_8_0-openjdk-headless
        zypper_install --no-gpg-checks ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
        sudo /usr/bin/systemctl daemon-reload
        sudo /usr/bin/systemctl enable elasticsearch.service
    else
        echo "Unsupported install of elasticsearch on this architecture."
    fi
}

function uninstall_elasticsearch {
    if is_package_installed elasticsearch; then
        if is_ubuntu; then
            sudo apt-get purge elasticsearch
        elif is_fedora; then
            sudo yum remove elasticsearch
        elif is_suse; then
            sudo zypper rm elasticsearch
        else
            echo "Unsupported install of elasticsearch on this architecture."
        fi
    fi
}

# The PHASE dispatcher. All pkg files are expected to basically cargo
# cult the case statement.
PHASE=$1
echo "Phase is $PHASE"

case $PHASE in
    download)
        download_elasticsearch
        ;;
    install)
        install_elasticsearch
        ;;
    configure)
        configure_elasticsearch
        ;;
    start)
        start_elasticsearch
        ;;
    stop)
        stop_elasticsearch
        ;;
    uninstall)
        uninstall_elasticsearch
        ;;
esac
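The PHASE dispatcher means the script is driven one phase at a time; a typical sequence (a sketch, run from the script's directory) would be:

    ./elasticsearch.sh download
    ./elasticsearch.sh install
    ./elasticsearch.sh start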
devstack/plugin.sh
@@ -1,346 +0,0 @@
# Install and start **Panko** service in devstack
#
# To enable Panko in devstack add an entry to local.conf that
# looks like
#
# [[local|localrc]]
# enable_plugin panko https://opendev.org/openstack/panko
#
# Several variables set in the localrc section adjust common behaviors
# of Panko (see within for additional settings):
#
#   PANKO_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es')

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace

# Support potential entry-points console scripts in VENV or not
if [[ ${USE_VENV} = True ]]; then
    PROJECT_VENV["panko"]=${PANKO_DIR}.venv
    PANKO_BIN_DIR=${PROJECT_VENV["panko"]}/bin
else
    PANKO_BIN_DIR=$(get_python_exec_prefix)
fi


if [ -z "$PANKO_DEPLOY" ]; then
    # Default
    PANKO_DEPLOY=simple

    # Fallback to common wsgi devstack configuration
    if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then
        PANKO_DEPLOY=mod_wsgi

    # Deprecated config
    elif [ -n "$PANKO_USE_MOD_WSGI" ] ; then
        echo_summary "PANKO_USE_MOD_WSGI is deprecated, use PANKO_DEPLOY instead"
        if [ "$PANKO_USE_MOD_WSGI" == True ]; then
            PANKO_DEPLOY=mod_wsgi
        fi
    fi
fi


function panko_service_url {
    echo "$PANKO_SERVICE_PROTOCOL://$PANKO_SERVICE_HOST:$PANKO_SERVICE_PORT"
}


# _panko_install_mongodb - Install mongodb and python lib.
function _panko_install_mongodb {
    # Server package is the same on all
    local packages=mongodb-server

    if is_fedora; then
        # mongodb client
        packages="${packages} mongodb"
    fi

    install_package ${packages}

    if is_fedora; then
        restart_service mongod
    else
        restart_service mongodb
    fi

    # give time for service to restart
    sleep 5
}

# Configure mod_wsgi
function _panko_config_apache_wsgi {
    sudo mkdir -p $PANKO_WSGI_DIR

    local panko_apache_conf=$(apache_site_config_for panko)
    local venv_path=""

    # Copy proxy vhost and wsgi file
    sudo cp $PANKO_DIR/panko/api/app.wsgi $PANKO_WSGI_DIR/app

    if [[ ${USE_VENV} = True ]]; then
        venv_path="python-path=${PROJECT_VENV["panko"]}/lib/$(python_version)/site-packages"
    fi

    sudo cp $PANKO_DIR/devstack/apache-panko.template $panko_apache_conf
    sudo sed -e "
        s|%PORT%|$PANKO_SERVICE_PORT|g;
        s|%APACHE_NAME%|$APACHE_NAME|g;
        s|%WSGIAPP%|$PANKO_WSGI_DIR/app|g;
        s|%USER%|$STACK_USER|g;
        s|%VIRTUALENV%|$venv_path|g
    " -i $panko_apache_conf
}

# Install required services for storage backends
function _panko_prepare_storage_backend {
    if [ "$PANKO_BACKEND" = 'mongodb' ] ; then
        pip_install_gr pymongo
        _panko_install_mongodb
    fi

    if [ "$PANKO_BACKEND" = 'es' ] ; then
        $PANKO_DIR/devstack/lib/elasticsearch.sh download
        $PANKO_DIR/devstack/lib/elasticsearch.sh install
    fi
}


# Create panko related accounts in Keystone
function _panko_create_accounts {
    if is_service_enabled panko-api; then

        create_service_user "panko" "admin"

        get_or_create_service "panko" "event" "OpenStack Telemetry Service"
        get_or_create_endpoint "event" \
            "$REGION_NAME" \
            "$(panko_service_url)" \
            "$(panko_service_url)" \
            "$(panko_service_url)"
    fi
}

# Activities to do before panko has been installed.
function preinstall_panko {
    echo_summary "Preinstall not in virtualenv context. Skipping."
}

# Remove WSGI files, disable and remove Apache vhost file
function _panko_cleanup_apache_wsgi {
    sudo rm -f "$PANKO_WSGI_DIR"/*
    sudo rmdir "$PANKO_WSGI_DIR"
    sudo rm -f $(apache_site_config_for panko)
}

function _panko_drop_database {
    if is_service_enabled panko-api ; then
        if [ "$PANKO_BACKEND" = 'mongodb' ] ; then
            mongo panko --eval "db.dropDatabase();"
        elif [ "$PANKO_BACKEND" = 'es' ] ; then
            curl -XDELETE "localhost:9200/events_*"
        fi
    fi
}

# cleanup_panko() - Remove residual data files, anything left over
# from previous runs that a clean run would need to clean up
function cleanup_panko {
    if [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then
        _panko_cleanup_apache_wsgi
    fi
    _panko_drop_database
    sudo rm -f "$PANKO_CONF_DIR"/*
    sudo rmdir "$PANKO_CONF_DIR"
}

# Set configuration for storage backend.
function _panko_configure_storage_backend {
    if [ "$PANKO_BACKEND" = 'mysql' ] || [ "$PANKO_BACKEND" = 'postgresql' ] ; then
        iniset $PANKO_CONF database connection $(database_connection_url panko)
    elif [ "$PANKO_BACKEND" = 'es' ] ; then
        iniset $PANKO_CONF database connection es://localhost:9200
        ${TOP_DIR}/pkg/elasticsearch.sh start
    elif [ "$PANKO_BACKEND" = 'mongodb' ] ; then
        iniset $PANKO_CONF database connection mongodb://localhost:27017/panko
    else
        die $LINENO "Unable to configure unknown PANKO_BACKEND $PANKO_BACKEND"
    fi
    _panko_drop_database
}

# Configure Panko
function configure_panko {

    local conffile

    iniset $PANKO_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"

    # Set up logging
    if [ "$SYSLOG" != "False" ]; then
        iniset $PANKO_CONF DEFAULT use_syslog "True"
    fi

    # Format logging
    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$PANKO_DEPLOY" != "mod_wsgi" ]; then
        setup_colorized_logging $PANKO_CONF DEFAULT
    fi

    # Install the declarative configuration files to
    # the conf dir.
    # NOTE(cdent): Do not make this a glob as it will conflict
    # with rootwrap installation done elsewhere and also clobber
    # panko.conf settings that have already been made.
    # Anyway, explicit is better than implicit.
    cp $PANKO_DIR/etc/panko/api_paste.ini $PANKO_CONF_DIR

    configure_auth_token_middleware $PANKO_CONF panko $PANKO_AUTH_CACHE_DIR

    # Configure storage
    if is_service_enabled panko-api; then
        _panko_configure_storage_backend
    fi

    if is_service_enabled panko-api && [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then
        _panko_config_apache_wsgi
    elif [ "$PANKO_DEPLOY" == "uwsgi" ]; then
        # iniset creates these files when it's called if they don't exist.
        PANKO_UWSGI_FILE=$PANKO_CONF_DIR/panko-uwsgi.ini

        rm -f "$PANKO_UWSGI_FILE"

        iniset "$PANKO_UWSGI_FILE" uwsgi http $PANKO_SERVICE_HOST:$PANKO_SERVICE_PORT
        iniset "$PANKO_UWSGI_FILE" uwsgi wsgi-file "$PANKO_DIR/panko/api/app.wsgi"
        # This is running standalone
        iniset "$PANKO_UWSGI_FILE" uwsgi master true
        # Set die-on-term & exit-on-reload so that uwsgi shuts down
        iniset "$PANKO_UWSGI_FILE" uwsgi die-on-term true
        iniset "$PANKO_UWSGI_FILE" uwsgi exit-on-reload true
        iniset "$PANKO_UWSGI_FILE" uwsgi threads 10
        iniset "$PANKO_UWSGI_FILE" uwsgi processes $API_WORKERS
        iniset "$PANKO_UWSGI_FILE" uwsgi enable-threads true
        iniset "$PANKO_UWSGI_FILE" uwsgi plugins python
        iniset "$PANKO_UWSGI_FILE" uwsgi lazy-apps true
        # uwsgi recommends this to prevent thundering herd on accept.
        iniset "$PANKO_UWSGI_FILE" uwsgi thunder-lock true
        # Override the default size for headers from the 4k default.
        iniset "$PANKO_UWSGI_FILE" uwsgi buffer-size 65535
        # Make sure the client doesn't try to re-use the connection.
        iniset "$PANKO_UWSGI_FILE" uwsgi add-header "Connection: close"
    fi
}

# init_panko() - Initialize etc.
function init_panko {
    # Get panko keystone settings in place
    _panko_create_accounts
    # Create cache dir
    sudo install -d -o $STACK_USER $PANKO_AUTH_CACHE_DIR
    rm -f $PANKO_AUTH_CACHE_DIR/*

    if is_service_enabled panko-api && is_service_enabled mysql postgresql ; then
        if [ "$PANKO_BACKEND" = 'mysql' ] || [ "$PANKO_BACKEND" = 'postgresql' ] || [ "$PANKO_BACKEND" = 'es' ] ; then
            recreate_database panko
            $PANKO_BIN_DIR/panko-dbsync
        fi
    fi
}

# Install Panko.
function install_panko {
    if is_service_enabled panko-api; then
        _panko_prepare_storage_backend
    fi

    setup_develop $PANKO_DIR
    sudo install -d -o $STACK_USER -m 755 $PANKO_CONF_DIR

    if [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then
        install_apache_wsgi
    elif [ "$PANKO_DEPLOY" == "uwsgi" ]; then
        pip_install uwsgi
    fi
}

# start_panko() - Start running processes, including screen
function start_panko {
    if [[ "$PANKO_DEPLOY" == "mod_wsgi" ]]; then
        enable_apache_site panko
        restart_apache_server
    elif [ "$PANKO_DEPLOY" == "uwsgi" ]; then
        run_process panko-api "$PANKO_BIN_DIR/uwsgi $PANKO_UWSGI_FILE"
    else
        run_process panko-api "$PANKO_BIN_DIR/panko-api -d -v --config-file $PANKO_CONF"
    fi
}

# configure_tempest_for_panko()
# NOTE (gmann): Configure all the Tempest setting for Panko service in
# this function.
function configure_tempest_for_panko {
    if is_service_enabled tempest; then
        iniset $TEMPEST_CONFIG service_available panko True
    fi
}

# stop_panko() - Stop running processes
function stop_panko {
    if is_service_enabled panko-api ; then
        if [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then
            disable_apache_site panko
            restart_apache_server
        else
            stop_process panko-api
        fi
    fi
}

# install_pankoclient() - Collect source and prepare
function install_pankoclient {
    if use_library_from_git "python-pankoclient"; then
        git_clone_by_name "python-pankoclient"
        setup_dev_lib "python-pankoclient"
    else
        pip_install pankoclient
    fi
}

# This is the main for plugin.sh
if is_service_enabled panko-api; then
    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
        # Set up other services
        echo_summary "Configuring system services for Panko"
        preinstall_panko
    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
        echo_summary "Installing Panko"
        # Use stack_install_service here to account for virtualenv
        stack_install_service panko
        install_pankoclient
    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        echo_summary "Configuring Panko"
        configure_panko
    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        echo_summary "Initializing Panko"
        # Tidy base for panko
        init_panko
        # Start the services
        start_panko
    elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
        echo_summary "Configuring Tempest for Panko"
        configure_tempest_for_panko
    fi

    if [[ "$1" == "unstack" ]]; then
        echo_summary "Shutting Down Panko"
        stop_panko
    fi

    if [[ "$1" == "clean" ]]; then
        echo_summary "Cleaning Panko"
        cleanup_panko
    fi
fi

# Restore xtrace
$XTRACE
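The deploy-mode logic above is driven entirely from local.conf; forcing the uwsgi path, for instance, is just (a sketch):

    [[local|localrc]]
    enable_plugin panko https://opendev.org/openstack/panko
    PANKO_DEPLOY=uwsgi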
devstack/settings
@@ -1,29 +0,0 @@
enable_service panko-api

# Default directories
PANKO_DIR=$DEST/panko
PANKO_CONF_DIR=/etc/panko
PANKO_CONF=$PANKO_CONF_DIR/panko.conf
PANKO_AUTH_CACHE_DIR=${PANKO_AUTH_CACHE_DIR:-/var/cache/panko}
PANKO_WSGI_DIR=${PANKO_WSGI_DIR:-/var/www/panko}

# Set up database backend
PANKO_BACKEND=${PANKO_BACKEND:-mysql}

# Panko connection info.
PANKO_SERVICE_PROTOCOL=http
PANKO_SERVICE_HOST=$SERVICE_HOST
PANKO_SERVICE_PORT=${PANKO_SERVICE_PORT:-8977}

# PANKO_DEPLOY defines how Panko is deployed, allowed values:
# - mod_wsgi: Run Panko under Apache HTTPd mod_wsgi
# - simple:   Run panko-api
# - uwsgi:    Run Panko under uwsgi
# - <empty>:  Fallback to PANKO_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES
PANKO_DEPLOY=${PANKO_DEPLOY}

# Get rid of this before done.
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:
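Given the service definition above (endpoint type "event" on port 8977), a post-stack smoke test might look like this — a sketch: the /v2/events path is an assumption based on Panko's Ceilometer lineage, not something this diff shows:

    TOKEN=$(openstack token issue -f value -c id)
    curl -s -H "X-Auth-Token: $TOKEN" "http://$SERVICE_HOST:8977/v2/events"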
devstack/upgrade/settings
@@ -1,7 +0,0 @@
register_project_for_upgrade panko

devstack_localrc base enable_plugin panko https://opendev.org/openstack/panko
devstack_localrc base enable_service panko-api tempest

devstack_localrc target enable_plugin panko https://opendev.org/openstack/panko
devstack_localrc target enable_service panko-api tempest
devstack/upgrade/shutdown.sh
@@ -1,27 +0,0 @@
#!/bin/bash
#
#

set -o errexit

. $GRENADE_DIR/grenaderc
. $GRENADE_DIR/functions

. $BASE_DEVSTACK_DIR/functions
. $BASE_DEVSTACK_DIR/stackrc # needed for status directory
. $BASE_DEVSTACK_DIR/lib/tls
. $BASE_DEVSTACK_DIR/lib/apache

# Locate the panko plugin and get its functions
PANKO_DEVSTACK_DIR=$(dirname $(dirname $0))
. $PANKO_DEVSTACK_DIR/plugin.sh

set -o xtrace

stop_panko

# ensure everything is stopped

SERVICES_DOWN="panko-api"

ensure_services_stopped $SERVICES_DOWN
devstack/upgrade/upgrade.sh
@@ -1,87 +0,0 @@
#!/usr/bin/env bash

# ``upgrade-panko``

echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"

# Clean up any resources that may be in use
cleanup() {
    set +o errexit

    echo "*********************************************************************"
    echo "ERROR: Abort $0"
    echo "*********************************************************************"

    # Kill ourselves to signal any calling process
    trap 2; kill -2 $$
}

trap cleanup SIGHUP SIGINT SIGTERM

# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)

# Source params
. $GRENADE_DIR/grenaderc

# Import common functions
. $GRENADE_DIR/functions

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Save mongodb state (replace with snapshot)
# TODO(chdent): There used to be a 'register_db_to_save panko'
# which we may wish to consider putting back in.
if grep -q 'connection *= *mongo' /etc/panko/panko.conf; then
    mongodump --db panko --out $SAVE_DIR/panko-dump.$BASE_RELEASE
fi

# Upgrade Panko
# ==================
# Locate panko devstack plugin, the directory above the
# grenade plugin.
PANKO_DEVSTACK_DIR=$(dirname $(dirname $0))

# Get functions from current DevStack
. $TARGET_DEVSTACK_DIR/functions
. $TARGET_DEVSTACK_DIR/stackrc
. $TARGET_DEVSTACK_DIR/lib/apache

# Get panko functions from devstack plugin
. $PANKO_DEVSTACK_DIR/settings

# Print the commands being run so that we can see the command that triggers
# an error.
set -o xtrace

# Install the target panko
. $PANKO_DEVSTACK_DIR/plugin.sh stack install

# calls upgrade-panko for specific release
upgrade_project panko $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH

# Migrate the database
# NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but
# currently it is not.
PANKO_BIN_DIR=$(dirname $(which panko-dbsync))
$PANKO_BIN_DIR/panko-dbsync || die $LINENO "DB sync error"

# Start Panko
start_panko

ensure_services_started panko-api

# Save mongodb state (replace with snapshot)
if grep -q 'connection *= *mongo' /etc/panko/panko.conf; then
    mongodump --db panko --out $SAVE_DIR/panko-dump.$TARGET_RELEASE
fi


set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"
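The mongodump calls above only capture state; restoring a saved dump would look roughly like this (a sketch — mongodump writes one subdirectory per database under --out):

    mongorestore --db panko $SAVE_DIR/panko-dump.$BASE_RELEASE/panko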
doc/Makefile
@@ -1,164 +0,0 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS   =
SPHINXBUILD  = sphinx-build
PAPER        =
BUILDDIR     = build

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
	@echo "  wadl       to build a WADL file for api.openstack.org"

clean:
	-rm -rf $(BUILDDIR)/*

html: check-dependencies
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

.PHONY: check-dependencies
check-dependencies:
	@python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. Run: pip install sphinxcontrib-httpdomain" && exit 1)
	@ld -ltidy >/dev/null 2>&1 || (echo "Error: Missing libtidy dependencies. Pls. install libtidy with system package manager" && exit 1)

wadl:
	$(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl
	@echo
	@echo "Build finished. The WADL pages are in $(BUILDDIR)/wadl."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Panko.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Panko.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Panko"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Panko"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
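Typical invocation while the tree was alive (a sketch; assumes the packages from doc/requirements.txt below are installed):

    pip install -r doc/requirements.txt
    make -C doc html        # output lands in doc/build/html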
doc/requirements.txt
@@ -1,9 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
sphinx>=2.0.0,!=2.1.0 # BSD
openstackdocstheme>=2.2.1 # Apache-2.0
sphinxcontrib-apidoc>=0.2.0 # Apache-2.0
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
sphinxcontrib-httpdomain>=1.6.1 # BSD
reno>=3.1.0 # Apache-2.0
doc/source/api/index.rst
@@ -1,8 +0,0 @@
=================
Source Code Index
=================

.. toctree::
   :maxdepth: 1

   modules
@ -1,321 +0,0 @@
|
|||
#
|
||||
# Panko documentation build configuration file, created by
|
||||
# sphinx-quickstart on Thu Oct 27 11:38:59 2011.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
sys.path.insert(0, os.path.abspath('../'))
|
||||
|
||||
# -- General configuration ----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings.
|
||||
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
|
||||
# or your custom ones.
|
||||
extensions = [
|
||||
'sphinxcontrib.apidoc',
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinxcontrib.pecanwsme.rest',
|
||||
'sphinxcontrib.httpdomain',
|
||||
'openstackdocstheme',
|
||||
'oslo_policy.sphinxpolicygen'
|
||||
]
|
||||
|
||||
policy_generator_config_file = '../../etc/panko/panko-policy-generator.conf'
|
||||
sample_policy_basename = '_static/panko'
|
||||
|
||||
wsme_protocols = ['restjson', 'restxml']
|
||||
|
||||
todo_include_todos = True
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates = []
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# openstackdocstheme options
|
||||
openstackdocs_repo_name = 'openstack/panko'
|
||||
openstackdocs_pdf_link = True
|
||||
openstackdocs_auto_name = False
|
||||
openstackdocs_bug_project = 'panko'
|
||||
openstackdocs_bug_tag = ''
|
||||
project = u'Panko'
|
||||
copyright = u'2012-2015, OpenStack Foundation'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['**/#*', '**~', '**/#*#']
|
||||
|
||||
# The reST default role (used for this markup: `text`)
|
||||
# to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'native'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
primary_domain = 'py'
|
||||
nitpicky = False
|
||||
|
||||
|
||||
# -- Options for HTML output --------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
# html_theme_path = ['.']
|
||||
# html_theme = '_theme'
|
||||
html_theme = 'openstackdocs'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {
|
||||
# "nosidebar": "false"
|
||||
#}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
#html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%Y-%m-%d %H:%M'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'Pankodoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output -------------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
'makeindex': '',
|
||||
'printindex': '',
|
||||
'preamble': r'\setcounter{tocdepth}{3}',
|
||||
'maxlistdepth': '10',
|
||||
}
|
||||
|
||||
# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
|
||||
latex_use_xindy = False
|
||||
|
||||
# Disable smartquotes, they don't work in latex
|
||||
smartquotes_excludes = {'builders': ['latex']}
|
||||
|
||||
latex_domain_indices = False
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass
|
||||
# [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'doc-panko.tex', u'Panko Documentation',
|
||||
u'OpenStack Foundation', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output -------------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'panko', u'Panko Documentation',
|
||||
[u'OpenStack'], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -----------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'Panko', u'Panko Documentation', u'OpenStack',
|
||||
'Panko', 'Event storage service for OpenStack Telemetry.', 'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
|
||||
|
||||
# -- Options for Epub output --------------------------------------------------
|
||||
|
||||
# Bibliographic Dublin Core info.
|
||||
epub_title = u'Panko'
|
||||
epub_author = u'OpenStack'
|
||||
epub_publisher = u'OpenStack'
|
||||
epub_copyright = u'2012-2015, OpenStack'
|
||||
|
||||
# The language of the text. It defaults to the language option
|
||||
# or en if the language is not set.
|
||||
#epub_language = ''
|
||||
|
||||
# The scheme of the identifier. Typical schemes are ISBN or URL.
|
||||
#epub_scheme = ''
|
||||
|
||||
# The unique identifier of the text. This can be an ISBN number
|
||||
# or the project homepage.
|
||||
#epub_identifier = ''
|
||||
|
||||
# A unique identification for the text.
|
||||
#epub_uid = ''
|
||||
|
||||
# A tuple containing the cover image and cover page html template filenames.
|
||||
#epub_cover = ()
|
||||
|
||||
# HTML files that should be inserted before the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_pre_files = []
|
||||
|
||||
# HTML files that should be inserted after the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_post_files = []
|
||||
|
||||
# A list of files that should not be packed into the epub file.
|
||||
#epub_exclude_files = []
|
||||
|
||||
# The depth of the table of contents in toc.ncx.
|
||||
#epub_tocdepth = 3
|
||||
|
||||
# Allow duplicate toc entries.
|
||||
#epub_tocdup = True
|
||||
|
||||
# -- sphinxcontrib.apidoc configuration --------------------------------------
|
||||
|
||||
apidoc_module_dir = '../../panko'
|
||||
apidoc_output_dir = 'api'
|
||||
apidoc_excluded_paths = [
|
||||
'tests',
|
||||
'hacking',
|
||||
# happybase is not Python 3 compatible, so skip these modules
|
||||
'storage/hbase/*',
|
||||
'storage/impl_hbase.py'
|
||||
]
|
|
@ -1,33 +0,0 @@
|
|||
..
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
.. _contributing:
|
||||
|
||||
=====================
|
||||
Contributing to Panko
|
||||
=====================
|
||||
|
||||
Panko follows the same workflow as other OpenStack projects. To start
|
||||
contributing to Panko, please follow the workflow found here_.
|
||||
|
||||
.. _here: https://wiki.openstack.org/wiki/Gerrit_Workflow
|
||||
|
||||
|
||||
Project Hosting Details
|
||||
=======================
|
||||
|
||||
:Bug tracker: https://bugs.launchpad.net/panko
|
||||
:Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev (prefix subjects with ``[Panko]`` for faster responses)
|
||||
:Contribution Guide: https://docs.openstack.org/panko/latest/contributor/index.html
|
||||
:Code Hosting: https://opendev.org/openstack/panko/
|
||||
:Code Review: https://review.opendev.org/#/q/status:open+project:openstack/panko,n,z
|
|
@ -1,89 +0,0 @@
|
|||
..
|
||||
Copyright (c) 2014 OpenStack Foundation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
Guru Meditation Reports
|
||||
=======================
|
||||
|
||||
Panko contains a mechanism whereby developers and system administrators
|
||||
can generate a report about the state of a running Panko executable. This
|
||||
report is called a *Guru Meditation Report* (*GMR* for short).
|
||||
|
||||
Generating a GMR
|
||||
----------------
|
||||
|
||||
A *GMR* can be generated by sending the *USR1* signal to any Panko process
|
||||
with support (see below). The *GMR* will then be output to standard error for
|
||||
that particular process.
|
||||
|
||||
For example, suppose that ``panko-api`` has process id ``8675``, and
|
||||
was run with ``2>/var/log/panko/panko-api.log``. Then,
|
||||
``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to
|
||||
``/var/log/panko/panko-api.log``.
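
For instance, reusing the hypothetical process id and log file from the
example above::

    $ kill -USR1 8675
    $ tail /var/log/panko/panko-api.log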
|
||||
|
||||
Structure of a GMR
|
||||
------------------
|
||||
|
||||
The *GMR* is designed to be extensible; any particular executable may add its
|
||||
own sections. However, the base *GMR* consists of several sections:
|
||||
|
||||
Package
|
||||
Shows information about the package to which this process belongs, including
|
||||
version information
|
||||
|
||||
Threads
|
||||
Shows stack traces and thread ids for each of the threads within this process
|
||||
|
||||
Green Threads
|
||||
Shows stack traces for each of the green threads within this process (green
|
||||
threads don't have thread ids)
|
||||
|
||||
Configuration
|
||||
Lists all the configuration options currently accessible via the CONF object
|
||||
for the current process
|
||||
|
||||
Adding Support for GMRs to New Executables
|
||||
------------------------------------------
|
||||
|
||||
Adding support for a *GMR* to a given executable is fairly easy.
|
||||
|
||||
First import the module (from the oslo.reports library), as well as the
|
||||
Panko version module:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from oslo_reports import guru_meditation_report as gmr
|
||||
from panko import version
|
||||
|
||||
Then, register any additional sections (optional):
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
gmr.TextGuruMeditation.register_section('Some Special Section',
|
||||
some_section_generator)
|
||||
|
||||
Finally (under main), before running the "main loop" of the executable (usually
|
||||
``service.server(server)`` or something similar), register the *GMR* hook:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
gmr.TextGuruMeditation.setup_autorun(version)
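
Putting the steps together, a minimal sketch of such a ``main`` looks as
follows (``some_section_generator``, ``service``, and ``server`` are
placeholders from the steps above, not real Panko symbols):

.. code-block:: python

    from oslo_reports import guru_meditation_report as gmr

    from panko import version


    def main():
        # Optional: attach an extra section to every report generated
        # by this process.
        gmr.TextGuruMeditation.register_section('Some Special Section',
                                                some_section_generator)
        # Install the USR1 handler before entering the main loop.
        gmr.TextGuruMeditation.setup_autorun(version)
        service.server(server)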
|
||||
|
||||
Extending the GMR
|
||||
-----------------
|
||||
|
||||
As mentioned above, additional sections can be added to the GMR for a
|
||||
particular executable. For more information, see the inline documentation
|
||||
about oslo.reports:
|
||||
`oslo.reports <https://docs.openstack.org/oslo.reports/latest/>`_
|
|
@ -1,15 +0,0 @@
|
|||
==================
|
||||
Contribution Guide
|
||||
==================
|
||||
|
||||
In the Contribution Guide, you will find documented policies for
|
||||
developing with Panko. This includes the processes we use for
|
||||
bugs, contributor onboarding, core reviewer memberships, and other
|
||||
procedural items.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
contributing
|
||||
testing
|
||||
gmr
|
|
@ -1,82 +0,0 @@
|
|||
..
|
||||
Copyright 2012 New Dream Network, LLC (DreamHost)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
=================
|
||||
Running the Tests
|
||||
=================
|
||||
|
||||
Panko includes an extensive set of automated unit tests which are
|
||||
run through tox_.
|
||||
|
||||
1. Install ``tox``::
|
||||
|
||||
$ sudo pip install tox
|
||||
|
||||
2. On Ubuntu, install the ``mongodb`` and ``libmysqlclient-dev`` packages::
|
||||
|
||||
$ sudo apt-get install mongodb
|
||||
$ sudo apt-get install libmysqlclient-dev
|
||||
|
||||
For Fedora 20 there is no ``libmysqlclient-dev`` package, so you’ll need
|
||||
to install ``mariadb-devel.x86_64`` (or ``mariadb-devel.i386``) instead::
|
||||
|
||||
$ sudo yum install mongodb
|
||||
$ sudo yum install mariadb-devel.x86_64
|
||||
|
||||
3. Install the test dependencies::
|
||||
|
||||
$ sudo pip install -r /opt/stack/panko/test-requirements.txt
|
||||
|
||||
4. Run the unit and code-style tests::
|
||||
|
||||
$ cd /opt/stack/panko
|
||||
$ tox -e py27,pep8
|
||||
|
||||
As tox is a wrapper around testr, it also accepts the same flags as testr.
|
||||
See the `testr documentation`_ for details about these additional flags.
|
||||
|
||||
.. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html
|
||||
|
||||
Use a double hyphen to pass options to testr. For example, to run only tests under tests/api/v2::
|
||||
|
||||
$ tox -e py27 -- api.v2
|
||||
|
||||
To debug tests (i.e. break into the pdb debugger), you can use the ``debug`` tox
|
||||
environment. Here's an example, passing the name of a test since you'll
|
||||
normally only want to run the test that hits your breakpoint::
|
||||
|
||||
$ tox -e debug panko.tests.test_bin
|
||||
|
||||
For reference, the ``debug`` tox environment implements the instructions
|
||||
here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests
|
||||
|
||||
5. There is a growing suite of tests which use a tool called `gabbi`_ to
|
||||
test and validate the behavior of the Panko API. These tests are run
|
||||
when using the usual ``py27`` tox target, but if desired they can be run by
|
||||
themselves::
|
||||
|
||||
$ tox -e gabbi
|
||||
|
||||
The YAML files used to drive the gabbi tests can be found in
|
||||
``panko/tests/functional/gabbi/gabbits``. If you are adding to or adjusting the
|
||||
API you should consider adding tests here.
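
A new test file is plain YAML. A minimal sketch (the URL and the expected
status are illustrative only) looks like::

    tests:
        - name: list event types
          GET: /v2/event_types
          status: 200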
|
||||
|
||||
.. _gabbi: https://gabbi.readthedocs.org/
|
||||
|
||||
.. seealso::
|
||||
|
||||
* tox_
|
||||
|
||||
.. _tox: http://tox.testrun.org/latest/
|
|
@ -1,55 +0,0 @@
|
|||
..
|
||||
Copyright 2012 Nicolas Barcet for Canonical
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
=====================================
|
||||
Welcome to Panko's documentation!
|
||||
=====================================
|
||||
|
||||
The Panko project is an event storage service that provides the ability to
|
||||
store and query event data generated by Ceilometer and, potentially, other
|
||||
sources.
|
||||
|
||||
Panko is a component of the Telemetry project.
|
||||
|
||||
This documentation offers information on how Panko works and how to
|
||||
contribute to the project.
|
||||
|
||||
Overview
|
||||
========
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
install/index
|
||||
contributor/index
|
||||
webapi/index
|
||||
api/index
|
||||
|
||||
Sample Configuration Files
|
||||
==========================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
configuration/sample_policy
|
||||
|
||||
.. only:: html
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
|
@ -1,41 +0,0 @@
|
|||
..
|
||||
Copyright 2012 Nicolas Barcet for Canonical
|
||||
2013 New Dream Network, LLC (DreamHost)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
==============================
|
||||
Installing development sandbox
|
||||
==============================
|
||||
|
||||
Configuring devstack
|
||||
====================
|
||||
|
||||
.. index::
|
||||
double: installing; devstack
|
||||
|
||||
1. Download devstack_.
|
||||
|
||||
2. Create a ``local.conf`` file as input to devstack.
|
||||
|
||||
3. The panko services are not enabled by default, so they must be
|
||||
enabled in ``local.conf`` before running ``stack.sh``.
|
||||
|
||||
This example ``local.conf`` file shows all of the settings required for
|
||||
panko::
|
||||
|
||||
[[local|localrc]]
|
||||
# Enable the Panko devstack plugin
|
||||
enable_plugin panko https://opendev.org/openstack/panko.git
|
||||
|
||||
.. _devstack: http://www.devstack.org/
|
|
@ -1,28 +0,0 @@
|
|||
..
|
||||
Copyright 2013 New Dream Network, LLC (DreamHost)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
.. _install:
|
||||
|
||||
================
|
||||
Installing Panko
|
||||
================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
development
|
||||
manual
|
||||
mod_wsgi
|
||||
uwsgi
|
|
@ -1,122 +0,0 @@
|
|||
..
|
||||
Copyright 2012 Nicolas Barcet for Canonical
|
||||
2013 New Dream Network, LLC (DreamHost)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
.. _installing_manually:
|
||||
|
||||
===================
|
||||
Installing Manually
|
||||
===================
|
||||
|
||||
|
||||
Storage Backend Installation
|
||||
============================
|
||||
|
||||
This step is a prerequisite for the collector and API services. You may use
|
||||
one of the database backends listed below to store Panko data.
|
||||
|
||||
MongoDB
|
||||
-------
|
||||
|
||||
Follow the instructions to install the MongoDB_ package for your operating
|
||||
system, then start the service. The required minimum version of MongoDB is
|
||||
2.4.x. You will also need to have pymongo_ 2.4 installed.
|
||||
|
||||
To use MongoDB as the storage backend, change the 'database' section in
|
||||
panko.conf as follows::
|
||||
|
||||
[database]
|
||||
connection = mongodb://username:password@host:27017/panko
|
||||
|
||||
SQLalchemy-supported DBs
|
||||
------------------------
|
||||
|
||||
You may alternatively use any SQLAlchemy-supported DB such as
|
||||
`PostgreSQL` or `MySQL`.
|
||||
|
||||
To use MySQL as the storage backend, change the 'database' section in
|
||||
panko.conf as follows::
|
||||
|
||||
[database]
|
||||
connection = mysql+pymysql://username:password@host/panko?charset=utf8
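
A PostgreSQL connection string follows the same pattern; for example::

    [database]
    connection = postgresql://username:password@host/panko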
|
||||
|
||||
|
||||
.. _MongoDB: http://www.mongodb.org/
|
||||
.. _pymongo: https://pypi.org/project/pymongo/
|
||||
|
||||
|
||||
Installing the API Server
|
||||
=========================
|
||||
|
||||
.. index::
|
||||
double: installing; API
|
||||
|
||||
.. note::
|
||||
|
||||
The API server needs to be able to talk to keystone and panko's
|
||||
database. It is only required if you choose to store data in a legacy
|
||||
database or if you inject new samples via the REST API.
|
||||
|
||||
1. Clone the panko git repository to the server::
|
||||
|
||||
$ cd /opt/stack
|
||||
$ git clone https://opendev.org/openstack/panko.git
|
||||
|
||||
2. As a user with ``root`` permissions or ``sudo`` privileges, run the
|
||||
panko installer::
|
||||
|
||||
$ cd panko
|
||||
$ sudo python setup.py install
|
||||
|
||||
3. Create a service for panko in keystone::
|
||||
|
||||
$ openstack service create event --name panko \
|
||||
--description "Panko Service"
|
||||
|
||||
4. Create an endpoint in keystone for panko::
|
||||
|
||||
$ openstack endpoint create $PANKO_SERVICE \
|
||||
--region RegionOne \
|
||||
--publicurl "http://$SERVICE_HOST:8977" \
|
||||
--adminurl "http://$SERVICE_HOST:8977" \
|
||||
--internalurl "http://$SERVICE_HOST:8977"
|
||||
|
||||
.. note::
|
||||
|
||||
PANKO_SERVICE is the id of the service created by the first command
|
||||
and SERVICE_HOST is the host where the Panko API is running. The
|
||||
default port value for panko API is 8977. If the port value
|
||||
has been customized, adjust accordingly.
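
For example, with a recent python-openstackclient the service id can be
captured directly when the service is created (a sketch, not part of the
original instructions)::

    $ PANKO_SERVICE=$(openstack service create event --name panko \
      --description "Panko Service" -f value -c id)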
|
||||
|
||||
5. Choose and start the API server.
|
||||
|
||||
Panko includes the ``panko-api`` command. This can be
|
||||
used to run the API server. For smaller or proof-of-concept
|
||||
installations this is a reasonable choice. For larger installations it
|
||||
is strongly recommended to install the API server in a WSGI host
|
||||
such as mod_wsgi (see :doc:`mod_wsgi`). Doing so will provide better
|
||||
performance and more options for making adjustments specific to the
|
||||
installation environment.
|
||||
|
||||
If you are using the ``panko-api`` command it can be started
|
||||
as::
|
||||
|
||||
$ panko-api
|
||||
|
||||
.. note::
|
||||
|
||||
The development version of the API server logs to stderr, so you
|
||||
may want to run this step using a screen session or other tool for
|
||||
maintaining a long-running program in the background.
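
For example, a quick way to keep it running in the background (a sketch; a
screen session or any process supervisor works just as well)::

    $ nohup panko-api 2> /var/log/panko/panko-api.log &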
|
|
@ -1,28 +0,0 @@
|
|||
..
|
||||
Copyright 2013 New Dream Network, LLC (DreamHost)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
==================================
|
||||
Installing the API behind mod_wsgi
|
||||
==================================
|
||||
|
||||
Panko comes with a few example files for configuring the API
|
||||
service to run behind Apache with ``mod_wsgi``.
|
||||
|
||||
app.wsgi
|
||||
========
|
||||
|
||||
The file ``panko/api/app.wsgi`` sets up the V2 API WSGI
|
||||
application. The file is installed with the rest of the panko
|
||||
application code, and should not need to be modified.
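
For reference, a minimal Apache virtual host for this WSGI script might look
like the following sketch (the path and process settings are illustrative,
not a shipped example)::

    Listen 8977
    <VirtualHost *:8977>
        WSGIDaemonProcess panko-api user=panko processes=2 threads=4
        WSGIProcessGroup panko-api
        WSGIScriptAlias / /usr/lib/python3/dist-packages/panko/api/app.wsgi
    </VirtualHost>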
|
|
@ -1,62 +0,0 @@
|
|||
=============================
|
||||
Installing the API with uwsgi
|
||||
=============================
|
||||
|
||||
Panko comes with a few example files for configuring the API
|
||||
service to run with ``uwsgi``.
|
||||
|
||||
app.wsgi
|
||||
========
|
||||
|
||||
The file ``panko/api/app.wsgi`` sets up the V2 API WSGI
|
||||
application. The file is installed with the rest of the Panko
|
||||
application code, and should not need to be modified.
|
||||
|
||||
Example of uwsgi configuration file
|
||||
===================================
|
||||
|
||||
|
||||
Create a ``panko-uwsgi.ini`` file::
|
||||
|
||||
[uwsgi]
|
||||
http = 0.0.0.0:8041
|
||||
wsgi-file = <path_to_panko>/panko/api/app.wsgi
|
||||
plugins = python
|
||||
# This is running standalone
|
||||
master = true
|
||||
# Set die-on-term & exit-on-reload so that uwsgi shuts down cleanly on SIGTERM and on reload
|
||||
exit-on-reload = true
|
||||
die-on-term = true
|
||||
# uwsgi recommends this to prevent thundering herd on accept.
|
||||
thunder-lock = true
|
||||
# Override the 4k default header size (mainly for large keystone tokens)
|
||||
buffer-size = 65535
|
||||
enable-threads = true
|
||||
# Set the number of threads, typically to the output of the nproc command
|
||||
threads = 8
|
||||
# Make sure the client doesn't try to re-use the connection.
|
||||
add-header = Connection: close
|
||||
# Set uid and gid to an appropriate user on your server. In many
|
||||
# installations ``panko`` will be correct.
|
||||
uid = panko
|
||||
gid = panko
|
||||
|
||||
Then start the uwsgi server::
|
||||
|
||||
uwsgi ./panko-uwsgi.ini
|
||||
|
||||
Or start it in the background with::
|
||||
|
||||
uwsgi -d ./panko-uwsgi.ini
|
||||
|
||||
Configuring with uwsgi-plugin-python on Debian/Ubuntu
|
||||
=====================================================
|
||||
|
||||
Install the Python plugin for uwsgi::
|
||||
|
||||
apt-get install uwsgi-plugin-python
|
||||
|
||||
Run the server::
|
||||
|
||||
uwsgi_python --master --die-on-term --logto /var/log/panko/panko-api.log \
|
||||
--http-socket :8042 --wsgi-file /usr/share/panko-common/app.wsgi
|
|
@ -1,47 +0,0 @@
|
|||
=======
|
||||
Web API
|
||||
=======
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
v2
|
||||
|
||||
You can get the API version list via a request to the endpoint root path. For example::
|
||||
|
||||
curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8977
|
||||
|
||||
Sample response::
|
||||
|
||||
{
|
||||
"versions": {
|
||||
"values": [
|
||||
{
|
||||
"id": "v2",
|
||||
"links": [
|
||||
{
|
||||
"href": "http://127.0.0.1:8977/v2",
|
||||
"rel": "self"
|
||||
},
|
||||
{
|
||||
"href": "https://docs.openstack.org/",
|
||||
"rel": "describedby",
|
||||
"type": "text/html"
|
||||
}
|
||||
],
|
||||
"media-types": [
|
||||
{
|
||||
"base": "application/json",
|
||||
"type": "application/vnd.openstack.telemetry-v2+json"
|
||||
},
|
||||
{
|
||||
"base": "application/xml",
|
||||
"type": "application/vnd.openstack.telemetry-v2+xml"
|
||||
}
|
||||
],
|
||||
"status": "stable",
|
||||
"updated": "2013-02-13T00:00:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
.. docbookrestapi
|
||||
|
||||
==========
|
||||
V2 Web API
|
||||
==========
|
||||
|
||||
Capabilities
|
||||
============
|
||||
|
||||
The Capabilities API allows you to discover which functions of the
|
||||
V2 API, including the selectable aggregate functions, are
|
||||
supported by the currently configured storage driver. A capabilities query
|
||||
returns a flattened dictionary of properties with associated boolean values -
|
||||
a 'False' or absent value means that the corresponding feature is not
|
||||
available in the backend.
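
For example, a capabilities response might look like the following (the keys
shown match the samples in the source; the exact set depends on the
configured driver)::

    {
        "api": {"events:query:simple": true},
        "event_storage": {"storage:production_ready": true}
    }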
|
||||
|
||||
.. rest-controller:: panko.api.controllers.v2.capabilities:CapabilitiesController
|
||||
:webprefix: /v2/capabilities
|
||||
|
||||
.. autoclass:: panko.api.controllers.v2.capabilities.Capabilities
|
||||
:members:
|
||||
:noindex:
|
||||
|
||||
Events and Traits
|
||||
=================
|
||||
|
||||
.. rest-controller:: panko.api.controllers.v2.events:EventTypesController
|
||||
:webprefix: /v2/event_types
|
||||
|
||||
.. rest-controller:: panko.api.controllers.v2.events:TraitsController
|
||||
:webprefix: /v2/event_types/(event_type)/traits
|
||||
|
||||
.. rest-controller:: panko.api.controllers.v2.events:EventsController
|
||||
:webprefix: /v2/events
|
||||
|
||||
.. autoclass:: panko.api.controllers.v2.events.Event
|
||||
:members:
|
||||
:noindex:
|
||||
|
||||
.. autoclass:: panko.api.controllers.v2.events.Trait
|
||||
:members:
|
||||
:noindex:
|
||||
|
||||
.. autoclass:: panko.api.controllers.v2.events.TraitDescription
|
||||
:members:
|
||||
:noindex:
|
||||
|
||||
Filtering Queries
|
||||
=================
|
||||
|
||||
.. autoclass:: panko.api.controllers.v2.events.EventQuery
|
||||
:members:
|
||||
:noindex:
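
For example, a query matching a single event type (mirroring the
``EventQuery`` sample in the source; the ``q.<attr>`` query-string form is
how WSME accepts the filter list) might be::

    GET /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=compute.instance.create.start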
|
|
@ -1,52 +0,0 @@
|
|||
[composite:panko+noauth]
|
||||
use = egg:Paste#urlmap
|
||||
/ = pankoversions_pipeline
|
||||
/healthcheck = healthcheck
|
||||
/v2 = pankov2_noauth_pipeline
|
||||
|
||||
[composite:panko+keystone]
|
||||
use = egg:Paste#urlmap
|
||||
/ = pankoversions_pipeline
|
||||
/healthcheck = healthcheck
|
||||
/v2 = pankov2_keystone_pipeline
|
||||
|
||||
[pipeline:pankoversions_pipeline]
|
||||
pipeline = cors http_proxy_to_wsgi pankoversions
|
||||
|
||||
[app:pankoversions]
|
||||
paste.app_factory = panko.api.app:app_factory
|
||||
root = panko.api.controllers.root.VersionsController
|
||||
|
||||
[pipeline:pankov2_keystone_pipeline]
|
||||
pipeline = cors http_proxy_to_wsgi request_id osprofiler authtoken pankov2
|
||||
|
||||
[pipeline:pankov2_noauth_pipeline]
|
||||
pipeline = cors http_proxy_to_wsgi request_id osprofiler pankov2
|
||||
|
||||
[app:pankov2]
|
||||
paste.app_factory = panko.api.app:app_factory
|
||||
root = panko.api.controllers.v2.root.V2Controller
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
|
||||
oslo_config_project = panko
|
||||
|
||||
[filter:request_id]
|
||||
paste.filter_factory = oslo_middleware:RequestId.factory
|
||||
|
||||
[filter:cors]
|
||||
paste.filter_factory = oslo_middleware.cors:filter_factory
|
||||
oslo_config_project = panko
|
||||
|
||||
[filter:http_proxy_to_wsgi]
|
||||
paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
|
||||
oslo_config_project = panko
|
||||
|
||||
[filter:osprofiler]
|
||||
paste.filter_factory = panko.profiler:WsgiMiddleware.factory
|
||||
oslo_config_project = panko
|
||||
|
||||
[app:healthcheck]
|
||||
paste.app_factory = oslo_middleware:Healthcheck.app_factory
|
||||
backends = disable_by_file
|
||||
disable_by_file_path = /etc/panko/healthcheck_disable
|
|
@ -1,10 +0,0 @@
|
|||
[DEFAULT]
|
||||
output_file = etc/panko/panko.conf
|
||||
wrap_width = 79
|
||||
namespace = panko
|
||||
namespace = oslo.db
|
||||
namespace = oslo.log
|
||||
namespace = oslo.middleware.cors
|
||||
namespace = oslo.middleware.http_proxy_to_wsgi
|
||||
namespace = oslo.policy
|
||||
namespace = keystonemiddleware.auth_token
|
|
@ -1,3 +0,0 @@
|
|||
[DEFAULT]
|
||||
output_file = etc/panko/policy.yaml.sample
|
||||
namespace = panko
|
|
@ -1,20 +0,0 @@
|
|||
# Copyright 2014 eNovance
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class NotImplementedError(NotImplementedError):
|
||||
# FIXME(jd) This is used by WSME to return a correct HTTP code. We should
|
||||
# not expose it here but wrap our methods in the API to convert it to a
|
||||
# proper HTTP error.
|
||||
code = 501
|
|
@ -1,81 +0,0 @@
|
|||
#
|
||||
# Copyright 2012 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import uuid
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from paste import deploy
|
||||
import pecan
|
||||
|
||||
from panko.api import hooks
|
||||
from panko.api import middleware
|
||||
from panko import service
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def setup_app(root, conf):
|
||||
app_hooks = [hooks.ConfigHook(conf),
|
||||
hooks.DBHook(conf),
|
||||
hooks.TranslationHook()]
|
||||
|
||||
return pecan.make_app(
|
||||
root,
|
||||
hooks=app_hooks,
|
||||
wrap_app=middleware.ParsableErrorMiddleware,
|
||||
guess_content_type_from_ext=False
|
||||
)
|
||||
|
||||
|
||||
# NOTE(sileht): pastedeploy uses ConfigParser to handle
|
||||
# global_conf. Since the Python 3 ConfigParser only
|
||||
# allows strings as config values, we cannot store objects
|
||||
# there. To pass an object created before paste loads
|
||||
# the app, we store it in a global var, and each loaded app
|
||||
# stores its configuration under a unique key to be concurrency safe.
|
||||
global APPCONFIGS
|
||||
APPCONFIGS = {}
|
||||
|
||||
|
||||
def load_app(conf, appname='panko+keystone'):
|
||||
global APPCONFIGS
|
||||
|
||||
# Build the WSGI app
|
||||
cfg_path = conf.api_paste_config
|
||||
if not os.path.isabs(cfg_path):
|
||||
cfg_path = conf.find_file(cfg_path)
|
||||
|
||||
if cfg_path is None or not os.path.exists(cfg_path):
|
||||
raise cfg.ConfigFilesNotFoundError([conf.api_paste_config])
|
||||
|
||||
config = dict(conf=conf)
|
||||
configkey = str(uuid.uuid4())
|
||||
APPCONFIGS[configkey] = config
|
||||
|
||||
LOG.info("Full WSGI config used: %s" % cfg_path)
|
||||
return deploy.loadapp("config:" + cfg_path, name=appname,
|
||||
global_conf={'configkey': configkey})
|
||||
|
||||
|
||||
def build_wsgi_app(argv=None):
|
||||
return load_app(service.prepare_service(argv=argv))
|
||||
|
||||
|
||||
def app_factory(global_config, **local_conf):
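# Paste entry point: recover the config object that load_app() stashed
# under the configkey passed through global_conf.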
|
||||
global APPCONFIGS
|
||||
conf = APPCONFIGS.get(global_config.get('configkey'))
|
||||
return setup_app(root=local_conf.get('root'), **conf)
|
|
@ -1,19 +0,0 @@
|
|||
# -*- mode: python -*-
|
||||
#
|
||||
# Copyright 2013 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""Use this file for deploying the API under mod_wsgi."""
|
||||
from panko.api import app
|
||||
|
||||
application = app.build_wsgi_app(argv=[])
|
|
@ -1,51 +0,0 @@
|
|||
#
|
||||
# Copyright 2012 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
|
||||
MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json'
|
||||
MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml'
|
||||
|
||||
|
||||
class VersionsController(object):
|
||||
|
||||
@pecan.expose('json')
|
||||
def index(self):
|
||||
base_url = pecan.request.application_url
|
||||
available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }]
|
||||
collected = [version_descriptor(base_url, v['tag'], v['date'])
|
||||
for v in available]
|
||||
versions = {'versions': {'values': collected}}
|
||||
return versions
|
||||
|
||||
|
||||
def version_descriptor(base_url, version, released_on):
|
||||
url = version_url(base_url, version)
|
||||
return {
|
||||
'id': version,
|
||||
'links': [
|
||||
{'href': url, 'rel': 'self', },
|
||||
{'href': 'https://docs.openstack.org/',
|
||||
'rel': 'describedby', 'type': 'text/html', }],
|
||||
'media-types': [
|
||||
{'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, },
|
||||
{'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }],
|
||||
'status': 'stable',
|
||||
'updated': released_on,
|
||||
}
|
||||
|
||||
|
||||
def version_url(base_url, version_number):
|
||||
return '%s/%s' % (base_url, version_number)
|
|
@ -1,228 +0,0 @@
|
|||
#
|
||||
# Copyright 2012 New Dream Network, LLC (DreamHost)
|
||||
# Copyright 2013 IBM Corp.
|
||||
# Copyright 2013 eNovance <licensing@enovance.com>
|
||||
# Copyright Ericsson AB 2013. All rights reserved
|
||||
# Copyright 2014 Hewlett-Packard Company
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ast
|
||||
import datetime
|
||||
import functools
|
||||
import inspect
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import strutils
|
||||
from oslo_utils import timeutils
|
||||
import pecan
|
||||
import wsme
|
||||
from wsme import types as wtypes
|
||||
|
||||
from panko.i18n import _
|
||||
|
||||
|
||||
operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt')
|
||||
operation_kind_enum = wtypes.Enum(str, *operation_kind)
|
||||
|
||||
|
||||
class ClientSideError(wsme.exc.ClientSideError):
|
||||
def __init__(self, error, status_code=400):
|
||||
pecan.response.translatable_error = error
|
||||
super(ClientSideError, self).__init__(error, status_code)
|
||||
|
||||
|
||||
class EntityNotFound(ClientSideError):
|
||||
def __init__(self, entity, id):
|
||||
super(EntityNotFound, self).__init__(
|
||||
_("%(entity)s %(id)s Not Found") % {'entity': entity,
|
||||
'id': id},
|
||||
status_code=404)
|
||||
|
||||
|
||||
class ProjectNotAuthorized(ClientSideError):
|
||||
def __init__(self, id, aspect='project'):
|
||||
params = dict(aspect=aspect, id=id)
|
||||
super(ProjectNotAuthorized, self).__init__(
|
||||
_("Not Authorized to access %(aspect)s %(id)s") % params,
|
||||
status_code=401)
|
||||
|
||||
|
||||
class AdvEnum(wtypes.wsproperty):
|
||||
"""Handle default and mandatory for wtypes.Enum."""
|
||||
def __init__(self, name, *args, **kwargs):
|
||||
self._name = '_advenum_%s' % name
|
||||
self._default = kwargs.pop('default', None)
|
||||
mandatory = kwargs.pop('mandatory', False)
|
||||
enum = wtypes.Enum(*args, **kwargs)
|
||||
super(AdvEnum, self).__init__(datatype=enum, fget=self._get,
|
||||
fset=self._set, mandatory=mandatory)
|
||||
|
||||
def _get(self, parent):
|
||||
if hasattr(parent, self._name):
|
||||
value = getattr(parent, self._name)
|
||||
return value or self._default
|
||||
return self._default
|
||||
|
||||
def _set(self, parent, value):
|
||||
try:
|
||||
if self.datatype.validate(value):
|
||||
setattr(parent, self._name, value)
|
||||
except ValueError as e:
|
||||
raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1),
|
||||
value, e)
|
||||
|
||||
|
||||
class Base(wtypes.DynamicBase):
|
||||
|
||||
@classmethod
|
||||
def from_db_model(cls, m):
|
||||
return cls(**(m.as_dict()))
|
||||
|
||||
@classmethod
|
||||
def from_db_and_links(cls, m, links):
|
||||
return cls(links=links, **(m.as_dict()))
|
||||
|
||||
def as_dict(self, db_model):
|
||||
valid_keys = inspect.getfullargspec(db_model.__init__)[0]
|
||||
if 'self' in valid_keys:
|
||||
valid_keys.remove('self')
|
||||
return self.as_dict_from_keys(valid_keys)
|
||||
|
||||
def as_dict_from_keys(self, keys):
|
||||
return dict((k, getattr(self, k))
|
||||
for k in keys
|
||||
if hasattr(self, k) and
|
||||
getattr(self, k) != wsme.Unset)
|
||||
|
||||
|
||||
class Query(Base):
|
||||
"""Query filter."""
|
||||
|
||||
# The data types supported by the query.
|
||||
_supported_types = ['integer', 'float', 'string', 'boolean', 'datetime']
|
||||
|
||||
# Functions to convert the data field to the correct type.
|
||||
_type_converters = {'integer': int,
|
||||
'float': float,
|
||||
'boolean': functools.partial(
|
||||
strutils.bool_from_string, strict=True),
|
||||
'string': str,
|
||||
'datetime': timeutils.parse_isotime}
|
||||
|
||||
_op = None # provide a default
|
||||
|
||||
def get_op(self):
|
||||
return self._op or 'eq'
|
||||
|
||||
def set_op(self, value):
|
||||
self._op = value
|
||||
|
||||
field = wsme.wsattr(wtypes.text, mandatory=True)
|
||||
"The name of the field to test"
|
||||
|
||||
# op = wsme.wsattr(operation_kind, default='eq')
|
||||
# this ^ doesn't seem to work.
|
||||
op = wsme.wsproperty(operation_kind_enum, get_op, set_op)
|
||||
"The comparison operator. Defaults to 'eq'."
|
||||
|
||||
value = wsme.wsattr(wtypes.text, mandatory=True)
|
||||
"The value to compare against the stored data"
|
||||
|
||||
type = wtypes.text
|
||||
"The data type of value to compare against the stored data"
|
||||
|
||||
def __repr__(self):
|
||||
# for logging calls
|
||||
return '<Query %r %s %r %s>' % (self.field,
|
||||
self.op,
|
||||
self.value,
|
||||
self.type)
|
||||
|
||||
@classmethod
|
||||
def sample(cls):
|
||||
return cls(field='resource_id',
|
||||
op='eq',
|
||||
value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
|
||||
type='string'
|
||||
)
|
||||
|
||||
def as_dict(self):
|
||||
return self.as_dict_from_keys(['field', 'op', 'type', 'value'])
|
||||
|
||||
def _get_value_as_type(self, forced_type=None):
|
||||
"""Convert metadata value to the specified data type.
|
||||
|
||||
This method is called during metadata query to help convert the
|
||||
queried metadata to the data type specified by the user. If there is no
|
||||
data type given, the metadata will be parsed by ast.literal_eval to
|
||||
try to do a smart conversion.
|
||||
|
||||
NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised
|
||||
from wsmeext/sphinxext.py. It's OK to call it outside the Query class,
|
||||
because the "public" side of that class is actually the outside of the
|
||||
API, and the "private" side is the API implementation. The method is
|
||||
only used in the API implementation, so it's OK.
|
||||
|
||||
:returns: metadata value converted with the specified data type.
|
||||
"""
|
||||
type = forced_type or self.type
|
||||
try:
|
||||
converted_value = self.value
|
||||
if not type:
|
||||
try:
|
||||
converted_value = ast.literal_eval(self.value)
|
||||
except (ValueError, SyntaxError):
|
||||
# Unable to convert the metadata value automatically
|
||||
# let it default to self.value
|
||||
pass
|
||||
else:
|
||||
if type not in self._supported_types:
|
||||
# Types must be explicitly declared so the
|
||||
# correct type converter may be used. Subclasses
|
||||
# of Query may define _supported_types and
|
||||
# _type_converters to define their own types.
|
||||
raise TypeError()
|
||||
converted_value = self._type_converters[type](self.value)
|
||||
if isinstance(converted_value, datetime.datetime):
|
||||
converted_value = timeutils.normalize_time(converted_value)
|
||||
except ValueError:
|
||||
msg = (_('Unable to convert the value %(value)s'
|
||||
' to the expected data type %(type)s.') %
|
||||
{'value': self.value, 'type': type})
|
||||
raise ClientSideError(msg)
|
||||
except TypeError:
|
||||
msg = (_('The data type %(type)s is not supported. The supported'
|
||||
' data type list is: %(supported)s') %
|
||||
{'type': type, 'supported': self._supported_types})
|
||||
raise ClientSideError(msg)
|
||||
except Exception:
|
||||
msg = (_('Unexpected exception converting %(value)s to'
|
||||
' the expected data type %(type)s.') %
|
||||
{'value': self.value, 'type': type})
|
||||
raise ClientSideError(msg)
|
||||
return converted_value
|
||||
|
||||
|
||||
class JsonType(wtypes.UserType):
|
||||
"""A simple JSON type."""
|
||||
|
||||
basetype = wtypes.text
|
||||
name = 'json'
|
||||
|
||||
@staticmethod
|
||||
def validate(value):
|
||||
# check that value can be serialised
|
||||
jsonutils.dumps(value)
|
||||
return value
|
|
@ -1,74 +0,0 @@
|
|||
#
|
||||
# Copyright 2012 New Dream Network, LLC (DreamHost)
|
||||
# Copyright 2013 IBM Corp.
|
||||
# Copyright 2013 eNovance <licensing@enovance.com>
|
||||
# Copyright Ericsson AB 2013. All rights reserved
|
||||
# Copyright 2014 Hewlett-Packard Company
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import rest
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from panko.api.controllers.v2 import base
|
||||
from panko import profiler
|
||||
from panko import utils
|
||||
|
||||
|
||||
def _flatten_capabilities(capabilities):
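# Collapse the nested capabilities dict into flat 'a:b:c' keys so it
# can be exposed as a simple {text: bool} mapping.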
|
||||
return dict((k, v) for k, v in utils.recursive_keypairs(capabilities))
|
||||
|
||||
|
||||
@profiler.trace_cls('api')
|
||||
class Capabilities(base.Base):
|
||||
"""A representation of the API and storage capabilities.
|
||||
|
||||
Usually constrained by restrictions imposed by the storage driver.
|
||||
"""
|
||||
|
||||
api = {wtypes.text: bool}
|
||||
"A flattened dictionary of API capabilities"
|
||||
event_storage = {wtypes.text: bool}
|
||||
"A flattened dictionary of event storage capabilities"
|
||||
|
||||
@classmethod
|
||||
def sample(cls):
|
||||
return cls(
|
||||
api=_flatten_capabilities({
|
||||
'events': {'query': {'simple': True}},
|
||||
}),
|
||||
event_storage=_flatten_capabilities(
|
||||
{'storage': {'production_ready': True}}),
|
||||
)
|
||||
|
||||
|
||||
@profiler.trace_cls('api')
|
||||
class CapabilitiesController(rest.RestController):
|
||||
"""Manages capabilities queries."""
|
||||
|
||||
@wsme_pecan.wsexpose(Capabilities)
|
||||
def get(self):
|
||||
"""Returns a flattened dictionary of API capabilities.
|
||||
|
||||
Capabilities supported by the currently configured storage driver.
|
||||
"""
|
||||
# variation in API capabilities is effectively determined by
|
||||
# the lack of strict feature parity across storage drivers
|
||||
conn = pecan.request.conn
|
||||
driver_capabilities = {'events': conn.get_capabilities()['events']}
|
||||
driver_perf = conn.get_storage_capabilities()
|
||||
return Capabilities(api=_flatten_capabilities(driver_capabilities),
|
||||
event_storage=_flatten_capabilities(driver_perf))
|
|
@ -1,345 +0,0 @@
|
|||
#
|
||||
# Copyright 2012 New Dream Network, LLC (DreamHost)
|
||||
# Copyright 2013 IBM Corp.
|
||||
# Copyright 2013 eNovance <licensing@enovance.com>
|
||||
# Copyright Ericsson AB 2013. All rights reserved
|
||||
# Copyright 2014 Hewlett-Packard Company
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
|
||||
from oslo_log import log
|
||||
from oslo_utils import strutils
|
||||
import pecan
|
||||
from pecan import rest
|
||||
import wsme
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from panko.api.controllers.v2 import base
|
||||
from panko.api.controllers.v2 import utils as v2_utils
|
||||
from panko.api import rbac
|
||||
from panko.i18n import _
|
||||
from panko import profiler
|
||||
from panko import storage
|
||||
from panko.storage import models as event_models
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class TraitDescription(base.Base):
|
||||
"""A description of a trait, with no associated value."""
|
||||
|
||||
type = wtypes.text
|
||||
"the data type, defaults to string"
|
||||
|
||||
name = wtypes.text
|
||||
"the name of the trait"
|
||||
|
||||
@classmethod
|
||||
def sample(cls):
|
||||
return cls(name='service',
|
||||
type='string'
|
||||
)
|
||||
|
||||
|
||||
class EventQuery(base.Query):
|
||||
"""Query arguments for Event Queries."""
|
||||
|
||||
_supported_types = ['integer', 'float', 'string', 'datetime']
|
||||
|
||||
type = wsme.wsattr(wtypes.text, default='string')
|
||||
"the type of the trait filter, defaults to string"
|
||||
|
||||
field = wsme.wsattr(wtypes.text)
|
||||
'''
|
||||
Name of the field to filter on. Can be either a trait name or field of an
|
||||
event.
|
||||
1) Use start_timestamp/end_timestamp to filter on `generated` field.
|
||||
2) Specify the 'all_tenants=True' query parameter to get all events for all
|
||||
projects; this is only allowed for admin users.
|
||||
'''
|
||||
|
||||
def __repr__(self):
|
||||
# for logging calls
|
||||
return '<EventQuery %r %s %r %s>' % (self.field,
|
||||
self.op,
|
||||
self._get_value_as_type(),
|
||||
self.type)
|
||||
|
||||
@classmethod
|
||||
def sample(cls):
|
||||
return cls(field="event_type",
|
||||
type="string",
|
||||
op="eq",
|
||||
value="compute.instance.create.start")
|
||||
|
||||
|
||||
class Trait(base.Base):
|
||||
"""A Trait associated with an event."""
|
||||
|
||||
name = wtypes.text
|
||||
"The name of the trait"
|
||||
|
||||
value = wtypes.text
|
||||
"the value of the trait"
|
||||
|
||||
type = wtypes.text
|
||||
"the type of the trait (string, integer, float or datetime)"
|
||||
|
||||
@staticmethod
|
||||
def _convert_storage_trait(trait):
|
||||
"""Helper method to convert a storage model into an API trait instance.
|
||||
|
||||
If an API trait instance is passed in, just return it.
|
||||
"""
|
||||
if isinstance(trait, Trait):
|
||||
return trait
|
||||
value = (str(trait.value)
|
||||
if not trait.dtype == event_models.Trait.DATETIME_TYPE
|
||||
else trait.value.isoformat())
|
||||
trait_type = event_models.Trait.get_name_by_type(trait.dtype)
|
||||
return Trait(name=trait.name, type=trait_type, value=value)
|
||||
|
||||
@classmethod
|
||||
def sample(cls):
|
||||
return cls(name='service',
|
||||
type='string',
|
||||
value='compute.hostname'
|
||||
)
|
||||
|
||||
|
||||
class Event(base.Base):
|
||||
"""A System event."""
|
||||
|
||||
message_id = wtypes.text
|
||||
"The message ID for the notification"
|
||||
|
||||
event_type = wtypes.text
|
||||
"The type of the event"
|
||||
|
||||
_traits = None
|
||||
|
||||
def get_traits(self):
|
||||
return self._traits
|
||||
|
||||
def set_traits(self, traits):
|
||||
self._traits = list(map(Trait._convert_storage_trait, traits))
|
||||
|
||||
traits = wsme.wsproperty(wtypes.ArrayType(Trait),
|
||||
get_traits,
|
||||
set_traits)
|
||||
"Event specific properties"
|
||||
|
||||
generated = datetime.datetime
|
||||
"The time the event occurred"
|
||||
|
||||
raw = base.JsonType()
|
||||
"The raw copy of notification"
|
||||
|
||||
@classmethod
|
||||
def sample(cls):
|
||||
return cls(
|
||||
event_type='compute.instance.update',
|
||||
generated=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
|
||||
message_id='94834db1-8f1b-404d-b2ec-c35901f1b7f0',
|
||||
traits={
|
||||
Trait(name='request_id',
|
||||
value='req-4e2d67b8-31a4-48af-bb2f-9df72a353a72'),
|
||||
Trait(name='service',
|
||||
value='conductor.tem-devstack-01'),
|
||||
Trait(name='tenant_id',
|
||||
value='7f13f2b17917463b9ee21aa92c4b36d6')
|
||||
},
|
||||
raw={'status': {'nested': 'started'}}
|
||||
)
|
||||
|
||||
|
||||
def _build_rbac_query_filters():
|
||||
filters = {'t_filter': [], 'admin_proj': None}
|
||||
# Returns user_id, proj_id for non-admins
|
||||
user_id, proj_id = rbac.get_limited_to(pecan.request.headers)
|
||||
# If non-admin, filter events by user and project
|
||||
if user_id and proj_id:
|
||||
filters['t_filter'].append({"key": "project_id", "string": proj_id,
|
||||
"op": "eq"})
|
||||
filters['t_filter'].append({"key": "user_id", "string": user_id,
|
||||
"op": "eq"})
|
||||
elif not user_id and not proj_id:
|
||||
filters['admin_proj'] = pecan.request.headers.get('X-Project-Id')
|
||||
return filters
|
||||
|
||||
|
||||
def _event_query_to_event_filter(q):
|
||||
evt_model_filter = {
|
||||
'event_type': None,
|
||||
'message_id': None,
|
||||
'start_timestamp': None,
|
||||
'end_timestamp': None
|
||||
}
|
||||
filters = _build_rbac_query_filters()
|
||||
traits_filter = filters['t_filter']
|
||||
admin_proj = filters['admin_proj']
|
||||
|
||||
for i in q:
|
||||
if not i.op:
|
||||
i.op = 'eq'
|
||||
elif i.op not in base.operation_kind:
|
||||
error = (_('Operator %(operator)s is not supported. The supported'
|
||||
' operators are: %(supported)s') %
|
||||
{'operator': i.op, 'supported': base.operation_kind})
|
||||
raise base.ClientSideError(error)
|
||||
if i.field in evt_model_filter:
|
||||
if i.op != 'eq' and i.field in ('event_type', 'message_id'):
|
||||
error = (_('Operator %(operator)s is not supported. Only'
|
||||
' `eq\' operator is available for field'
|
||||
' %(field)s') %
|
||||
{'operator': i.op, 'field': i.field})
|
||||
raise base.ClientSideError(error)
|
||||
if i.op != 'ge' and i.field == 'start_timestamp':
|
||||
error = (_('Operator %(operator)s is not supported. Only'
|
||||
' `ge\' operator is available for field'
|
||||
' %(field)s') %
|
||||
{'operator': i.op, 'field': i.field})
|
||||
raise base.ClientSideError(error)
|
||||
if i.op != 'le' and i.field == 'end_timestamp':
|
||||
error = (_('Operator %(operator)s is not supported. Only'
|
||||
' `le\' operator is available for field'
|
||||
' %(field)s') %
|
||||
{'operator': i.op, 'field': i.field})
|
||||
raise base.ClientSideError(error)
|
||||
evt_model_filter[i.field] = i.value
|
||||
elif i.field == 'all_tenants' and admin_proj:
|
||||
all_tenants = strutils.bool_from_string(i.value)
|
||||
if all_tenants:
|
||||
admin_proj = None
|
||||
else:
|
||||
trait_type = i.type or 'string'
|
||||
traits_filter.append({"key": i.field,
|
||||
trait_type: i._get_value_as_type(),
|
||||
"op": i.op})
|
||||
return storage.EventFilter(traits_filter=traits_filter,
|
||||
admin_proj=admin_proj, **evt_model_filter)
|
||||
|
||||
|
||||
@profiler.trace_cls('api')
|
||||
class TraitsController(rest.RestController):
|
||||
"""Works on Event Traits."""
|
||||
|
||||
@v2_utils.requires_admin
|
||||
@wsme_pecan.wsexpose([Trait], wtypes.text, wtypes.text)
|
||||
def get_one(self, event_type, trait_name):
|
||||
"""Return all instances of a trait for an event type.
|
||||
|
||||
:param event_type: Event type to filter traits by
|
||||
:param trait_name: Trait to return values for
|
||||
"""
|
||||
LOG.debug("Getting traits for %s", event_type)
|
||||
return [Trait._convert_storage_trait(t)
|
||||
for t in pecan.request.conn.get_traits(event_type, trait_name)]
|
||||
|
||||
@v2_utils.requires_admin
|
||||
@wsme_pecan.wsexpose([TraitDescription], wtypes.text)
|
||||
def get_all(self, event_type):
|
||||
"""Return all trait names for an event type.
|
||||
|
||||
:param event_type: Event type to filter traits by
|
||||
"""
|
||||
get_trait_name = event_models.Trait.get_name_by_type
|
||||
return [TraitDescription(name=t['name'],
|
||||
type=get_trait_name(t['data_type']))
|
||||
for t in pecan.request.conn.get_trait_types(event_type)]
|
||||
|
||||
|
||||
@profiler.trace_cls('api')
|
||||
class EventTypesController(rest.RestController):
|
||||
"""Works on Event Types in the system."""
|
||||
|
||||
traits = TraitsController()
|
||||
|
||||
@v2_utils.requires_admin
|
||||
@wsme_pecan.wsexpose(None, wtypes.text)
|
||||
def get_one(self, event_type):
|
||||
"""Unused API, will always return 404.
|
||||
|
||||
:param event_type: A event type
|
||||
"""
|
||||
pecan.abort(404)
|
||||
|
||||
@v2_utils.requires_admin
|
||||
@wsme_pecan.wsexpose([str])
|
||||
def get_all(self):
|
||||
"""Get all event types."""
|
||||
return list(pecan.request.conn.get_event_types())
|
||||
|
||||
|
||||
@profiler.trace_cls('api')
|
||||
class EventsController(rest.RestController):
|
||||
"""Works on Events."""
|
||||
|
||||
@v2_utils.requires_context
|
||||
@wsme_pecan.wsexpose([Event], [EventQuery], int, [str], str)
|
||||
def get_all(self, q=None, limit=None, sort=None, marker=None):
|
||||
"""Return all events matching the query filters.
|
||||
|
||||
:param q: Filter arguments for which Events to return
|
||||
:param limit: Maximum number of samples to be returned.
|
||||
:param sort: A pair of sort key and sort direction combined with ":"
|
||||
:param marker: The pagination query marker, message id of the last
|
||||
item viewed
|
||||
"""
|
||||
rbac.enforce("events:index", pecan.request)
|
||||
q = q or []
|
||||
event_filter = _event_query_to_event_filter(q)
|
||||
pagination = v2_utils.set_pagination_options(
|
||||
sort, limit, marker, event_models.Event)
|
||||
return [Event(message_id=event.message_id,
|
||||
event_type=event.event_type,
|
||||
generated=event.generated,
|
||||
traits=event.traits,
|
||||
raw=event.raw)
|
||||
for event in
|
||||
pecan.request.conn.get_events(event_filter, pagination)]
|
||||
|
||||
@v2_utils.requires_context
|
||||
@wsme_pecan.wsexpose(Event, wtypes.text)
|
||||
def get_one(self, message_id):
|
||||
"""Return a single event with the given message id.
|
||||
|
||||
:param message_id: Message ID of the Event to be returned
|
||||
"""
|
||||
rbac.enforce("events:show", pecan.request)
|
||||
filters = _build_rbac_query_filters()
|
||||
t_filter = filters['t_filter']
|
||||
admin_proj = filters['admin_proj']
|
||||
event_filter = storage.EventFilter(traits_filter=t_filter,
|
||||
admin_proj=admin_proj,
|
||||
message_id=message_id)
|
||||
events = [event for event
|
||||
in pecan.request.conn.get_events(event_filter)]
|
||||
if not events:
|
||||
raise base.EntityNotFound(_("Event"), message_id)
|
||||
|
||||
if len(events) > 1:
|
||||
LOG.error(("More than one event with "
|
||||
"id %s returned from storage driver"), message_id)
|
||||
|
||||
event = events[0]
|
||||
|
||||
return Event(message_id=event.message_id,
|
||||
event_type=event.event_type,
|
||||
generated=event.generated,
|
||||
traits=event.traits,
|
||||
raw=event.raw)
|
|
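For reference, the operator checks above translate to request shapes like the following (a minimal sketch; host, token and the 8977 default port are illustrative):

    import requests

    # 'ge' is the only operator the API accepts for start_timestamp
    resp = requests.get(
        'http://localhost:8977/v2/events',
        headers={'X-Auth-Token': 'TOKEN'},
        params={'q.field': 'start_timestamp', 'q.op': 'ge',
                'q.value': '2021-01-01T00:00:00', 'limit': 10})
    print(resp.json())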
@ -1,30 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from panko.api.controllers.v2 import capabilities
from panko.api.controllers.v2 import events


class V2Controller(object):
    """Version 2 API controller root."""

    event_types = events.EventTypesController()
    events = events.EventsController()
    capabilities = capabilities.CapabilitiesController()
@ -1,143 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import pecan
import wsme

from panko.api.controllers.v2 import base
from panko.api import rbac


def get_auth_project(on_behalf_of=None):
    auth_project = rbac.get_limited_to_project(pecan.request.headers)
    created_by = pecan.request.headers.get('X-Project-Id')
    is_admin = auth_project is None

    if is_admin and on_behalf_of != created_by:
        auth_project = on_behalf_of
    return auth_project


# TODO(fabiog): this decorator should disappear and have a more unified
# way of controlling access and scope. Before messing with this, though,
# I feel this file should be re-factored into smaller chunks, one for each
# controller (e.g. meters and so on ...). Right now its size is
# overwhelming.
def requires_admin(func):

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        usr_limit, proj_limit = rbac.get_limited_to(pecan.request.headers)
        # If User and Project are None, you have full access.
        if usr_limit and proj_limit:
            # Since this decorator gets called outside the wsme context,
            # raising an exception results in an internal error, so call
            # abort to handle the error instead.
            ex = base.ProjectNotAuthorized(proj_limit)
            pecan.core.abort(status_code=ex.code, detail=ex.msg)
        return func(*args, **kwargs)

    return wrapped


def requires_context(func):

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        req_usr = pecan.request.headers.get('X-User-Id')
        proj_usr = pecan.request.headers.get('X-Project-Id')
        if ((not req_usr) or (not proj_usr)):
            pecan.core.abort(status_code=403,
                             detail='RBAC Authorization Failed')
        return func(*args, **kwargs)

    return wrapped


def set_pagination_options(sort, limit, marker, api_model):
    """Set pagination options from the supplied query options.

    Arguments:
    sort -- List of sorting criteria. Each sorting option has the format
        <sort key>:<sort direction>

        Valid sort keys: message_id, generated
        (SUPPORT_SORT_KEYS in panko/event/storage/models.py)
        Valid sort directions: asc (ascending), desc (descending)
        (SUPPORT_DIRS in panko/event/storage/models.py)
        This defaults to asc if unspecified
        (DEFAULT_DIR in panko/event/storage/models.py)

        impl_sqlalchemy.py:
        (see _get_pagination_query)
        If sort list is empty, this defaults to
        ['generated:asc', 'message_id:asc']
        (DEFAULT_SORT in panko/event/storage/models.py)

    limit -- Integer specifying maximum number of values to return

        If unspecified, this defaults to
        pecan.request.cfg.api.default_api_return_limit

    marker -- If specified, assumed to be an integer and assumed to be the
        message id of the last object on the previous page of the results

    api_model -- Specifies the class implementing the api model to use for
        this pagination. The class is expected to provide the
        following members:

        SUPPORT_DIRS
        SUPPORT_SORT_KEYS
        DEFAULT_DIR
        DEFAULT_SORT
        PRIMARY_KEY
    """
    if limit and limit <= 0:
        raise wsme.exc.InvalidInput('limit', limit,
                                    'the limit should be a positive integer.')
    if not limit:
        limit = pecan.request.cfg.api.default_api_return_limit

    sorts = list()
    for s in sort or []:
        sort_key, __, sort_dir = s.partition(':')
        if sort_key not in api_model.SUPPORT_SORT_KEYS:
            raise wsme.exc.InvalidInput(
                'sort', s, "the sort parameter should be a pair of sort "
                "key and sort dir combined with ':', or only"
                " sort key specified and sort dir will be default "
                "'%s', the supported sort keys are: %s" %
                (str(api_model.DEFAULT_DIR),
                 str(api_model.SUPPORT_SORT_KEYS)))
        if sort_dir and sort_dir not in api_model.SUPPORT_DIRS:
            raise wsme.exc.InvalidInput(
                'sort direction', s,
                "the sort parameter should be a pair of sort "
                "key and sort dir combined with ':', or only"
                " sort key specified and sort dir will be default "
                "'%s', the supported sort directions are: %s" %
                (str(api_model.DEFAULT_DIR),
                 str(api_model.SUPPORT_DIRS)))
        sorts.append((sort_key, sort_dir or api_model.DEFAULT_DIR))

    return {'limit': limit,
            'marker': marker,
            'sort': sorts}
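A sketch of a call to the function above and its result (with an explicit limit the pecan request context is not consulted; the Event model is the one the docstring references in panko/event/storage/models.py):

    from panko.event.storage import models as event_models

    pagination = set_pagination_options(
        sort=['generated:desc'], limit=50, marker=None,
        api_model=event_models.Event)
    # -> {'limit': 50, 'marker': None, 'sort': [('generated', 'desc')]}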
@ -1,54 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from pecan import hooks

from panko import storage


class ConfigHook(hooks.PecanHook):
    """Attach the configuration object to the request.

    That allows controllers to get it.
    """

    def __init__(self, conf):
        super(ConfigHook, self).__init__()
        self.conf = conf

    def before(self, state):
        state.request.cfg = self.conf


class DBHook(hooks.PecanHook):

    def __init__(self, conf):
        self.connection = storage.get_connection_from_config(
            conf)

    def before(self, state):
        state.request.conn = self.connection


class TranslationHook(hooks.PecanHook):

    def after(self, state):
        # After a request has been done, we need to see if
        # ClientSideError has added an error onto the response.
        # If it has, we need to get it into the thread-safe WSGI
        # environ to be used by the ParsableErrorMiddleware.
        if hasattr(state.response, 'translatable_error'):
            state.request.environ['translatable_error'] = (
                state.response.translatable_error)
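These hooks were attached when the WSGI application was assembled; a minimal sketch (the root controller path is an assumption, and conf is the object returned by service.prepare_service()):

    import pecan

    app = pecan.make_app(
        'panko.api.controllers.root.RootController',
        hooks=[ConfigHook(conf), DBHook(conf), TranslationHook()])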
@ -1,122 +0,0 @@
#
# Copyright 2013 IBM Corp.
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.

Based on pecan.middleware.errordocument
"""

from lxml import etree
from oslo_log import log
from oslo_serialization import jsonutils
import webob

from panko import i18n

LOG = log.getLogger(__name__)


class ParsableErrorMiddleware(object):
    """Replace error body with something the client can parse."""

    @staticmethod
    def best_match_language(accept_language):
        """Determine the best available locale from the Accept-Language header.

        :returns: the best language match or None if the 'Accept-Language'
                  header was not available in the request.
        """
        if not accept_language:
            return None
        all_languages = i18n.get_available_languages()
        return accept_language.best_match(all_languages)

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Request state, modified by replace_start_response()
        # and used when an error is being reported.
        state = {}

        def replacement_start_response(status, headers, exc_info=None):
            """Overrides the default response to make errors parsable."""
            try:
                status_code = int(status.split(' ')[0])
                state['status_code'] = status_code
            except (ValueError, TypeError):  # pragma: nocover
                raise Exception((
                    'ErrorDocumentMiddleware received an invalid '
                    'status %s' % status
                ))
            else:
                if (state['status_code'] // 100) not in (2, 3):
                    # Remove some headers so we can replace them later
                    # when we have the full error message and can
                    # compute the length.
                    headers = [(h, v)
                               for (h, v) in headers
                               if h not in ('Content-Length', 'Content-Type')
                               ]
                # Save the headers in case we need to modify them.
                state['headers'] = headers
                return start_response(status, headers, exc_info)

        app_iter = self.app(environ, replacement_start_response)
        if (state['status_code'] // 100) not in (2, 3):
            req = webob.Request(environ)
            error = environ.get('translatable_error')
            user_locale = self.best_match_language(req.accept_language)
            if (req.accept.best_match(['application/json', 'application/xml'])
                    == 'application/xml'):
                content_type = 'application/xml'
                try:
                    # simple check that the XML is valid
                    fault = etree.fromstring(b'\n'.join(app_iter))
                    # Add the translated error to the xml data
                    if error is not None:
                        for fault_string in fault.findall('faultstring'):
                            fault_string.text = i18n.translate(error,
                                                               user_locale)
                    error_message = etree.tostring(fault)
                    body = b''.join((b'<error_message>',
                                     error_message,
                                     b'</error_message>'))
                except etree.XMLSyntaxError as err:
                    LOG.error('Error parsing HTTP response: %s', err)
                    error_message = state['status_code']
                    body = '<error_message>%s</error_message>' % error_message
                    body = body.encode('utf-8')
            else:
                content_type = 'application/json'
                app_data = b'\n'.join(app_iter)
                app_data = app_data.decode('utf-8')
                try:
                    fault = jsonutils.loads(app_data)
                    if error is not None and 'faultstring' in fault:
                        fault['faultstring'] = i18n.translate(error,
                                                              user_locale)
                except ValueError:
                    fault = app_data
                body = jsonutils.dumps({'error_message': fault})
                body = body.encode('utf-8')

            state['headers'].append(('Content-Length', str(len(body))))
            state['headers'].append(('Content-Type', content_type))
            body = [body]
        else:
            body = app_iter
        return body
@ -1,53 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log

from panko import service
from panko import storage


LOG = log.getLogger(__name__)


def dbsync():
    conf = service.prepare_service()
    storage.get_connection_from_config(conf).upgrade()


def expirer():
    conf = service.prepare_service()

    if conf.database.event_time_to_live > 0:
        LOG.debug("Clearing expired event data")
        conn = storage.get_connection_from_config(conf)
        max_count = conf.database.events_delete_batch_size
        try:
            if max_count > 0:
                conn.clear_expired_data(conf.database.event_time_to_live,
                                        max_count)
            else:
                deleted = max_count = 100
                while deleted and deleted > 0:
                    deleted = conn.clear_expired_data(
                        conf.database.event_time_to_live,
                        max_count)
        except TypeError:
            LOG.warning("Storage driver does not support "
                        "'events_delete_batch_size' config option.")
    else:
        LOG.info("Nothing to clean, database event time to live "
                 "is disabled")
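These two functions back the panko-dbsync and panko-expirer console scripts (entry-point names as published in setup.cfg); a sketch of equivalent direct use:

    from panko.cmd import storage

    storage.dbsync()    # create or upgrade the schema, then
    storage.expirer()   # purge events older than event_time_to_live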
@ -1,56 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Guidelines for writing new hacking checks

 - Use only for Panko-specific tests. OpenStack general tests
   should be submitted to the common 'hacking' module.
 - Pick numbers in the range X3xx. Find the current test with
   the highest allocated number and then pick the next value.
 - Keep the test method code in the source file ordered based
   on the C3xx value.
 - List the new rule in the top level HACKING.rst file

"""

from hacking import core


@core.flake8ext
def no_log_warn(logical_line):
    """Disallow 'LOG.warn('

    https://bugs.launchpad.net/tempest/+bug/1508442

    C301
    """
    if logical_line.startswith('LOG.warn('):
        yield(0, 'C301 Use LOG.warning() rather than LOG.warn()')


@core.flake8ext
def no_os_popen(logical_line):
    """Disallow 'os.popen('

    Deprecated library function os.popen(). Replace it using subprocess.
    https://bugs.launchpad.net/tempest/+bug/1529836

    C302
    """

    if 'os.popen(' in logical_line:
        yield(0, 'C302 Deprecated library function os.popen(). '
                 'Replace it using subprocess module. ')
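A minimal reproduction of what the two checks flag (the logger setup is illustrative):

    import logging
    import os
    import subprocess

    LOG = logging.getLogger(__name__)

    LOG.warn('disk full')               # flagged by C301; use LOG.warning()
    LOG.warning('disk full')            # compliant

    os.popen('ls')                      # flagged by C302; deprecated helper
    subprocess.run(['ls'], check=True)  # compliant replacement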
@ -1,36 +0,0 @@
# Copyright 2014 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""oslo.i18n integration module.

See https://docs.openstack.org/oslo.i18n/latest/user/usage.html

"""

import oslo_i18n

DOMAIN = 'panko'

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary


def translate(value, user_locale):
    return oslo_i18n.translate(value, user_locale)


def get_available_languages():
    return oslo_i18n.get_available_languages(DOMAIN)
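A sketch of how these helpers were consumed elsewhere in the tree (translate() resolves lazily-translated messages, so the output depends on oslo_i18n.enable_lazy() having been called and on a compiled catalog such as the .po files below):

    from panko.i18n import _, get_available_languages, translate

    msg = _("Event")
    print(get_available_languages())  # e.g. ['en_US', 'en_GB', 'ko_KR']
    print(translate(msg, 'ko_KR'))    # '이벤트' when lazy translation is on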
@ -1,86 +0,0 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
msgid ""
msgstr ""
"Project-Id-Version: panko VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-05-22 10:08+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-07-11 05:07+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s Not Found"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr "Cannot create table %(table_name)s it already exists. Ignoring error"

msgid "Event"
msgstr "Event"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "Not Authorised to access %(aspect)s %(id)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only `eq' operator is available for "
"field %(field)s"
msgstr ""
"Operator %(operator)s is not supported. Only `eq' operator is available for "
"field %(field)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only `ge' operator is available for "
"field %(field)s"
msgstr ""
"Operator %(operator)s is not supported. Only `ge' operator is available for "
"field %(field)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only `le' operator is available for "
"field %(field)s"
msgstr ""
"Operator %(operator)s is not supported. Only `le' operator is available for "
"field %(field)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"
msgstr ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"

#, python-format
msgid ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"
msgstr ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"

#, python-format
msgid "Unable to connect to the database server: %(errmsg)s."
msgstr "Unable to connect to the database server: %(errmsg)s."

#, python-format
msgid ""
"Unable to convert the value %(value)s to the expected data type %(type)s."
msgstr ""
"Unable to convert the value %(value)s to the expected data type %(type)s."

#, python-format
msgid ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
msgstr ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
@ -1,83 +0,0 @@
# JongSoo Ha <neo415ha@gmail.com>, 2018. #zanata
msgid ""
msgstr ""
"Project-Id-Version: panko VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2019-04-10 01:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2018-11-12 03:43+0000\n"
"Last-Translator: JongSoo Ha <neo415ha@gmail.com>\n"
"Language-Team: Korean (South Korea)\n"
"Language: ko_KR\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=1; plural=0\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s 발견되지 않음"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr " %(table_name)s 이 이미 존재하므로 테이블 추가 불가능. 에러 무시"

msgid "Event"
msgstr "이벤트"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "%(aspect)s %(id)s로의 허가되지 않은 접근"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only `eq' operator is available for "
"field %(field)s"
msgstr ""
"연산자 %(operator)s 는 지원되지 않음. 오직 `eq' 연산자만이 필드 %(field)s에"
"서 사용가능"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only `ge' operator is available for "
"field %(field)s"
msgstr ""
"연산자 %(operator)s 는 지원되지 않음. 오직 `ge' 연산자만이 필드 %(field)s에"
"서 사용가능"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only `le' operator is available for "
"field %(field)s"
msgstr ""
"연산자 %(operator)s 는 지원되지 않음. 오직 `le' 연산자만이 필드 %(field)s에"
"서 사용가능"

#, python-format
msgid ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"
msgstr "연산자 %(operator)s 는 지원되지 않음. 지원되는 연산자들: %(supported)s"

#, python-format
msgid ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"
msgstr ""
"데이터타입 %(type)s 은 지원되지 않음. 지원되는 데이터타입 목록 : "
"%(supported)s"

#, python-format
msgid "Unable to connect to the database server: %(errmsg)s."
msgstr "데이터베이스 서버로 접속 불가 : %(errmsg)s."

#, python-format
msgid ""
"Unable to convert the value %(value)s to the expected data type %(type)s."
msgstr "값 %(value)s 를 희망하는 데이터 타입 %(type)s 으로의 변환 불가"

#, python-format
msgid ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
msgstr ""
" %(value)s를 예측 데이터타입 %(type)s 으로 변환도중 예측치 못한 예외 발생"
@ -1,52 +0,0 @@
# Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg

import panko.storage
import panko.utils


STORAGE_OPTS = [
    cfg.IntOpt('max_retries',
               default=10,
               deprecated_group='database',
               help='Maximum number of connection retries during startup. '
                    'Set to -1 to specify an infinite retry count.'),
    cfg.IntOpt('retry_interval',
               default=10,
               deprecated_group='database',
               help='Interval (in seconds) between retries of connection.')
]


def list_opts():
    return [
        ('DEFAULT',
         [
             # FIXME(jd) Move to [api]
             cfg.StrOpt('api_paste_config',
                        default="api_paste.ini",
                        help="Configuration file for WSGI definition of API."),
         ]),
        ('api',
         [
             cfg.IntOpt('default_api_return_limit',
                        min=1,
                        default=100,
                        help='Default maximum number of '
                             'items returned by API request.'),
         ]),
        ('database', panko.storage.OPTS),
        ('storage', STORAGE_OPTS),
    ]
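A sketch of iterating the option groups the same way a sample-config generator would when producing a panko.conf:

    from panko import opts

    for group, options in opts.list_opts():
        print(group, [opt.name for opt in options])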
@ -1,25 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools

from panko.policies import base
from panko.policies import segregation
from panko.policies import telemetry


def list_policies():
    return itertools.chain(
        base.list_rules(),
        segregation.list_rules(),
        telemetry.list_rules()
    )
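A sketch of dumping every registered rule, which is the same data a sample-policy generator consumes:

    from panko import policies

    for rule in policies.list_policies():
        print(rule.name, '->', rule.check_str)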
@ -1,34 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

ROLE_ADMIN = 'role:admin'
UNPROTECTED = ''

# This is a check string that represents a common persona for someone who has
# read-only access to the deployment, ultimately a subset of authorization for
# system users, or administrators.
SYSTEM_READER = 'role:admin and system_scope:all'

rules = [
    # This can be removed once the deprecated policies in segregation.py have
    # been removed.
    policy.RuleDefault(
        name='context_is_admin',
        check_str=ROLE_ADMIN
    )
]


def list_rules():
    return rules
@ -1,53 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import versionutils
from oslo_policy import policy

from panko.policies import base

DEPRECATED_REASON = """
The events API now supports system scope and default roles.
"""

deprecated_segregation = policy.DeprecatedRule(
    name='segregation',
    check_str='rule:context_is_admin'
)


rules = [
    policy.DocumentedRuleDefault(
        name='segregation',
        check_str=base.SYSTEM_READER,
        scope_types=['system'],
        description='Return the user and project the request '
                    'should be limited to',
        operations=[
            {
                'path': '/v2/events',
                'method': 'GET'
            },
            {
                'path': '/v2/events/{message_id}',
                'method': 'GET'
            }
        ],
        deprecated_rule=deprecated_segregation,
        deprecated_reason=DEPRECATED_REASON,
        deprecated_since=versionutils.deprecated.WALLABY
    )
]


def list_rules():
    return rules
@ -1,47 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy
from panko.policies import base

TELEMETRY_EVENTS = 'telemetry:events:%s'

rules = [
    policy.DocumentedRuleDefault(
        name=TELEMETRY_EVENTS % 'index',
        check_str=base.UNPROTECTED,
        scope_types=['system', 'project'],
        description='Return all events matching the query filters.',
        operations=[
            {
                'path': '/v2/events',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=TELEMETRY_EVENTS % 'show',
        check_str=base.UNPROTECTED,
        scope_types=['system', 'project'],
        description='Return a single event with the given message id.',
        operations=[
            {
                'path': '/v2/events/{message_id}',
                'method': 'GET'
            }
        ]
    )
]


def list_rules():
    return rules
@ -1,76 +0,0 @@
# Copyright 2017 Fujitsu Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import socket

from oslo_log import log
from oslo_utils import importutils
import webob.dec

profiler = importutils.try_import('osprofiler.profiler')
profiler_initializer = importutils.try_import('osprofiler.initializer')
profiler_web = importutils.try_import('osprofiler.web')

LOG = log.getLogger(__name__)


class WsgiMiddleware(object):

    def __init__(self, application, **kwargs):
        self.application = application

    @classmethod
    def factory(cls, global_conf, **local_conf):
        if profiler_web:
            return profiler_web.WsgiMiddleware.factory(global_conf)

        def filter_(app):
            return cls(app)

        return filter_

    @webob.dec.wsgify
    def __call__(self, request):
        return request.get_response(self.application)


def setup(conf):
    if hasattr(conf, 'profiler') and conf.profiler.enabled:
        profiler_initializer.init_from_conf(
            conf=conf,
            context={},
            project=conf.project,
            service=conf.prog,
            host=socket.gethostbyname(socket.gethostname()))
        LOG.info('OSprofiler is enabled.')


def trace_cls(name, **kwargs):
    """Wrap the OSprofiler trace_cls.

    Wrap the OSprofiler trace_cls decorator so that it will not try to
    patch the class unless OSprofiler is present.

    :param name: The name of action. For example, wsgi, rpc, db, ...
    :param kwargs: Any other keyword args used by profiler.trace_cls
    """

    def decorator(cls):
        if profiler:
            trace_decorator = profiler.trace_cls(name, **kwargs)
            return trace_decorator(cls)
        return cls

    return decorator
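The wrapper above is what makes the controller classes earlier in this diff traceable; a sketch of equivalent standalone use ('db' is an arbitrary trace name and the class is illustrative):

    from panko import profiler

    @profiler.trace_cls('db')
    class ExampleStore(object):
        def get(self, key):
            return None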
@ -1,40 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from panko import service
from panko import storage


class DatabasePublisher(object):
    """Publisher class for recording event data into a database.

    This publisher records each event into a database configured in the
    Ceilometer configuration file.

    To enable this publisher, the following section needs to be present in
    the panko.conf file:

    [database]
    connection = mysql+pymysql://panko:password@127.0.0.1/panko?charset=utf8

    Then, panko:// should be added to Ceilometer's event_pipeline.yaml.
    """

    def __init__(self, ceilo_conf, parsed_url):
        conf = service.prepare_service([], share=True)
        self.conn = storage.get_connection_from_config(conf)

    def publish_events(self, events):
        if not isinstance(events, list):
            events = [events]
        self.conn.record_events(events)
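A sketch of the publisher contract as Ceilometer exercises it once panko:// is listed in event_pipeline.yaml (the constructor arguments mirror what the pipeline passes; an empty batch keeps the example self-contained, and a real database connection is still required):

    publisher = DatabasePublisher(ceilo_conf=None, parsed_url='panko://')
    publisher.publish_events([])  # accepts a single event object or a list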
@ -1,59 +0,0 @@
# Copyright 2012-2014 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from oslo_config import cfg
from oslo_db import options as db_options
import oslo_i18n
from oslo_log import log
from oslo_reports import guru_meditation_report as gmr
from oslo_utils import importutils

from panko.conf import defaults
from panko import opts
from panko import profiler
from panko import version

profiler_opts = importutils.try_import('osprofiler.opts')


def prepare_service(argv=None, config_files=None, share=False):
    conf = cfg.ConfigOpts()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    db_options.set_defaults(conf)
    if profiler_opts:
        profiler_opts.set_defaults(conf)
    if not share:
        defaults.set_cors_middleware_defaults()
        oslo_i18n.enable_lazy()
        log.register_options(conf)

    if argv is None:
        argv = sys.argv
    conf(argv[1:], project='panko', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    if not share:
        log.setup(conf, 'panko')
    profiler.setup(conf)
    # NOTE(liusheng): the guru meditation report cannot run as a service
    # under the apache daemon, so when panko-api is running with mod_wsgi
    # the argv is [] and we do not start it.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    return conf
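A sketch of how the entry points in this diff bootstrap configuration (argv=[] skips the guru meditation autorun, as noted above; the config path shown is the conventional one):

    from panko import service

    conf = service.prepare_service(
        argv=[], config_files=['/etc/panko/panko.conf'])
    print(conf.database.event_time_to_live)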
@ -1,141 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend management
"""

from urllib import parse as urlparse

from oslo_config import cfg
from oslo_log import log
from stevedore import driver
import tenacity

from panko import utils

LOG = log.getLogger(__name__)


OPTS = [
    cfg.IntOpt('event_time_to_live',
               default=-1,
               help=("Number of seconds that events are kept "
                     "in the database for (<= 0 means forever).")),
    cfg.IntOpt('events_delete_batch_size',
               default=0,
               min=0,
               help=("Number of events to be deleted in one iteration "
                     "from the database (0 means all).")),
    cfg.StrOpt('event_connection',
               secret=True,
               deprecated_for_removal=True,
               help='The connection string used to connect '
                    'to the event database - rather use ${database.connection}'),
    cfg.BoolOpt('es_ssl_enabled',
                default=False,
                help="Enable HTTPS connection in the Elasticsearch "
                     "connection"),
    cfg.StrOpt('es_index_name',
               default='events',
               help='The name of the index in Elasticsearch')
]


class StorageUnknownWriteError(Exception):
    """Error raised when an unknown error occurs while recording."""


class StorageBadVersion(Exception):
    """Error raised when the storage backend version is not good enough."""


class StorageBadAggregate(Exception):
    """Error raised when an aggregate is unacceptable to storage backend."""
    code = 400


class InvalidMarker(Exception):
    """Invalid pagination marker parameters"""


def get_connection_from_config(conf):
    retries = conf.database.max_retries

    @tenacity.retry(
        reraise=True,
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never)
    )
    def _inner():
        url = (conf.database.connection or
               getattr(conf.database, 'event_connection', None))
        return get_connection(url, conf)

    return _inner()


def get_connection(url, conf):
    """Return an open connection to the database."""
    connection_scheme = urlparse.urlparse(url).scheme
    # SQLAlchemy connection URLs may specify a 'dialect' or
    # 'dialect+driver'. Handle the case where a driver is specified.
    engine_name = connection_scheme.split('+')[0]
    # NOTE: translation not applied bug #1446983
    LOG.debug('looking for %(name)r driver in panko.storage',
              {'name': engine_name})
    mgr = driver.DriverManager('panko.storage', engine_name)
    return mgr.driver(url, conf)


class EventFilter(object):
    """Properties for building an Event query.

    :param start_timestamp: UTC start datetime (mandatory)
    :param end_timestamp: UTC end datetime (mandatory)
    :param event_type: the name of the event. None for all.
    :param message_id: the message_id of the event. None for all.
    :param admin_proj: the project_id of admin role. None if non-admin user.
    :param traits_filter: the trait filter dicts, all of which are optional.
      This parameter is a list of dictionaries that specify trait values:

    .. code-block:: python

        {'key': <key>,
         'string': <value>,
         'integer': <value>,
         'datetime': <value>,
         'float': <value>,
         'op': <eq, lt, le, ne, gt or ge>}
    """

    def __init__(self, start_timestamp=None, end_timestamp=None,
                 event_type=None, message_id=None, traits_filter=None,
                 admin_proj=None):
        self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
        self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
        self.message_id = message_id
        self.event_type = event_type
        self.traits_filter = traits_filter or []
        self.admin_proj = admin_proj

    def __repr__(self):
        return ("<EventFilter(start_timestamp: %s,"
                " end_timestamp: %s,"
                " event_type: %s,"
                " traits: %s)>" %
                (self.start_timestamp,
                 self.end_timestamp,
                 self.event_type,
                 str(self.traits_filter)))
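A sketch of building a filter in the documented trait-dict format (all values are illustrative):

    import datetime

    from panko.storage import EventFilter

    f = EventFilter(
        start_timestamp=datetime.datetime(2021, 1, 1),
        event_type='compute.instance.update',
        traits_filter=[{'key': 'project_id',
                        'string': '7f13f2b17917463b9ee21aa92c4b36d6',
                        'op': 'eq'}])
    print(f)  # __repr__ above shows timestamps, event type and traits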
@ -1,130 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for storage engines
"""

import panko


class Model(object):
    """Base class for storage API models."""

    def __init__(self, **kwds):
        self.fields = list(kwds)
        for k, v in kwds.items():
            setattr(self, k, v)

    def as_dict(self):
        d = {}
        for f in self.fields:
            v = getattr(self, f)
            if isinstance(v, Model):
                v = v.as_dict()
            elif isinstance(v, list) and v and isinstance(v[0], Model):
                v = [sub.as_dict() for sub in v]
            d[f] = v
        return d

    def __eq__(self, other):
        return self.as_dict() == other.as_dict()

    def __ne__(self, other):
        return not self.__eq__(other)


class Connection(object):
    """Base class for event storage system connections."""

    # A dictionary representing the capabilities of this driver.
    CAPABILITIES = {
        'events': {'query': {'simple': False}},
    }

    STORAGE_CAPABILITIES = {
        'storage': {'production_ready': False},
    }

    @staticmethod
    def __init__(url, conf):
        pass

    @staticmethod
    def upgrade():
        """Migrate the database to `version` or the most recent version."""

    @staticmethod
    def clear():
        """Clear database."""

    @staticmethod
    def record_events(events):
        """Write the events to the backend storage system.

        :param events: a list of model.Event objects.
        """
        raise panko.NotImplementedError('Events not implemented.')

    @staticmethod
    def get_events(event_filter, pagination=None):
        """Return an iterable of model.Event objects."""

    @staticmethod
    def get_event_types():
        """Return all event types as an iterable of strings."""
        raise panko.NotImplementedError('Events not implemented.')

    @staticmethod
    def get_trait_types(event_type):
        """Return a dictionary containing the name and data type of the trait.

        Only trait types for the provided event_type are
        returned.
        :param event_type: the type of the Event
        """
        raise panko.NotImplementedError('Events not implemented.')

    @staticmethod
    def get_traits(event_type, trait_type=None):
        """Return all trait instances associated with an event_type.

        If trait_type is specified, only return instances of that trait type.
        :param event_type: the type of the Event to filter by
        :param trait_type: the name of the Trait to filter by
        """

        raise panko.NotImplementedError('Events not implemented.')

    @classmethod
    def get_capabilities(cls):
        """Return a dictionary with the capabilities of each driver."""
        return cls.CAPABILITIES

    @classmethod
    def get_storage_capabilities(cls):
        """Return a dictionary representing the performance capabilities.

        This is needed to evaluate the performance of each driver.
        """
        return cls.STORAGE_CAPABILITIES

    @staticmethod
    def clear_expired_data(ttl, max_count=None):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        :param max_count: Number of records to delete.
        """
        raise panko.NotImplementedError('Clearing events not implemented')
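A minimal sketch of a driver satisfying the interface above (a toy in-memory backend; the class name is hypothetical, and a real driver would still need a 'panko.storage' entry point to be loadable via stevedore):

    from panko.storage import base

    class MemoryConnection(base.Connection):
        def __init__(self, url, conf):
            self._events = []

        def record_events(self, events):
            self._events.extend(events)

        def get_events(self, event_filter, pagination=None):
            return iter(self._events)

        def get_event_types(self):
            return iter({e.event_type for e in self._events})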
@ -1,93 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
from urllib import parse as urlparse
import warnings

import happybase
from oslo_log import log
from oslo_utils import netutils

from panko.storage.hbase import inmemory as hbase_inmemory

LOG = log.getLogger(__name__)


class Connection(object):
    """Base connection class for HBase."""

    _memory_instance = None

    def __init__(self, url):
        """Hbase Connection Initialization."""
        warnings.warn("Panko's HBase driver is now deprecated. Please use "
                      "another driver.")
        opts = self._parse_connection_url(url)

        if opts['host'] == '__test__':
            url = os.environ.get('PANKO_TEST_HBASE_URL')
            if url:
                # Reparse URL, but from the env variable now
                opts = self._parse_connection_url(url)
                self.conn_pool = self._get_connection_pool(opts)
            else:
                # This is an in-memory usage for unit tests
                if Connection._memory_instance is None:
                    LOG.debug('Creating a new in-memory HBase '
                              'Connection object')
                    Connection._memory_instance = (hbase_inmemory.
                                                   MConnectionPool())
                self.conn_pool = Connection._memory_instance
        else:
            self.conn_pool = self._get_connection_pool(opts)

    @staticmethod
    def _get_connection_pool(conf):
        """Return a connection pool to the database.

        .. note::

            The tests use a subclass to override this and return an
            in-memory connection pool.
        """
        LOG.debug('connecting to HBase on %(host)s:%(port)s',
                  {'host': conf['host'], 'port': conf['port']})
        return happybase.ConnectionPool(
            size=100, host=conf['host'], port=conf['port'],
            table_prefix=conf['table_prefix'],
            table_prefix_separator=conf['table_prefix_separator'])

    @staticmethod
    def _parse_connection_url(url):
        """Parse connection parameters from a database url.

        .. note::

            HBase Thrift does not support authentication and there is no
            database name, so we are not looking for these in the url.
        """
        opts = {}
        result = netutils.urlsplit(url)
        opts['table_prefix'] = urlparse.parse_qs(
            result.query).get('table_prefix', [None])[0]
        opts['table_prefix_separator'] = urlparse.parse_qs(
            result.query).get('table_prefix_separator', ['_'])[0]
        opts['dbtype'] = result.scheme
        if ':' in result.netloc:
            opts['host'], port = result.netloc.split(':')
        else:
            opts['host'] = result.netloc
            port = 9090
        opts['port'] = port and int(port) or 9090
        return opts
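What _parse_connection_url() extracts from a typical URL, per the code above (the hostname is illustrative):

    opts = Connection._parse_connection_url(
        'hbase://hbase-thrift.example.org:9090/?table_prefix=panko')
    # -> {'table_prefix': 'panko', 'table_prefix_separator': '_',
    #     'dbtype': 'hbase', 'host': 'hbase-thrift.example.org', 'port': 9090}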
@ -1,280 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This is a very crude version of "in-memory HBase", which implements just
enough functionality of HappyBase API to support testing of our driver.
"""

import copy
import re

from oslo_log import log

import panko

LOG = log.getLogger(__name__)


class MTable(object):
    """HappyBase.Table mock."""
    def __init__(self, name, families):
        self.name = name
        self.families = families
        self._rows_with_ts = {}

    def row(self, key, columns=None):
        if key not in self._rows_with_ts:
            return {}
        res = copy.copy(sorted(
            self._rows_with_ts.get(key).items())[-1][1])
        if columns:
            keys = res.keys()
            for key in keys:
                if key not in columns:
                    res.pop(key)
        return res

    def rows(self, keys):
        return ((k, self.row(k)) for k in keys)

    def put(self, key, data, ts=None):
        # Note: we now use 'timestamped' data, but only for one Resource
        # table. That is why we may put ts='0' when ts is None. If both
        # kinds of put are ever needed in one table, ts=0 cannot be used.
        if ts is None:
            ts = "0"
        if key not in self._rows_with_ts:
            self._rows_with_ts[key] = {ts: data}
        else:
            if ts in self._rows_with_ts[key]:
                self._rows_with_ts[key][ts].update(data)
            else:
                self._rows_with_ts[key].update({ts: data})

    def delete(self, key):
        del self._rows_with_ts[key]

    def _get_latest_dict(self, row):
        # The idea here is to return the latest versions of columns.
        # In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}.
        # res will contain a list of tuples [(ts_1, {data}), (ts_2, {data})]
        # sorted by ts, i.e. in this list ts_2 is the latest. To get the
        # result as HBase provides it, we should iterate in reverse order
        # and take from the "latest" data only key-values that are not
        # present in newer data.
        data = {}
        for i in sorted(self._rows_with_ts[row].items()):
            data.update(i[1])
        return data

    def scan(self, filter=None, columns=None, row_start=None, row_stop=None,
             limit=None):
        columns = columns or []
        sorted_keys = sorted(self._rows_with_ts)
        # copy data between row_start and row_stop into a dict
        rows = {}
        for row in sorted_keys:
            if row_start and row < row_start:
                continue
            if row_stop and row > row_stop:
                break
            rows[row] = self._get_latest_dict(row)

        if columns:
            ret = {}
            for row, data in rows.items():
                for key in data:
                    if key in columns:
                        ret[row] = data
            rows = ret
        if filter:
            # TODO(jdanjou): we should really parse this properly,
            # but at the moment we are only going to support AND here
            filters = filter.split('AND')
            for f in filters:
                # Extract filter name and its arguments
                g = re.search(r"(.*)\((.*),?\)", f)
                fname = g.group(1).strip()
                fargs = [s.strip().replace('\'', '')
                         for s in g.group(2).split(',')]
                m = getattr(self, fname)
                if callable(m):
                    # overwrite rows for filtering to take effect
                    # in case of multiple filters
                    rows = m(fargs, rows)
                else:
                    raise panko.NotImplementedError(
                        "%s filter is not implemented, "
                        "you may want to add it!")
        for k in sorted(rows)[:limit]:
            yield k, rows[k]

    @staticmethod
    def SingleColumnValueFilter(args, rows):
        """This is a filter for testing "in-memory HBase".

        This method is called from scan() when 'SingleColumnValueFilter'
        is found in the 'filter' argument.
        """
        op = args[2]
        column = "%s:%s" % (args[0], args[1])
        value = args[3]
        if value.startswith('binary:'):
            value = value[7:]
        r = {}
        for row in rows:
            data = rows[row]
            if op == '=':
                if column in data and data[column] == value:
                    r[row] = data
            elif op == '<':
                if column in data and data[column] < value:
                    r[row] = data
            elif op == '<=':
                if column in data and data[column] <= value:
                    r[row] = data
            elif op == '>':
                if column in data and data[column] > value:
                    r[row] = data
            elif op == '>=':
                if column in data and data[column] >= value:
|
||||
r[row] = data
|
||||
elif op == '!=':
|
||||
if column in data and data[column] != value:
|
||||
r[row] = data
|
||||
return r
|
||||
|
||||
@staticmethod
|
||||
def ColumnPrefixFilter(args, rows):
|
||||
"""This is filter for testing "in-memory HBase".
|
||||
|
||||
This method is called from scan() when 'ColumnPrefixFilter' is found
|
||||
in the 'filter' argument.
|
||||
|
||||
:param args: a list of filter arguments, contain prefix of column
|
||||
:param rows: a dict of row prefixes for filtering
|
||||
"""
|
||||
value = args[0]
|
||||
column = 'f:' + value
|
||||
r = {}
|
||||
for row, data in rows.items():
|
||||
column_dict = {}
|
||||
for key in data:
|
||||
if key.startswith(column):
|
||||
column_dict[key] = data[key]
|
||||
r[row] = column_dict
|
||||
return r
|
||||
|
||||
@staticmethod
|
||||
def RowFilter(args, rows):
|
||||
"""This is filter for testing "in-memory HBase".
|
||||
|
||||
This method is called from scan() when 'RowFilter' is found in the
|
||||
'filter' argument.
|
||||
|
||||
:param args: a list of filter arguments, it contains operator and
|
||||
sought string
|
||||
:param rows: a dict of rows which are filtered
|
||||
"""
|
||||
op = args[0]
|
||||
value = args[1]
|
||||
if value.startswith('regexstring:'):
|
||||
value = value[len('regexstring:'):]
|
||||
r = {}
|
||||
for row, data in rows.items():
|
||||
try:
|
||||
g = re.search(value, row).group()
|
||||
if op == '=':
|
||||
if g == row:
|
||||
r[row] = data
|
||||
else:
|
||||
raise panko.NotImplementedError(
|
||||
"In-memory "
|
||||
"RowFilter doesn't support "
|
||||
"the %s operation yet" % op)
|
||||
except AttributeError:
|
||||
pass
|
||||
return r
|
||||
|
||||
@staticmethod
|
||||
def QualifierFilter(args, rows):
|
||||
"""This is filter for testing "in-memory HBase".
|
||||
|
||||
This method is called from scan() when 'QualifierFilter' is found in
|
||||
the 'filter' argument
|
||||
"""
|
||||
op = args[0]
|
||||
value = args[1]
|
||||
is_regex = False
|
||||
if value.startswith('binaryprefix:'):
|
||||
value = value[len('binaryprefix:'):]
|
||||
if value.startswith('regexstring:'):
|
||||
value = value[len('regexstring:'):]
|
||||
is_regex = True
|
||||
column = 'f:' + value
|
||||
r = {}
|
||||
for row in rows:
|
||||
data = rows[row]
|
||||
r_data = {}
|
||||
for key in data:
|
||||
if ((op == '=' and key.startswith(column)) or
|
||||
(op == '>=' and key >= column) or
|
||||
(op == '<=' and key <= column) or
|
||||
(op == '>' and key > column) or
|
||||
(op == '<' and key < column) or
|
||||
(is_regex and re.search(value, key))):
|
||||
r_data[key] = data[key]
|
||||
else:
|
||||
raise panko.NotImplementedError(
|
||||
"In-memory QualifierFilter "
|
||||
"doesn't support the %s "
|
||||
"operation yet" % op)
|
||||
if r_data:
|
||||
r[row] = r_data
|
||||
return r
|
||||
|
||||
|
||||
class MConnectionPool(object):
|
||||
def __init__(self):
|
||||
self.conn = MConnection()
|
||||
|
||||
def connection(self):
|
||||
return self.conn
|
||||
|
||||
|
||||
class MConnection(object):
|
||||
"""HappyBase.Connection mock."""
|
||||
def __init__(self):
|
||||
self.tables = {}
|
||||
|
||||
def __enter__(self, *args, **kwargs):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def open():
|
||||
LOG.debug("Opening in-memory HBase connection")
|
||||
|
||||
def create_table(self, n, families=None):
|
||||
families = families or {}
|
||||
if n in self.tables:
|
||||
return self.tables[n]
|
||||
t = MTable(n, families)
|
||||
self.tables[n] = t
|
||||
return t
|
||||
|
||||
def delete_table(self, name, use_prefix=True):
|
||||
del self.tables[name]
|
||||
|
||||
def table(self, name):
|
||||
return self.create_table(name)
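

# A minimal sketch of how a test might drive this mock; the table name,
# row key and column value below are invented for illustration.
def _example_usage():
    pool = MConnectionPool()
    with pool.connection() as conn:
        table = conn.create_table('event', families={'f': {}})
        table.put('0001:abc', {'f:event_type:1': '"compute.create"'})
        # scan() parses and applies the filter string in memory
        for key, data in table.scan(
                filter="ColumnPrefixFilter('event_type')"):
            print(key, data)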

@ -1,270 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Various HBase helpers"""

import copy
import datetime
import urllib

import bson.json_util
try:
    from happybase.hbase.ttypes import AlreadyExists
except ImportError:
    # import happybase to enable the Hbase_thrift module
    import happybase  # noqa
    from Hbase_thrift import AlreadyExists
from oslo_log import log
from oslo_serialization import jsonutils

from panko.i18n import _

LOG = log.getLogger(__name__)

EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
                     'datetime': 4}
OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='}
# We need this additional dictionary because we have reversed timestamps in
# the row-keys for stored metrics
OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<',
               'ge': '<='}


def timestamp(dt, reverse=True):
    """Timestamp is a count of microseconds since the start of the epoch.

    If reverse=True then the timestamp will be reversed. Such a technique is
    used in HBase rowkey design when period queries are required. Because
    rows are sorted lexicographically, it makes it possible to choose whether
    the 'oldest' entries sit at the top of the table or the newest ones do
    (the reversed-timestamp case).

    :param dt: datetime which is translated to a timestamp
    :param reverse: a boolean parameter for a reversed or straight count of
                    the timestamp in microseconds
    :return: count or reversed count of microseconds since the start of
             the epoch
    """
    epoch = datetime.datetime(1970, 1, 1)
    td = dt - epoch
    ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000
    return 0x7fffffffffffffff - ts if reverse else ts
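

# A worked example of the reversal, computed from the definition above:
#
# >>> timestamp(datetime.datetime(1970, 1, 1, 0, 0, 1), reverse=False)
# 1000000
# >>> timestamp(datetime.datetime(1970, 1, 1, 0, 0, 1))
# 9223372036853775807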


def make_events_query_from_filter(event_filter):
    """Return start and stop rows for filtering and a query.

    The query is based on the selected parameters.

    :param event_filter: storage.EventFilter object.
    """
    start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False)
                    if event_filter.start_timestamp else "")
    stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False)
                   if event_filter.end_timestamp else "")
    kwargs = {'event_type': event_filter.event_type,
              'event_id': event_filter.message_id}
    res_q = make_query(**kwargs)

    if event_filter.traits_filter:
        for trait_filter in event_filter.traits_filter:
            q_trait = make_query(trait_query=True, **trait_filter)
            if q_trait:
                if res_q:
                    res_q += " AND " + q_trait
                else:
                    res_q = q_trait
    return res_q, start, stop


def make_timestamp_query(func, start=None, start_op=None, end=None,
                         end_op=None, bounds_only=False, **kwargs):
    """Return a filter start and stop row for filtering and a query.

    The query is based on the fact that the CF-name is 'rts'.

    :param start: Optional start timestamp
    :param start_op: Optional start timestamp operator, like gt, ge
    :param end: Optional end timestamp
    :param end_op: Optional end timestamp operator, like lt, le
    :param bounds_only: if True then the query will not be returned
    :param func: a function that provides the format of a row
    :param kwargs: kwargs for :param func
    """
    # We don't need to dump here because get_start_end_rts returns strings
    rts_start, rts_end = get_start_end_rts(start, end)
    start_row, end_row = func(rts_start, rts_end, **kwargs)

    if bounds_only:
        return start_row, end_row

    q = []
    start_op = start_op or 'ge'
    end_op = end_op or 'lt'
    if rts_start:
        q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
                 (OP_SIGN_REV[start_op], rts_start))
    if rts_end:
        q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
                 (OP_SIGN_REV[end_op], rts_end))

    res_q = None
    if len(q):
        res_q = " AND ".join(q)

    return start_row, end_row, res_q


def get_start_end_rts(start, end):
    rts_start = str(timestamp(start)) if start else ""
    rts_end = str(timestamp(end)) if end else ""
    return rts_start, rts_end


def make_query(trait_query=None, **kwargs):
    """Return a filter query string based on the selected parameters.

    :param trait_query: optional boolean, for a trait query built from kwargs
    :param kwargs: key-value pairs to filter on. The key should be a real
                   column name in the db
    """
    q = []
    res_q = None

    # A query for traits differs from the others. It is constructed with
    # SingleColumnValueFilter and allows choosing the comparison operator.
    if trait_query:
        trait_name = kwargs.pop('key')
        op = kwargs.pop('op', 'eq')
        for k, v in kwargs.items():
            if v is not None:
                res_q = ("SingleColumnValueFilter "
                         "('f', '%s', %s, 'binary:%s', true, true)" %
                         (prepare_key(trait_name, EVENT_TRAIT_TYPES[k]),
                          OP_SIGN[op], dump(v)))
        return res_q

    # Note: we use the extended constructor for SingleColumnValueFilter here.
    # It explicitly specifies that an entry should not be returned if the CF
    # is not found in the table.
    for key, value in sorted(kwargs.items()):
        if value is not None:
            if key == 'trait_type':
                q.append("ColumnPrefixFilter('%s')" % value)
            elif key == 'event_id':
                q.append(r"RowFilter ( = , 'regexstring:\d*:%s')" % value)
            else:
                q.append("SingleColumnValueFilter "
                         "('f', '%s', =, 'binary:%s', true, true)" %
                         (quote(key), dump(value)))
    res_q = None
    if len(q):
        res_q = " AND ".join(q)

    return res_q
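

# For illustration, the filter string built for a single key-value pair;
# the event type below is invented:
#
#   make_query(event_type='compute.instance.create')
#   => "SingleColumnValueFilter ('f', 'event_type', =,
#       'binary:\"compute.instance.create\"', true, true)"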


def prepare_key(*args):
    """Prepare names for rows and columns with the correct separator.

    :param args: strings or numbers that we want to construct the key from
    :return: a key of quoted args separated by the character ":"
    """
    key_quote = []
    for key in args:
        if isinstance(key, int):
            key = str(key)
        key_quote.append(quote(key))
    return ":".join(key_quote)


def deserialize_entry(entry):
    """Return a dict of flattened results.

    The flattened result maps simple structures such as 'resource_id' to 1.

    :param entry: entry from HBase, without the row name and timestamp
    """
    flatten_result = {}
    for k, v in entry.items():
        if ':' in k[2:]:
            key = tuple([unquote(i) for i in k[2:].split(':')])
        else:
            key = unquote(k[2:])
        flatten_result[key] = load(v)
    return flatten_result


def serialize_entry(data=None, **kwargs):
    """Return a dict that is ready to be stored in HBase.

    :param data: dict to be serialized
    :param kwargs: additional args
    """
    data = data or {}
    entry_dict = copy.copy(data)
    entry_dict.update(**kwargs)

    return {'f:' + quote(k, ':'): dump(v) for k, v in entry_dict.items()}


def dump(data):
    return jsonutils.dumps(data, default=bson.json_util.default)


def load(data):
    return jsonutils.loads(data, object_hook=object_hook)


# We don't want to have tzinfo in decoded json. This object_hook overrides
# json_util.object_hook for $date.
def object_hook(dct):
    if "$date" in dct:
        dt = bson.json_util.object_hook(dct)
        return dt.replace(tzinfo=None)
    return bson.json_util.object_hook(dct)


def create_tables(conn, tables, column_families):
    for table in tables:
        try:
            conn.create_table(table, column_families)
        except AlreadyExists:
            if conn.table_prefix:
                table = ("%(table_prefix)s"
                         "%(separator)s"
                         "%(table_name)s" %
                         dict(table_prefix=conn.table_prefix,
                              separator=conn.table_prefix_separator,
                              table_name=table))

            LOG.warning(_("Cannot create table %(table_name)s, "
                          "it already exists. Ignoring error")
                        % {'table_name': table})


def quote(s, *args):
    """Return a quoted string even if it is a unicode one.

    :param s: string that should be quoted
    :param args: any symbols we want to stay unquoted
    """
    s_en = s.encode('utf8')
    return urllib.parse.quote(s_en, *args)


def unquote(s):
    """Return an unquoted and decoded string.

    :param s: string that should be unquoted
    """
    # On Python 3, urllib.parse.unquote already returns a decoded str.
    return urllib.parse.unquote(s)
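

# A round-trip sketch using the helpers above; the field names and values
# are invented for illustration.
def _example_round_trip():
    entry = serialize_entry({'event_type': 'compute.create'}, rts=123)
    # entry == {'f:event_type': '"compute.create"', 'f:rts': '123'}
    result = deserialize_entry(entry)
    # result == {'event_type': 'compute.create', 'rts': 123}
    return result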

@ -1,296 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import operator

import elasticsearch as es
from elasticsearch import helpers
from oslo_log import log
from oslo_utils import netutils
from oslo_utils import timeutils

from panko import storage
from panko.storage import base
from panko.storage import models
from panko import utils

LOG = log.getLogger(__name__)


AVAILABLE_CAPABILITIES = {
    'events': {'query': {'simple': True}},
}


AVAILABLE_STORAGE_CAPABILITIES = {
    'storage': {'production_ready': True},
}


class Connection(base.Connection):
    """Put the event data into an ElasticSearch db.

    Events in ElasticSearch are indexed by day and stored by event_type.
    An example document::

      {"_index":"events_2014-10-21",
       "_type":"event_type0",
       "_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779",
       "_score":1.0,
       "_source":{"timestamp": "2014-10-21T20:02:09.274797",
                  "traits": {"id4_0": "2014-10-21T20:02:09.274797",
                             "id3_0": 0.7510790937279408,
                             "id2_0": 5,
                             "id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"}
                 }
      }
    """

    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )
    # NOTE(gordc): mainly for testing, data is not searchable after write,
    # it is only searchable after periodic refreshes.
    _refresh_on_write = False

    def __init__(self, url, conf):
        url_split = netutils.urlsplit(url)

        use_ssl = conf.database.es_ssl_enabled

        self.index_name = conf.database.es_index_name
        self.conn = es.Elasticsearch(hosts=url_split.netloc + url_split.path,
                                     use_ssl=use_ssl)

    def upgrade(self):
        iclient = es.client.IndicesClient(self.conn)
        ts_template = {
            'template': '*',
            'mappings': {'_default_':
                         {'properties': {'traits': {'type': 'nested'}}}}}
        iclient.put_template(name='enable_timestamp', body=ts_template)

    def record_events(self, events):

        def _build_bulk_index(event_list):
            for ev in event_list:
                traits = {t.name: t.value for t in ev.traits}
                yield {'_op_type': 'create',
                       '_index': '%s_%s' % (self.index_name,
                                            ev.generated.date().isoformat()),
                       '_type': ev.event_type,
                       '_id': ev.message_id,
                       '_source': {'timestamp': ev.generated.isoformat(),
                                   'traits': traits,
                                   'raw': ev.raw}}

        error = None
        for ok, result in helpers.streaming_bulk(
                self.conn, _build_bulk_index(events)):
            if not ok:
                __, result = result.popitem()
                if result['status'] == 409:
                    LOG.info('Duplicate event detected, skipping it: %s',
                             result)
                else:
                    LOG.exception('Failed to record event: %s', result)
                    error = storage.StorageUnknownWriteError(result)

        if self._refresh_on_write:
            self.conn.indices.refresh(index='%s_*' % self.index_name)
            while self.conn.cluster.pending_tasks(local=True)['tasks']:
                pass
        if error:
            raise error

    def _make_dsl_from_filter(self, indices, ev_filter):
        q_args = {}
        filters = []

        if ev_filter.start_timestamp:
            filters.append({'range': {'timestamp':
                           {'ge': ev_filter.start_timestamp.isoformat()}}})
            while indices[0] < (
                    '%s_%s' % (self.index_name,
                               ev_filter.start_timestamp.date().isoformat())):
                del indices[0]
        if ev_filter.end_timestamp:
            filters.append({'range': {'timestamp':
                           {'le': ev_filter.end_timestamp.isoformat()}}})
            while indices[-1] > (
                    '%s_%s' % (self.index_name,
                               ev_filter.end_timestamp.date().isoformat())):
                del indices[-1]
        q_args['index'] = indices

        if ev_filter.event_type:
            q_args['doc_type'] = ev_filter.event_type
        if ev_filter.message_id:
            filters.append({'term': {'_id': ev_filter.message_id}})

        if ev_filter.traits_filter or ev_filter.admin_proj:
            or_cond = []
            trait_filters = []
            for t_filter in ev_filter.traits_filter or []:
                value = None
                for val_type in ['integer', 'string', 'float', 'datetime']:
                    if t_filter.get(val_type):
                        value = t_filter.get(val_type)
                        if isinstance(value, str):
                            value = value.lower()
                        elif isinstance(value, datetime.datetime):
                            value = value.isoformat()
                        break
                if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']:
                    op = (t_filter.get('op').replace('ge', 'gte')
                          .replace('le', 'lte'))
                    trait_filters.append(
                        {'range': {
                            "traits.%s" % t_filter['key']: {op: value}}})
                else:
                    tf = {"query": {"query_string": {
                        "query": "traits.%s: \"%s\"" % (t_filter['key'], value)
                    }}}
                    if t_filter.get('op') == 'ne':
                        tf = {"not": tf}
                    trait_filters.append(tf)
            if ev_filter.admin_proj:
                or_cond = [{'missing': {'field': 'traits.project_id'}},
                           {'term': {
                               'traits.project_id': ev_filter.admin_proj}}]
            filters.append(
                {'nested': {'path': 'traits', 'query': {'filtered': {
                    'filter': {'bool': {'must': trait_filters,
                                        'should': or_cond}}}}}})

        q_args['body'] = {'query': {'filtered':
                                    {'filter': {'bool': {'must': filters}}}}}
        return q_args
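
    # As a rough sketch, a filter carrying only a message id against a
    # single daily index yields keyword arguments shaped like:
    #
    #   {'index': ['events_2014-10-21'],
    #    'body': {'query': {'filtered': {'filter': {'bool': {'must': [
    #        {'term': {'_id': 'dc90e464-65ab-4a5d-bf66-ecb956b5d779'}}
    #    ]}}}}}}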

    def get_events(self, event_filter, pagination=None):
        limit = None
        if pagination:
            if pagination.get('sort'):
                LOG.warning('Driver does not support sort functionality')
            limit = pagination.get('limit')
            if limit == 0:
                return
        iclient = es.client.IndicesClient(self.conn)
        # get_mapping returns a dict keyed by index name; build a sorted
        # list so _make_dsl_from_filter can trim date-bounded indices
        # positionally.
        indices = sorted(iclient.get_mapping('%s_*' % self.index_name).keys())
        if indices:
            filter_args = self._make_dsl_from_filter(indices, event_filter)
            if limit is not None:
                filter_args['size'] = limit
            results = self.conn.search(fields=['_id', 'timestamp',
                                               '_type', '_source'],
                                       sort='timestamp:asc',
                                       **filter_args)
            trait_mappings = {}
            for record in results['hits']['hits']:
                trait_list = []
                if record['_type'] not in trait_mappings:
                    trait_mappings[record['_type']] = list(
                        self.get_trait_types(record['_type']))
                for key in record['_source']['traits'].keys():
                    value = record['_source']['traits'][key]
                    for t_map in trait_mappings[record['_type']]:
                        if t_map['name'] == key:
                            dtype = t_map['data_type']
                            break
                    else:
                        dtype = models.Trait.TEXT_TYPE
                    trait_list.append(models.Trait(
                        name=key, dtype=dtype,
                        value=models.Trait.convert_value(dtype, value)))
                gen_ts = timeutils.normalize_time(timeutils.parse_isotime(
                    record['_source']['timestamp']))
                yield models.Event(message_id=record['_id'],
                                   event_type=record['_type'],
                                   generated=gen_ts,
                                   traits=sorted(
                                       trait_list,
                                       key=operator.attrgetter('dtype')),
                                   raw=record['_source']['raw'])

    def get_event_types(self):
        iclient = es.client.IndicesClient(self.conn)
        es_mappings = iclient.get_mapping('%s_*' % self.index_name)
        seen_types = set()
        for index in es_mappings.keys():
            for ev_type in es_mappings[index]['mappings'].keys():
                seen_types.add(ev_type)
        # TODO(gordc): tests assume sorted ordering but backends are not
        # explicitly ordered.
        # NOTE: _default_ is a type that appears in all mappings but is not
        # a real 'type'.
        seen_types.discard('_default_')
        return sorted(list(seen_types))

    @staticmethod
    def _remap_es_types(d_type):
        if d_type == 'string':
            d_type = 'text'
        elif d_type == 'long':
            d_type = 'int'
        elif d_type == 'double':
            d_type = 'float'
        elif d_type == 'date' or d_type == 'date_time':
            d_type = 'datetime'
        return d_type

    def get_trait_types(self, event_type):
        iclient = es.client.IndicesClient(self.conn)
        es_mappings = iclient.get_mapping('%s_*' % self.index_name)
        seen_types = []
        for index in es_mappings.keys():
            # if event_type exists in the index and has traits
            if (es_mappings[index]['mappings'].get(event_type) and
                    es_mappings[index]['mappings'][event_type]['properties']
                    ['traits'].get('properties')):
                for t_type in (es_mappings[index]['mappings'][event_type]
                               ['properties']['traits']['properties'].keys()):
                    d_type = (es_mappings[index]['mappings'][event_type]
                              ['properties']['traits']['properties']
                              [t_type]['type'])
                    d_type = models.Trait.get_type_by_name(
                        self._remap_es_types(d_type))
                    if (t_type, d_type) not in seen_types:
                        yield {'name': t_type, 'data_type': d_type}
                        seen_types.append((t_type, d_type))

    def get_traits(self, event_type, trait_type=None):
        t_types = dict((res['name'], res['data_type'])
                       for res in self.get_trait_types(event_type))
        if not t_types or (trait_type and trait_type not in t_types.keys()):
            return
        result = self.conn.search('%s_*' % self.index_name, event_type)
        for ev in result['hits']['hits']:
            if trait_type and ev['_source']['traits'].get(trait_type):
                yield models.Trait(
                    name=trait_type,
                    dtype=t_types[trait_type],
                    value=models.Trait.convert_value(
                        t_types[trait_type],
                        ev['_source']['traits'][trait_type]))
            else:
                for trait in ev['_source']['traits'].keys():
                    yield models.Trait(
                        name=trait,
                        dtype=t_types[trait],
                        value=models.Trait.convert_value(
                            t_types[trait],
                            ev['_source']['traits'][trait]))

@ -1,226 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import operator

from oslo_log import log

from panko.storage import base
from panko.storage.hbase import base as hbase_base
from panko.storage.hbase import utils as hbase_utils
from panko.storage import models
from panko import utils

LOG = log.getLogger(__name__)


AVAILABLE_CAPABILITIES = {
    'events': {'query': {'simple': True}},
}


AVAILABLE_STORAGE_CAPABILITIES = {
    'storage': {'production_ready': True},
}


class Connection(hbase_base.Connection, base.Connection):
    """Put the event data into an HBase database

    Collections:

    - events:

      - row_key: timestamp of event's generation + uuid of event
        in format: "%s:%s" % (ts, Event.message_id)
      - Column Families:

        f: contains the following qualifiers:

          - event_type: description of event's type
          - timestamp: time stamp of event generation
          - all traits for this event in format:

            .. code-block:: python

              "%s:%s" % (trait_name, trait_type)
    """

    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )
    _memory_instance = None

    EVENT_TABLE = "event"

    def upgrade(self):
        tables = [self.EVENT_TABLE]
        column_families = {'f': dict(max_versions=1)}
        with self.conn_pool.connection() as conn:
            hbase_utils.create_tables(conn, tables, column_families)

    def clear(self):
        LOG.debug('Dropping HBase schema...')
        with self.conn_pool.connection() as conn:
            for table in [self.EVENT_TABLE]:
                try:
                    conn.disable_table(table)
                except Exception:
                    LOG.debug('Cannot disable table but ignoring error')
                try:
                    conn.delete_table(table)
                except Exception:
                    LOG.debug('Cannot delete table but ignoring error')

    def record_events(self, event_models):
        """Write the events to HBase.

        :param event_models: a list of models.Event objects.
        """
        error = None
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            for event_model in event_models:
                # The row key consists of the timestamp and the message_id
                # from models.Event so that, for storage purposes, events
                # are sorted by timestamp in the database.
                ts = event_model.generated
                row = hbase_utils.prepare_key(
                    hbase_utils.timestamp(ts, reverse=False),
                    event_model.message_id)
                event_type = event_model.event_type
                traits = {}
                if event_model.traits:
                    for trait in event_model.traits:
                        key = hbase_utils.prepare_key(trait.name, trait.dtype)
                        traits[key] = trait.value
                record = hbase_utils.serialize_entry(traits,
                                                     event_type=event_type,
                                                     timestamp=ts,
                                                     raw=event_model.raw)
                try:
                    events_table.put(row, record)
                except Exception as ex:
                    LOG.exception("Failed to record event: %s", ex)
                    error = ex
        if error:
            raise error
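
    # A sketch of the row key produced for one event; the uuid is invented
    # and the timestamp value abbreviated:
    #
    #   prepare_key(timestamp(generated, reverse=False), message_id)
    #   => '1413921729274797:dc90e464-65ab-4a5d-bf66-ecb956b5d779'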

    def get_events(self, event_filter, pagination=None):
        """Return an iterator of models.Event objects.

        :param event_filter: storage.EventFilter object, consists of filters
                             for events that are stored in the database.
        :param pagination: Pagination parameters.
        """
        limit = None
        if pagination:
            if pagination.get('sort'):
                LOG.warning('Driver does not support sort functionality')
            limit = pagination.get('limit')
            if limit == 0:
                return
        q, start, stop = hbase_utils.make_events_query_from_filter(
            event_filter)
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)

            gen = events_table.scan(filter=q, row_start=start, row_stop=stop,
                                    limit=limit)

            for event_id, data in gen:
                traits = []
                events_dict = hbase_utils.deserialize_entry(data)
                for key, value in events_dict.items():
                    if isinstance(key, tuple):
                        trait_name, trait_dtype = key
                        traits.append(models.Trait(name=trait_name,
                                                   dtype=int(trait_dtype),
                                                   value=value))
                ts, mess = event_id.split(':')

                yield models.Event(
                    message_id=hbase_utils.unquote(mess),
                    event_type=events_dict['event_type'],
                    generated=events_dict['timestamp'],
                    traits=sorted(traits,
                                  key=operator.attrgetter('dtype')),
                    raw=events_dict['raw']
                )

    def get_event_types(self):
        """Return all event types as an iterable of strings."""
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            gen = events_table.scan()

            event_types = set()
            for event_id, data in gen:
                events_dict = hbase_utils.deserialize_entry(data)
                for key, value in events_dict.items():
                    if not isinstance(key, tuple) and key.startswith(
                            'event_type'):
                        if value not in event_types:
                            event_types.add(value)
                            yield value

    def get_trait_types(self, event_type):
        """Return a dictionary containing the name and data type of the trait.

        Only trait types for the provided event_type are returned.

        :param event_type: the type of the Event
        """

        q = hbase_utils.make_query(event_type=event_type)
        trait_names = set()
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            gen = events_table.scan(filter=q)
            for event_id, data in gen:
                events_dict = hbase_utils.deserialize_entry(data)
                for key, value in events_dict.items():
                    if isinstance(key, tuple):
                        trait_name, trait_type = key
                        if trait_name not in trait_names:
                            # We only return unique trait types here: e.g. if
                            # the same trait type is found in different events
                            # with an equal event_type, the method returns
                            # only one trait type. It is assumed that a given
                            # trait name can have only one trait type.
                            trait_names.add(trait_name)
                            data_type = models.Trait.type_names[
                                int(trait_type)]
                            yield {'name': trait_name, 'data_type': data_type}

    def get_traits(self, event_type, trait_type=None):
        """Return all trait instances associated with an event_type.

        If trait_type is specified, only return instances of that trait type.
        :param event_type: the type of the Event to filter by
        :param trait_type: the name of the Trait to filter by
        """
        q = hbase_utils.make_query(event_type=event_type,
                                   trait_type=trait_type)
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            gen = events_table.scan(filter=q)
            for event_id, data in gen:
                events_dict = hbase_utils.deserialize_entry(data)
                for key, value in events_dict.items():
                    if isinstance(key, tuple):
                        trait_name, trait_type = key
                        yield models.Trait(name=trait_name,
                                           dtype=int(trait_type), value=value)

@ -1,33 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log

from panko.storage import base

LOG = log.getLogger(__name__)


class Connection(base.Connection):
    """Log event data."""

    @staticmethod
    def clear_expired_data(ttl, max_count):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        :param max_count: Number of records to delete.
        """
        LOG.info("Dropping %d events with TTL %d", max_count, ttl)

@ -1,109 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""

from oslo_log import log
import pymongo

from panko import storage
from panko.storage.mongo import utils as pymongo_utils
from panko.storage import pymongo_base

LOG = log.getLogger(__name__)


class Connection(pymongo_base.Connection):
    """Put the event data into a MongoDB database."""

    CONNECTION_POOL = pymongo_utils.ConnectionPool()

    def __init__(self, url, conf):

        # NOTE(jd) Use our own connection pooling on top of the Pymongo one.
        # We need that, otherwise we would overflow the MongoDB instance with
        # new connections since we instantiate a Pymongo client each time
        # someone requires a new storage connection.
        self.conn = self.CONNECTION_POOL.connect(
            url,
            conf.database.max_retries,
            conf.database.retry_interval)

        # Require MongoDB 2.4 to use $setOnInsert
        if self.conn.server_info()['versionArray'] < [2, 4]:
            raise storage.StorageBadVersion("Need at least MongoDB 2.4")

        connection_options = pymongo.uri_parser.parse_uri(url)
        self.db = getattr(self.conn, connection_options['database'])
        if connection_options.get('username'):
            self.db.authenticate(connection_options['username'],
                                 connection_options['password'])

        # NOTE(jd) Upgrading is just about creating indexes, so let's do this
        # on connection to be sure at least the TTL is correctly updated if
        # needed.
        self.upgrade()

    @staticmethod
    def update_ttl(ttl, ttl_index_name, index_field, coll):
        """Update or create time_to_live indexes.

        :param ttl: time to live in seconds.
        :param ttl_index_name: name of the index we want to update or create.
        :param index_field: field with the index that we need to update.
        :param coll: collection whose indexes need to be updated.
        """
        indexes = coll.index_information()
        if ttl <= 0:
            if ttl_index_name in indexes:
                coll.drop_index(ttl_index_name)
            return

        if ttl_index_name in indexes:
            return coll.database.command(
                'collMod', coll.name,
                index={'keyPattern': {index_field: pymongo.ASCENDING},
                       'expireAfterSeconds': ttl})

        coll.create_index([(index_field, pymongo.ASCENDING)],
                          expireAfterSeconds=ttl,
                          name=ttl_index_name)
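
    # For example, keeping events for seven days on the collection used by
    # clear_expired_data below (db is assumed to be an open pymongo
    # Database handle):
    #
    #   Connection.update_ttl(7 * 24 * 3600, 'event_ttl', 'timestamp',
    #                         db.event)
    #
    # A later call with a different ttl issues collMod against the existing
    # index instead of rebuilding it, and ttl <= 0 drops the index.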

    def upgrade(self):
        # create the collection if not present
        if 'event' not in self.db.collection_names():
            self.db.create_collection('event')
        # Establish indexes.
        # NOTE(idegtiarov): These indexes cover get_events, get_event_types,
        # and get_trait_types requests based on the event_type and timestamp
        # fields.
        self.db.event.create_index(
            [('event_type', pymongo.ASCENDING),
             ('timestamp', pymongo.ASCENDING)],
            name='event_type_idx'
        )

    def clear(self):
        self.conn.drop_database(self.db.name)
        # The connection will be reopened automatically if needed
        self.conn.close()

    def clear_expired_data(self, ttl, max_count=None):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        :param max_count: Number of records to delete (not used for MongoDB).
        """
        self.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event)
        LOG.info("Clearing expired event data is based on the native "
                 "MongoDB time-to-live feature and runs in the background.")

@ -1,496 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""SQLAlchemy storage backend."""
import collections
import datetime

from oslo_db import exception as dbexc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as oslo_sql_utils
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.engine import url as sqlalchemy_url
from sqlalchemy.orm import aliased

from panko import storage
from panko.storage import base
from panko.storage import models as api_models
from panko.storage.sqlalchemy import models
from panko import utils

LOG = log.getLogger(__name__)

osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')


AVAILABLE_CAPABILITIES = {
    'events': {'query': {'simple': True}},
}


AVAILABLE_STORAGE_CAPABILITIES = {
    'storage': {'production_ready': True},
}


TRAIT_MAPLIST = [(api_models.Trait.NONE_TYPE, models.TraitText),
                 (api_models.Trait.TEXT_TYPE, models.TraitText),
                 (api_models.Trait.INT_TYPE, models.TraitInt),
                 (api_models.Trait.FLOAT_TYPE, models.TraitFloat),
                 (api_models.Trait.DATETIME_TYPE, models.TraitDatetime)]


TRAIT_ID_TO_MODEL = dict((x, y) for x, y in TRAIT_MAPLIST)
TRAIT_MODEL_TO_ID = dict((y, x) for x, y in TRAIT_MAPLIST)


trait_models_dict = {'string': models.TraitText,
                     'integer': models.TraitInt,
                     'datetime': models.TraitDatetime,
                     'float': models.TraitFloat}


def _get_model_and_conditions(trait_type, key, value, op='eq'):
    trait_model = aliased(trait_models_dict[trait_type])
    op_dict = {'eq': (trait_model.value == value),
               'lt': (trait_model.value < value),
               'le': (trait_model.value <= value),
               'gt': (trait_model.value > value),
               'ge': (trait_model.value >= value),
               'ne': (trait_model.value != value)}
    conditions = [trait_model.key == key, op_dict[op]]
    return (trait_model, conditions)
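

# For instance, an integer trait filter maps to an aliased model plus a
# pair of SQL conditions (the key and value below are invented):
#
#   model, conds = _get_model_and_conditions('integer', 'instance_count',
#                                            5, op='ge')
#   # conds is roughly [model.key == 'instance_count', model.value >= 5]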


class Connection(base.Connection):
    """Put the event data into a SQLAlchemy database.

    Tables::

        - EventType
          - event definition
          - { id: event type id
              desc: description of event
              }
        - Event
          - event data
          - { id: event id
              message_id: message id
              generated = timestamp of event
              event_type_id = event type -> eventtype.id
              }
        - TraitInt
          - int trait value
          - { event_id: event -> event.id
              key: trait name
              value: integer value
              }
        - TraitDatetime
          - datetime trait value
          - { event_id: event -> event.id
              key: trait name
              value: datetime value
              }
        - TraitText
          - text trait value
          - { event_id: event -> event.id
              key: trait name
              value: text value
              }
        - TraitFloat
          - float trait value
          - { event_id: event -> event.id
              key: trait name
              value: float value
              }

    """
    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def __init__(self, url, conf):
        # Set max_retries to 0, since oslo.db may otherwise attempt to
        # retry making the db connection up to max_retries ** 2 times in
        # the failure case; db reconnection has already been implemented
        # in the storage.__init__.get_connection_from_config function.
        options = dict(conf.database.items())
        options['max_retries'] = 0
        # oslo.db doesn't support options defined by Panko
        for opt in storage.OPTS:
            options.pop(opt.name, None)
        self._engine_facade = db_session.EngineFacade(self.dress_url(url),
                                                      **options)
        if osprofiler_sqlalchemy:
            osprofiler_sqlalchemy.add_tracing(
                sa, self._engine_facade.get_engine(), 'db')

    @staticmethod
    def dress_url(url):
        # If no explicit driver has been set, we default to pymysql
        if url.startswith("mysql://"):
            url = sqlalchemy_url.make_url(url)
            url.drivername = "mysql+pymysql"
            return str(url)
        return url
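
    # So a bare mysql URL is rewritten while anything else passes through
    # unchanged; the credentials here are placeholders:
    #
    #   dress_url('mysql://user:secret@db-host/panko')
    #   => 'mysql+pymysql://user:secret@db-host/panko'
    #   dress_url('sqlite://')
    #   => 'sqlite://'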

    def upgrade(self):
        engine = self._engine_facade.get_engine()
        models.Base.metadata.create_all(engine)

    def clear(self):
        engine = self._engine_facade.get_engine()
        for table in reversed(models.Base.metadata.sorted_tables):
            engine.execute(table.delete())
        engine.dispose()

    def _get_or_create_event_type(self, event_type, session):
        """Check whether an event type with the supplied name already exists.

        If not, we create it and return the record. This may result in a
        flush.
        """
        try:
            with session.begin(nested=True):
                et = session.query(models.EventType).filter(
                    models.EventType.desc == event_type).first()
                if not et:
                    et = models.EventType(event_type)
                    session.add(et)
        except dbexc.DBDuplicateEntry:
            et = self._get_or_create_event_type(event_type, session)

        return et

    def record_events(self, event_models):
        """Write the events to the SQL database via sqlalchemy.

        :param event_models: a list of model.Event objects.
        """
        session = self._engine_facade.get_session()
        error = None
        for event_model in event_models:
            event = None
            try:
                with session.begin():
                    event_type = self._get_or_create_event_type(
                        event_model.event_type, session=session)
                    event = models.Event(event_model.message_id, event_type,
                                         event_model.generated,
                                         event_model.raw)
                    session.add(event)
                    session.flush()

                    if event_model.traits:
                        trait_map = {}
                        for trait in event_model.traits:
                            if trait_map.get(trait.dtype) is None:
                                trait_map[trait.dtype] = []
                            trait_map[trait.dtype].append(
                                {'event_id': event.id,
                                 'key': trait.name,
                                 'value': trait.value})
                        for dtype in trait_map.keys():
                            model = TRAIT_ID_TO_MODEL[dtype]
                            session.execute(model.__table__.insert(),
                                            trait_map[dtype])
            except dbexc.DBDuplicateEntry as e:
                LOG.debug("Duplicate event detected, skipping it: %s", e)
            except KeyError as e:
                LOG.exception('Failed to record event: %s', e)
            except Exception as e:
                LOG.exception('Failed to record event: %s', e)
                error = e
        if error:
            raise error

    def _get_pagination_query(self, query, pagination, api_model, model):
        limit = pagination.get('limit')

        marker = None
        if pagination.get('marker'):
            marker_filter = storage.EventFilter(
                message_id=pagination.get('marker'))
            markers = list(self.get_events(marker_filter))
            if markers:
                marker = markers[0]
            else:
                raise storage.InvalidMarker(
                    'Marker %s not found.' % pagination['marker'])

        if not pagination.get('sort'):
            pagination['sort'] = api_model.DEFAULT_SORT
        sort_keys = [s[0] for s in pagination['sort']]
        sort_dirs = [s[1] for s in pagination['sort']]

        return oslo_sql_utils.paginate_query(
            query, model, limit, sort_keys, sort_dirs=sort_dirs, marker=marker)
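
    # A sketch of the pagination dict this helper expects; the marker value
    # is an invented message id:
    #
    #   {'limit': 100,
    #    'marker': 'dc90e464-65ab-4a5d-bf66-ecb956b5d779',
    #    'sort': [('generated', 'desc')]}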

    def get_events(self, event_filter, pagination=None):
        """Return an iterable of model.Event objects.

        :param event_filter: EventFilter instance
        :param pagination: Pagination parameters.
        """
        pagination = pagination or {}
        session = self._engine_facade.get_session()
        with session.begin():
            # Build up the join conditions
            event_join_conditions = [models.EventType.id ==
                                     models.Event.event_type_id]

            if event_filter.event_type:
                event_join_conditions.append(models.EventType.desc ==
                                             event_filter.event_type)

            # Build up the where conditions
            event_filter_conditions = []
            if event_filter.message_id:
                event_filter_conditions.append(
                    models.Event.message_id == event_filter.message_id)
            if event_filter.start_timestamp:
                event_filter_conditions.append(
                    models.Event.generated >= event_filter.start_timestamp)
            if event_filter.end_timestamp:
                event_filter_conditions.append(
                    models.Event.generated <= event_filter.end_timestamp)

            trait_subq = None
            # Build the trait filter
            if event_filter.traits_filter:
                filters = list(event_filter.traits_filter)
                trait_filter = filters.pop()
                key = trait_filter.pop('key')
                op = trait_filter.pop('op', 'eq')
                trait_type, value = list(trait_filter.items())[0]

                trait_model, conditions = _get_model_and_conditions(
                    trait_type, key, value, op)
                trait_subq = (session
                              .query(trait_model.event_id.label('ev_id'))
                              .filter(*conditions))

                first_model = trait_model
                for label_num, trait_filter in enumerate(filters):
                    key = trait_filter.pop('key')
                    op = trait_filter.pop('op', 'eq')
                    trait_type, value = list(trait_filter.items())[0]
                    trait_model, conditions = _get_model_and_conditions(
                        trait_type, key, value, op)
                    trait_subq = (
                        trait_subq
                        .add_columns(
                            trait_model.event_id.label('l%d' % label_num))
                        .filter(
                            first_model.event_id == trait_model.event_id,
                            *conditions))

                trait_subq = trait_subq.subquery()

            query = (session.query(models.Event.id)
                     .join(models.EventType,
                           sa.and_(*event_join_conditions)))
            if trait_subq is not None:
                query = query.join(trait_subq,
                                   trait_subq.c.ev_id == models.Event.id)
            if event_filter.admin_proj:
                no_proj_q = session.query(models.TraitText.event_id).filter(
                    models.TraitText.key == 'project_id')
                admin_q = (session.query(models.TraitText.event_id).filter(
                    ~sa.exists().where(models.TraitText.event_id ==
                                       no_proj_q.subquery().c.event_id)).union(
                    session.query(models.TraitText.event_id).filter(sa.and_(
                        models.TraitText.key == 'project_id',
                        models.TraitText.value == event_filter.admin_proj,
                        models.Event.id == models.TraitText.event_id))))
                query = query.filter(sa.exists().where(
                    models.Event.id ==
                    admin_q.subquery().c.trait_text_event_id))
            if event_filter_conditions:
                query = query.filter(sa.and_(*event_filter_conditions))

            query = self._get_pagination_query(
                query, pagination, api_models.Event, models.Event)

            event_list = collections.OrderedDict()
            # get a list of all events that match the filters
            for (id_, generated, message_id,
                 desc, raw) in query.add_columns(
                    models.Event.generated, models.Event.message_id,
                    models.EventType.desc, models.Event.raw).all():
                event_list[id_] = api_models.Event(
                    message_id, desc, generated, [], raw)

            # Query all traits related to events.
            # NOTE (gordc): the cast is done because pgsql defaults to TEXT
            # when handling unknown values such as null.
            trait_q = (
                session.query(
                    models.TraitDatetime.event_id,
                    models.TraitDatetime.key, models.TraitDatetime.value,
                    sa.cast(sa.null(), sa.Integer),
                    sa.cast(sa.null(), sa.Float(53)),
                    sa.cast(sa.null(), sa.String(255)))
                .filter(sa.exists().where(
                    models.TraitDatetime.event_id == query.subquery().c.id))
            ).union_all(
                session.query(
                    models.TraitInt.event_id,
                    models.TraitInt.key, sa.null(),
                    models.TraitInt.value, sa.null(), sa.null())
                .filter(sa.exists().where(
                    models.TraitInt.event_id == query.subquery().c.id)),
                session.query(
                    models.TraitFloat.event_id,
                    models.TraitFloat.key, sa.null(), sa.null(),
                    models.TraitFloat.value, sa.null())
                .filter(sa.exists().where(
                    models.TraitFloat.event_id == query.subquery().c.id)),
                session.query(
                    models.TraitText.event_id,
                    models.TraitText.key, sa.null(), sa.null(), sa.null(),
                    models.TraitText.value)
                .filter(sa.exists().where(
                    models.TraitText.event_id == query.subquery().c.id)))

            for id_, key, t_date, t_int, t_float, t_text in (
                    trait_q.order_by(models.TraitDatetime.key)).all():
                if t_int is not None:
                    dtype = api_models.Trait.INT_TYPE
                    val = t_int
                elif t_float is not None:
                    dtype = api_models.Trait.FLOAT_TYPE
                    val = t_float
                elif t_date is not None:
                    dtype = api_models.Trait.DATETIME_TYPE
                    val = t_date
                else:
                    dtype = api_models.Trait.TEXT_TYPE
                    val = t_text

                try:
                    trait_model = api_models.Trait(key, dtype, val)
                    event_list[id_].append_trait(trait_model)
                except KeyError:
                    # NOTE(gordc): this is expected as we do not set
                    # REPEATABLE READ (bug 1506717). if the query is run
                    # while new event data is being recorded, the trait
                    # query may return more data than the event query; the
                    # extra rows can be safely discarded.
                    pass

            return event_list.values()

    def get_event_types(self):
        """Return all event types as an iterable of strings."""
        session = self._engine_facade.get_session()
        with session.begin():
            query = (session.query(models.EventType.desc).
                     order_by(models.EventType.desc))
            for name in query.all():
                # The query returns a tuple with one element.
                yield name[0]

    def get_trait_types(self, event_type):
        """Return a dictionary containing the name and data type of the trait.

        Only trait types for the provided event_type are returned.
        :param event_type: the type of the Event
        """
        session = self._engine_facade.get_session()

        with session.begin():
            for trait_model in [models.TraitText, models.TraitInt,
                                models.TraitFloat, models.TraitDatetime]:
                query = (session.query(trait_model.key)
                         .join(models.Event,
                               models.Event.id == trait_model.event_id)
                         .join(models.EventType,
                               sa.and_(models.EventType.id ==
                                       models.Event.event_type_id,
                                       models.EventType.desc == event_type))
                         .distinct())

                dtype = TRAIT_MODEL_TO_ID.get(trait_model)
                for row in query.all():
                    yield {'name': row[0], 'data_type': dtype}

    def get_traits(self, event_type, trait_type=None):
        """Return all trait instances associated with an event_type.

        If trait_type is specified, only return instances of that trait type.
        :param event_type: the type of the Event to filter by
        :param trait_type: the name of the Trait to filter by
        """
        session = self._engine_facade.get_session()
        with session.begin():
            for trait_model in [models.TraitText, models.TraitInt,
                                models.TraitFloat, models.TraitDatetime]:
                query = (session.query(trait_model.key, trait_model.value)
                         .join(models.Event,
                               models.Event.id == trait_model.event_id)
                         .join(models.EventType,
                               sa.and_(models.EventType.id ==
                                       models.Event.event_type_id,
                                       models.EventType.desc == event_type))
                         .order_by(trait_model.key))
                if trait_type:
                    query = query.filter(trait_model.key == trait_type)

                dtype = TRAIT_MODEL_TO_ID.get(trait_model)
                for k, v in query.all():
                    yield api_models.Trait(name=k,
                                           dtype=dtype,
                                           value=v)

    def clear_expired_data(self, ttl, max_count):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        :param max_count: Number of records to delete.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            event_q = (session.query(models.Event.id)
                       .filter(models.Event.generated < end))

            # NOTE(e0ne): it's not optimal from a performance point of
            # view, but it works with all databases.
            ids = [i[0] for i in event_q.limit(max_count)]
            for trait_model in [models.TraitText, models.TraitInt,
                                models.TraitFloat, models.TraitDatetime]:
                session.query(trait_model).filter(
                    trait_model.event_id.in_(ids)
                ).delete(synchronize_session="fetch")
            event_rows = session.query(models.Event).filter(
                models.Event.id.in_(ids)
            ).delete(synchronize_session="fetch")

            # remove EventType rows with no corresponding matching events
            (session.query(models.EventType)
             .filter(~models.EventType.events.any())
             .delete(synchronize_session="fetch"))
            LOG.info("%d events are removed from database", event_rows)

        return event_rows
|
|
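
# Usage sketch (illustrative only; `conn` is assumed to be a Connection built
# by panko.storage.get_connection()): expired events can be purged in bounded
# batches until a pass removes nothing.
def purge_expired_events(conn, ttl=86400, batch=1000):
    # clear_expired_data() returns the number of deleted event rows, so the
    # loop stops once no event older than `ttl` seconds remains.
    while conn.clear_expired_data(ttl, batch):
        pass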
@@ -1,132 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes for use in the events storage API.
"""
from oslo_utils import timeutils

from panko.storage import base


def serialize_dt(value):
    """Serialize the parameter if it is a datetime."""
    return value.isoformat() if hasattr(value, 'isoformat') else value


class Event(base.Model):
    """A raw event from the source system. Events have Traits.

    Metrics will be derived from one or more Events.
    """

    DUPLICATE = 1
    UNKNOWN_PROBLEM = 2
    INCOMPATIBLE_TRAIT = 3

    SUPPORT_DIRS = ('asc', 'desc')
    SUPPORT_SORT_KEYS = ('message_id', 'generated')
    DEFAULT_DIR = 'asc'
    DEFAULT_SORT = [('generated', 'asc'), ('message_id', 'asc')]
    PRIMARY_KEY = 'message_id'

    def __init__(self, message_id, event_type, generated, traits, raw):
        """Create a new event.

        :param message_id:  Unique ID for the message this event
                            stemmed from. This is different than
                            the Event ID, which comes from the
                            underlying storage system.
        :param event_type:  The type of the event.
        :param generated:   UTC time for when the event occurred.
        :param traits:      list of Traits on this Event.
        :param raw:         Unindexed raw notification details.
        """
        base.Model.__init__(self, message_id=message_id, event_type=event_type,
                            generated=generated, traits=traits, raw=raw)

    def append_trait(self, trait_model):
        self.traits.append(trait_model)

    def __repr__(self):
        trait_list = []
        if self.traits:
            trait_list = [str(trait) for trait in self.traits]
        return ("<Event: %s, %s, %s, %s>" %
                (self.message_id, self.event_type, self.generated,
                 " ".join(trait_list)))

    def serialize(self):
        return {'message_id': self.message_id,
                'event_type': self.event_type,
                'generated': serialize_dt(self.generated),
                'traits': [trait.serialize() for trait in self.traits],
                'raw': self.raw}


class Trait(base.Model):
    """A Trait is a key/value pair of data on an Event.

    The value is a variant record of basic data types (int, date, float, etc).
    """

    NONE_TYPE = 0
    TEXT_TYPE = 1
    INT_TYPE = 2
    FLOAT_TYPE = 3
    DATETIME_TYPE = 4

    type_names = {
        NONE_TYPE: "none",
        TEXT_TYPE: "string",
        INT_TYPE: "integer",
        FLOAT_TYPE: "float",
        DATETIME_TYPE: "datetime"
    }

    def __init__(self, name, dtype, value):
        if not dtype:
            dtype = Trait.NONE_TYPE
        base.Model.__init__(self, name=name, dtype=dtype, value=value)

    def __repr__(self):
        return "<Trait: %s %d %s>" % (self.name, self.dtype, self.value)

    def serialize(self):
        return self.name, self.dtype, serialize_dt(self.value)

    def get_type_name(self):
        return self.get_name_by_type(self.dtype)

    @classmethod
    def get_type_by_name(cls, type_name):
        return getattr(cls, '%s_TYPE' % type_name.upper(), None)

    @classmethod
    def get_type_names(cls):
        return cls.type_names.values()

    @classmethod
    def get_name_by_type(cls, type_id):
        return cls.type_names.get(type_id, "none")

    @classmethod
    def convert_value(cls, trait_type, value):
        if trait_type is cls.INT_TYPE:
            return int(value)
        if trait_type is cls.FLOAT_TYPE:
            return float(value)
        if trait_type is cls.DATETIME_TYPE:
            return timeutils.normalize_time(timeutils.parse_isotime(value))
        # Crop the text value to match the TraitText column size.
        if isinstance(value, bytes):
            return value.decode('utf-8')[:255]
        return str(value)[:255]
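
# A minimal sketch of Trait.convert_value in action (the values are chosen
# for illustration, not taken from the original test suite):
#
#     Trait.convert_value(Trait.INT_TYPE, "42")         # -> 42
#     Trait.convert_value(Trait.FLOAT_TYPE, "2.5")      # -> 2.5
#     Trait.convert_value(Trait.DATETIME_TYPE,
#                         "2017-07-18T22:03:44")        # -> UTC datetime
#     Trait.convert_value(Trait.TEXT_TYPE, b"x" * 300)  # -> 255-char str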
@@ -1,240 +0,0 @@
#
# Copyright Ericsson AB 2013. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB backend
"""

import weakref

from oslo_log import log
from oslo_utils import netutils
import pymongo
import pymongo.errors
import tenacity

from panko.i18n import _

ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86

LOG = log.getLogger(__name__)

EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
                     'datetime': 4}
OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'}

MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4]
COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6]


def make_timestamp_range(start, end,
                         start_timestamp_op=None, end_timestamp_op=None):

    """Create the query document to find timestamps within a range.

    The range is built from the two optional datetimes and their comparison
    operators. By default, $gte is used for the lower bound and $lt for the
    upper bound.
    """
    ts_range = {}

    if start:
        if start_timestamp_op == 'gt':
            start_timestamp_op = '$gt'
        else:
            start_timestamp_op = '$gte'
        ts_range[start_timestamp_op] = start

    if end:
        if end_timestamp_op == 'le':
            end_timestamp_op = '$lte'
        else:
            end_timestamp_op = '$lt'
        ts_range[end_timestamp_op] = end
    return ts_range
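
# For illustration (the datetimes are hypothetical): with default operators,
# make_timestamp_range() produces the fragment callers embed under the
# 'timestamp' key of the final Mongo query.
#
#     import datetime
#     make_timestamp_range(datetime.datetime(2017, 1, 1),
#                          datetime.datetime(2017, 2, 1))
#     # -> {'$gte': datetime(2017, 1, 1), '$lt': datetime(2017, 2, 1)}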


def make_events_query_from_filter(event_filter):
    """Return start and stop row for filtering and a query.

    Query is based on the selected parameter.

    :param event_filter: storage.EventFilter object.
    """
    query = {}
    q_list = []
    ts_range = make_timestamp_range(event_filter.start_timestamp,
                                    event_filter.end_timestamp)
    if ts_range:
        q_list.append({'timestamp': ts_range})
    if event_filter.event_type:
        q_list.append({'event_type': event_filter.event_type})
    if event_filter.message_id:
        q_list.append({'_id': event_filter.message_id})

    if event_filter.traits_filter:
        for trait_filter in event_filter.traits_filter:
            op = trait_filter.pop('op', 'eq')
            dict_query = {}
            for k, v in trait_filter.items():
                if v is not None:
                    # All parameters in EventFilter['traits'] are optional, so
                    # we need to check whether each one is present.
                    if k == 'key':
                        dict_query.setdefault('trait_name', v)
                    elif k in ['string', 'integer', 'datetime', 'float']:
                        dict_query.setdefault('trait_type',
                                              EVENT_TRAIT_TYPES[k])
                        dict_query.setdefault('trait_value',
                                              v if op == 'eq'
                                              else {OP_SIGN[op]: v})
            dict_query = {'$elemMatch': dict_query}
            q_list.append({'traits': dict_query})
    if event_filter.admin_proj:
        q_list.append({'$or': [
            {'traits': {'$not': {'$elemMatch': {'trait_name': 'project_id'}}}},
            {'traits': {
                '$elemMatch': {'trait_name': 'project_id',
                               'trait_value': event_filter.admin_proj}}}]})
    if q_list:
        query = {'$and': q_list}

    return query
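
# A sketch of the resulting document (the filter values are hypothetical):
# for an EventFilter with event_type='compute.instance.create.end' and a
# traits filter of {'key': 'state', 'string': 'active'}, the function
# returns roughly
#
#     {'$and': [
#         {'event_type': 'compute.instance.create.end'},
#         {'traits': {'$elemMatch': {'trait_name': 'state',
#                                    'trait_type': 1,
#                                    'trait_value': 'active'}}}]}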


class ConnectionPool(object):

    def __init__(self):
        self._pool = {}

    def connect(self, url, max_retries, retry_interval):
        connection_options = pymongo.uri_parser.parse_uri(url)
        del connection_options['database']
        del connection_options['username']
        del connection_options['password']
        del connection_options['collection']
        pool_key = tuple(connection_options)

        if pool_key in self._pool:
            client = self._pool.get(pool_key)()
            if client:
                return client
        splitted_url = netutils.urlsplit(url)
        log_data = {'db': splitted_url.scheme,
                    'nodelist': connection_options['nodelist']}
        LOG.info('Connecting to %(db)s on %(nodelist)s' % log_data)
        try:
            client = MongoProxy(pymongo.MongoClient(url),
                                max_retries, retry_interval)
        except pymongo.errors.ConnectionFailure as e:
            LOG.warning(_('Unable to connect to the database server: '
                          '%(errmsg)s.') % {'errmsg': e})
            raise
        self._pool[pool_key] = weakref.ref(client)
        return client


def _safe_mongo_call(max_retries, retry_interval):
    return tenacity.retry(
        retry=tenacity.retry_if_exception_type(
            pymongo.errors.AutoReconnect),
        wait=tenacity.wait_fixed(retry_interval),
        stop=(tenacity.stop_after_attempt(max_retries) if max_retries >= 0
              else tenacity.stop_never)
    )
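
# Usage sketch (the wrapped function is hypothetical): _safe_mongo_call
# returns a plain tenacity decorator, so any callable touching Mongo can be
# made to retry on AutoReconnect. A negative max_retries retries forever.
#
#     @_safe_mongo_call(max_retries=3, retry_interval=2)
#     def insert_event(collection, doc):
#         collection.insert_one(doc)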


MONGO_METHODS = set([typ for typ in dir(pymongo.collection.Collection)
                     if not typ.startswith('_')])
MONGO_METHODS.update(set([typ for typ in dir(pymongo.MongoClient)
                          if not typ.startswith('_')]))
MONGO_METHODS.update(set([typ for typ in dir(pymongo)
                          if not typ.startswith('_')]))


class MongoProxy(object):
    def __init__(self, conn, max_retries, retry_interval):
        self.conn = conn
        self.max_retries = max_retries
        self.retry_interval = retry_interval
        self._recreate_index = _safe_mongo_call(
            self.max_retries, self.retry_interval)(self._recreate_index)

    def __getitem__(self, item):
        """Create and return a proxy around the item in the connection.

        :param item: name of the item in the connection
        """
        return MongoProxy(self.conn[item],
                          self.max_retries, self.retry_interval)

    def find(self, *args, **kwargs):
        # We need this modifying method to return a CursorProxy object so that
        # we can handle the Cursor next function to catch the AutoReconnect
        # exception.
        return CursorProxy(self.conn.find(*args, **kwargs),
                           self.max_retries,
                           self.retry_interval)

    def create_index(self, keys, name=None, *args, **kwargs):
        try:
            self.conn.create_index(keys, name=name, *args, **kwargs)
        except pymongo.errors.OperationFailure as e:
            if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
                LOG.info("Index %s will be recreated." % name)
                self._recreate_index(keys, name, *args, **kwargs)

    def _recreate_index(self, keys, name, *args, **kwargs):
        self.conn.drop_index(name)
        self.conn.create_index(keys, name=name, *args, **kwargs)

    def __getattr__(self, item):
        """Wrap MongoDB connection.

        If item is the name of an executable method, for example find or
        insert, wrap this method with the retry decorator.
        Else wrap getting the attribute with MongoProxy.
        """
        if item in ('name', 'database'):
            return getattr(self.conn, item)
        if item in MONGO_METHODS:
            return _safe_mongo_call(
                self.max_retries, self.retry_interval
            )(getattr(self.conn, item))
        return MongoProxy(getattr(self.conn, item),
                          self.max_retries, self.retry_interval)

    def __call__(self, *args, **kwargs):
        return self.conn(*args, **kwargs)


class CursorProxy(pymongo.cursor.Cursor):
    def __init__(self, cursor, max_retry, retry_interval):
        self.cursor = cursor
        self.next = _safe_mongo_call(max_retry, retry_interval)(self._next)

    def __getitem__(self, item):
        return self.cursor[item]

    def _next(self):
        """Wrap Cursor next method.

        This method will be executed before each Cursor next method call.
        """
        try:
            save_cursor = self.cursor.clone()
            return self.cursor.next()
        except pymongo.errors.AutoReconnect:
            self.cursor = save_cursor
            raise

    def __getattr__(self, item):
        return getattr(self.cursor, item)
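
# How the proxies combine (URL and names are placeholders, not from the
# original file): every public pymongo method reached through MongoProxy is
# wrapped by _safe_mongo_call, so a flapping replica-set primary only
# surfaces once the retry budget is exhausted.
#
#     client = MongoProxy(pymongo.MongoClient('mongodb://localhost:27017'),
#                         max_retries=3, retry_interval=2)
#     events = client['panko']['event']   # still a MongoProxy
#     events.insert_one({'_id': 'abc'})   # retried on AutoReconnect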
@@ -1,151 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB backend
"""
from oslo_log import log
import pymongo

from panko.storage import base
from panko.storage import models
from panko.storage.mongo import utils as pymongo_utils
from panko import utils

LOG = log.getLogger(__name__)


COMMON_AVAILABLE_CAPABILITIES = {
    'events': {'query': {'simple': True}},
}


AVAILABLE_STORAGE_CAPABILITIES = {
    'storage': {'production_ready': True},
}


class Connection(base.Connection):
    """Base event Connection class for MongoDB driver."""
    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       COMMON_AVAILABLE_CAPABILITIES)

    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def record_events(self, event_models):
        """Write the events to database.

        :param event_models: a list of models.Event objects.
        """
        error = None
        for event_model in event_models:
            traits = []
            if event_model.traits:
                for trait in event_model.traits:
                    traits.append({'trait_name': trait.name,
                                   'trait_type': trait.dtype,
                                   'trait_value': trait.value})
            try:
                self.db.event.insert_one(
                    {'_id': event_model.message_id,
                     'event_type': event_model.event_type,
                     'timestamp': event_model.generated,
                     'traits': traits, 'raw': event_model.raw})
            except pymongo.errors.DuplicateKeyError as ex:
                LOG.debug("Duplicate event detected, skipping it: %s", ex)
            except Exception as ex:
                LOG.exception("Failed to record event: %s", ex)
                error = ex
        if error:
            raise error

    def get_events(self, event_filter, pagination=None):
        """Return an iter of models.Event objects.

        :param event_filter: storage.EventFilter object, consists of filters
                             for events that are stored in database.
        :param pagination: Pagination parameters.
        """
        limit = None
        if pagination:
            if pagination.get('sort'):
                LOG.warning('Driver does not support sort functionality')
            limit = pagination.get('limit')
            if limit == 0:
                return
        q = pymongo_utils.make_events_query_from_filter(event_filter)
        if limit is not None:
            results = self.db.event.find(q, limit=limit)
        else:
            results = self.db.event.find(q)
        for event in results:
            traits = []
            for trait in event['traits']:
                traits.append(models.Trait(name=trait['trait_name'],
                                           dtype=int(trait['trait_type']),
                                           value=trait['trait_value']))
            yield models.Event(message_id=event['_id'],
                               event_type=event['event_type'],
                               generated=event['timestamp'],
                               traits=traits, raw=event.get('raw'))
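
# Pagination sketch (the filter object is assumed to exist): this driver
# honours only the 'limit' key; 'sort' is logged and ignored, and a limit of
# zero yields nothing.
#
#     conn.get_events(event_filter, pagination={'limit': 10})  # first 10
#     conn.get_events(event_filter, pagination={'limit': 0})   # empty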

    def get_event_types(self):
        """Return all event types as an iter of strings."""
        return self.db.event.distinct('event_type')

    def get_trait_types(self, event_type):
        """Return a dictionary containing the name and data type of the trait.

        Only trait types for the provided event_type are returned.

        :param event_type: the type of the Event.
        """
        trait_names = set()
        events = self.db.event.find({'event_type': event_type})

        for event in events:
            for trait in event['traits']:
                trait_name = trait['trait_name']
                if trait_name not in trait_names:
                    # Only yield each trait type once; a given trait name is
                    # assumed to map to a single trait type.
                    trait_names.add(trait_name)
                    yield {'name': trait_name,
                           'data_type': trait['trait_type']}

    def get_traits(self, event_type, trait_name=None):
        """Return all trait instances associated with an event_type.

        If trait_name is specified, only return instances of that trait name.

        :param event_type: the type of the Event to filter by
        :param trait_name: the name of the Trait to filter by
        """
        if not trait_name:
            events = self.db.event.find({'event_type': event_type})
        else:
            # We select events that simultaneously have the event_type and
            # the given trait_name, and retrieve them with only the
            # mentioned traits projected.
            events = self.db.event.find({'$and': [{'event_type': event_type},
                                        {'traits.trait_name': trait_name}]},
                                        {'traits': {'$elemMatch':
                                                    {'trait_name': trait_name}}
                                         })
        for event in events:
            for trait in event['traits']:
                yield models.Trait(name=trait['trait_name'],
                                   dtype=trait['trait_type'],
                                   value=trait['trait_value'])
@@ -1 +0,0 @@
Generic single-database configuration.
@@ -1,74 +0,0 @@
# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = panko.storage.sqlalchemy:alembic

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =

# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false

# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions

# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8

sqlalchemy.url = driver://user:pass@localhost/dbname


# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
@@ -1,87 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from alembic import config as alembic_config
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = alembic_config.Config(os.path.join(os.path.dirname(__file__),
                                            'alembic.ini'))

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url, target_metadata=target_metadata, literal_binds=True)

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
@@ -1,24 +0,0 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}


def upgrade():
    ${upgrades if upgrades else "pass"}


def downgrade():
    ${downgrades if downgrades else "pass"}
@@ -1,33 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""support big integer traits

Revision ID: c3955547bff2
Revises:
Create Date: 2017-07-18 22:03:44.996571

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = 'c3955547bff2'
down_revision = None
branch_labels = None
depends_on = None


def upgrade():
    op.alter_column('trait_int', 'value', type_=sa.BigInteger)
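
# The revision ships no downgrade; a minimal sketch of one (an assumption,
# not part of the original migration) would simply narrow the column back
# to a plain Integer:
#
#     def downgrade():
#         op.alter_column('trait_int', 'value', type_=sa.Integer)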
@@ -1,186 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
SQLAlchemy models for Panko data.
"""

from oslo_serialization import jsonutils

import sqlalchemy
from sqlalchemy import Column, Integer, String, ForeignKey, Index
from sqlalchemy import BigInteger, Float, DateTime
from sqlalchemy.dialects.mysql import DECIMAL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import deferred
from sqlalchemy.orm import relationship
from sqlalchemy.types import TypeDecorator

from panko import utils


class JSONEncodedDict(TypeDecorator):
    """Represents an immutable structure as a json-encoded string."""

    impl = sqlalchemy.Text

    @staticmethod
    def process_bind_param(value, dialect):
        if value is not None:
            value = jsonutils.dumps(value)
        return value

    @staticmethod
    def process_result_value(value, dialect):
        if value is not None:
            value = jsonutils.loads(value)
        return value
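
# Usage sketch (the model below is hypothetical): a declarative model can
# persist any JSON-serializable structure through this decorator, and reads
# transparently get the decoded object back.
#
#     class Snapshot(Base):
#         __tablename__ = 'snapshot'
#         id = Column(Integer, primary_key=True)
#         payload = Column(JSONEncodedDict())  # TEXT in the DB, dict in code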


class PreciseTimestamp(TypeDecorator):
    """Represents a timestamp precise to the microsecond."""

    impl = DateTime

    def load_dialect_impl(self, dialect):
        if dialect.name == 'mysql':
            return dialect.type_descriptor(DECIMAL(precision=20,
                                                   scale=6,
                                                   asdecimal=True))
        return self.impl

    @staticmethod
    def process_bind_param(value, dialect):
        if value is None:
            return value
        elif dialect.name == 'mysql':
            return utils.dt_to_decimal(value)
        return value

    @staticmethod
    def process_result_value(value, dialect):
        if value is None:
            return value
        elif dialect.name == 'mysql':
            return utils.decimal_to_dt(value)
        return value


class PankoBase(object):
    """Base class for Panko Models."""
    __table_args__ = {'mysql_charset': "utf8",
                      'mysql_engine': "InnoDB"}
    __table_initialized__ = False

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in values.items():
            setattr(self, k, v)


Base = declarative_base(cls=PankoBase)


class EventType(Base):
    """Types of event records."""
    __tablename__ = 'event_type'

    id = Column(Integer, primary_key=True)
    desc = Column(String(255), unique=True)

    def __init__(self, event_type):
        self.desc = event_type

    def __repr__(self):
        return "<EventType: %s>" % self.desc


class Event(Base):
    __tablename__ = 'event'
    __table_args__ = (
        Index('ix_event_message_id', 'message_id'),
        Index('ix_event_type_id', 'event_type_id'),
        Index('ix_event_generated', 'generated')
    )
    id = Column(Integer, primary_key=True)
    message_id = Column(String(50), unique=True)
    generated = Column(PreciseTimestamp())
    raw = deferred(Column(JSONEncodedDict()))

    event_type_id = Column(Integer, ForeignKey('event_type.id'))
    event_type = relationship("EventType", backref='events')

    def __init__(self, message_id, event_type, generated, raw):
        self.message_id = message_id
        self.event_type = event_type
        self.generated = generated
        self.raw = raw

    def __repr__(self):
        return "<Event %d('Event: %s %s, Generated: %s')>" % (self.id,
                                                              self.message_id,
                                                              self.event_type,
                                                              self.generated)


class TraitText(Base):
    """Event text traits."""

    __tablename__ = 'trait_text'
    __table_args__ = (
        Index('ix_trait_text_event_id_key', 'event_id', 'key'),
    )
    event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
    key = Column(String(255), primary_key=True)
    value = Column(String(255))


class TraitInt(Base):
    """Event integer traits."""

    __tablename__ = 'trait_int'
    __table_args__ = (
        Index('ix_trait_int_event_id_key', 'event_id', 'key'),
    )
    event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
    key = Column(String(255), primary_key=True)
    value = Column(BigInteger)


class TraitFloat(Base):
    """Event float traits."""

    __tablename__ = 'trait_float'
    __table_args__ = (
        Index('ix_trait_float_event_id_key', 'event_id', 'key'),
    )
    event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
    key = Column(String(255), primary_key=True)
    value = Column(Float(53))


class TraitDatetime(Base):
    """Event datetime traits."""

    __tablename__ = 'trait_datetime'
    __table_args__ = (
        Index('ix_trait_datetime_event_id_key', 'event_id', 'key'),
    )
    event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
    key = Column(String(255), primary_key=True)
    value = Column(PreciseTimestamp())
@@ -1,88 +0,0 @@
# Copyright 2012 New Dream Network (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test base classes.
"""
import functools
import os.path

from oslo_utils import timeutils
from oslotest import base
from testtools import testcase
import webtest

import panko


class BaseTestCase(base.BaseTestCase):
    def assertTimestampEqual(self, first, second, msg=None):
        """Check that two timestamps are equal.

        This relies on assertAlmostEqual to avoid rounding problems, and
        only checks down to microsecond resolution.
        """
        return self.assertAlmostEqual(
            timeutils.delta_seconds(first, second),
            0.0,
            places=5)

    def assertIsEmpty(self, obj):
        try:
            if len(obj) != 0:
                self.fail("%s is not empty" % type(obj))
        except (TypeError, AttributeError):
            self.fail("%s doesn't have length" % type(obj))

    def assertIsNotEmpty(self, obj):
        try:
            if len(obj) == 0:
                self.fail("%s is empty" % type(obj))
        except (TypeError, AttributeError):
            self.fail("%s doesn't have length" % type(obj))

    @staticmethod
    def path_get(project_file=None):
        root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '..',
                                            '..',
                                            )
                               )
        if project_file:
            return os.path.join(root, project_file)
        else:
            return root


def _skip_decorator(func):
    @functools.wraps(func)
    def skip_if_not_implemented(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except panko.NotImplementedError as e:
            raise testcase.TestSkipped(str(e))
        except webtest.app.AppError as e:
            if 'not implemented' in str(e):
                raise testcase.TestSkipped(str(e))
            raise
    return skip_if_not_implemented


class SkipNotImplementedMeta(type):
    def __new__(cls, name, bases, local):
        for attr in local:
            value = local[attr]
            if callable(value) and (
                    attr.startswith('test_') or attr == 'setUp'):
                local[attr] = _skip_decorator(value)
        return type.__new__(cls, name, bases, local)
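
# Usage sketch (the test class is hypothetical): declaring the metaclass is
# enough; every test_* method and setUp is wrapped so a backend raising
# panko.NotImplementedError skips the test instead of failing it.
#
#     class TestCapabilities(BaseTestCase,
#                            metaclass=SkipNotImplementedMeta):
#         def test_complex_query(self):
#             ...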
@@ -1,230 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base classes for API tests."""
import os
from unittest import mock
from urllib import parse as urlparse
import warnings

import fixtures
from oslo_utils import uuidutils
import sqlalchemy
from testtools import testcase

from panko import service
from panko import storage
from panko.tests import base as test_base
try:
    from panko.tests import mocks
except ImportError:
    mocks = None  # happybase module is not Python 3 compatible yet


class MongoDbManager(fixtures.Fixture):

    def __init__(self, url, conf):
        self._url = url
        self.conf = conf

    def setUp(self):
        super(MongoDbManager, self).setUp()
        with warnings.catch_warnings():
            warnings.filterwarnings(
                action='ignore',
                message='.*you must provide a username and password.*')
            try:
                self.connection = storage.get_connection(self.url, self.conf)
            except storage.StorageBadVersion as e:
                raise testcase.TestSkipped(str(e))

    @property
    def url(self):
        return '%(url)s_%(db)s' % {
            'url': self._url,
            'db': uuidutils.generate_uuid(dashed=False)
        }


class SQLManager(fixtures.Fixture):
    def __init__(self, url, conf):
        db_name = 'panko_%s' % uuidutils.generate_uuid(dashed=False)
        engine = sqlalchemy.create_engine(url)
        conn = engine.connect()
        self._create_database(conn, db_name)
        conn.close()
        engine.dispose()
        parsed = list(urlparse.urlparse(url))
        parsed[2] = '/' + db_name
        self.url = urlparse.urlunparse(parsed)
        self.conf = conf

    def setUp(self):
        super(SQLManager, self).setUp()
        self.connection = storage.get_connection(self.url, self.conf)


class PgSQLManager(SQLManager):
    @staticmethod
    def _create_database(conn, db_name):
        conn.connection.set_isolation_level(0)
        conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name)
        conn.connection.set_isolation_level(1)


class MySQLManager(SQLManager):
    @staticmethod
    def _create_database(conn, db_name):
        conn.execute('CREATE DATABASE %s;' % db_name)


class ElasticSearchManager(fixtures.Fixture):
    def __init__(self, url, conf):
        self.url = url
        self.conf = conf

    def setUp(self):
        super(ElasticSearchManager, self).setUp()
        self.connection = storage.get_connection(
            self.url, self.conf)
        # prefix each test with unique index name
        inx_uuid = uuidutils.generate_uuid(dashed=False)
        self.connection.index_name = 'events_%s' % inx_uuid
        # force index on write so data is queryable right away
        self.connection._refresh_on_write = True


class HBaseManager(fixtures.Fixture):
    def __init__(self, url, conf):
        self._url = url
        self.conf = conf

    def setUp(self):
        super(HBaseManager, self).setUp()
        self.connection = storage.get_connection(
            self.url, self.conf)
        # Use a unique prefix for each test so its data can be told apart,
        # because all test data is stored in one table.
        data_prefix = uuidutils.generate_uuid(dashed=False)

        def table(conn, name):
            return mocks.MockHBaseTable(name, conn, data_prefix)

        # Mock only the real HBase connection; MConnection's "table" method
        # stays as-is.
        mock.patch('happybase.Connection.table', new=table).start()
        # We shouldn't delete data and tables after each test,
        # because it takes too long.
        # All test tables will be deleted in setup-test-env.sh.
        mock.patch("happybase.Connection.disable_table",
                   new=mock.MagicMock()).start()
        mock.patch("happybase.Connection.delete_table",
                   new=mock.MagicMock()).start()
        mock.patch("happybase.Connection.create_table",
                   new=mock.MagicMock()).start()

    @property
    def url(self):
        return '%s?table_prefix=%s&table_prefix_separator=%s' % (
            self._url,
            os.getenv("PANKO_TEST_HBASE_TABLE_PREFIX", "test"),
            os.getenv("PANKO_TEST_HBASE_TABLE_PREFIX_SEPARATOR", "_")
        )


class SQLiteManager(fixtures.Fixture):

    def __init__(self, url, conf):
        self.url = url
        self.conf = conf

    def setUp(self):
        super(SQLiteManager, self).setUp()
        self.connection = storage.get_connection(
            self.url, self.conf)


class TestBase(test_base.BaseTestCase,
               metaclass=test_base.SkipNotImplementedMeta):

    DRIVER_MANAGERS = {
        'mongodb': MongoDbManager,
        'mysql': MySQLManager,
        'postgresql': PgSQLManager,
        'sqlite': SQLiteManager,
        'es': ElasticSearchManager,
    }
    if mocks is not None:
        DRIVER_MANAGERS['hbase'] = HBaseManager

    def setUp(self):
        super(TestBase, self).setUp()
        db_url = os.environ.get('PIFPAF_URL', "sqlite://").replace(
            "mysql://", "mysql+pymysql://")

        engine = urlparse.urlparse(db_url).scheme
        # in case some drivers have additional specification, for example:
        # PyMySQL will have scheme mysql+pymysql
        engine = engine.split('+')[0]

        # NOTE(Alexei_987) Shortcut to skip expensive db setUp
        test_method = self._get_test_method()
        if (hasattr(test_method, '_run_with')
                and engine not in test_method._run_with):
            raise testcase.TestSkipped(
                'Test is not applicable for %s' % engine)

        self.CONF = service.prepare_service([], [])

        manager = self.DRIVER_MANAGERS.get(engine)
        if not manager:
            self.skipTest("missing driver manager: %s" % engine)

        self.db_manager = manager(db_url, self.CONF)

        self.useFixture(self.db_manager)

        self.conn = self.db_manager.connection
        self.conn.upgrade()

        self.useFixture(fixtures.MockPatch('panko.storage.get_connection',
                                           side_effect=self._get_connection))

    def tearDown(self):
        self.conn.clear()
        self.conn = None
        super(TestBase, self).tearDown()

    def _get_connection(self, url, conf):
        return self.conn


def run_with(*drivers):
    """Mark tests that are only applicable to certain db drivers.

    Skips the test if the driver is not available.
    """
    def decorator(test):
        if isinstance(test, type) and issubclass(test, TestBase):
            # Decorate all test methods
            for attr in dir(test):
                value = getattr(test, attr)
                if callable(value) and attr.startswith('test_'):
                    value._run_with = drivers
        else:
            test._run_with = drivers
        return test
    return decorator
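
# Usage sketch (the test class is hypothetical): run_with() accepts a single
# test method or a whole TestBase subclass and skips when the PIFPAF_URL
# scheme does not match one of the listed drivers.
#
#     @run_with('mysql', 'postgresql')
#     class BigIntTraitTest(TestBase):
#         def test_big_int_trait(self):
#             ...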
@@ -1,163 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests.
"""

from oslo_policy import opts
import webtest

from panko.api import app
from panko.api import rbac
from panko import service
from panko.tests import db as db_test_base


class FunctionalTest(db_test_base.TestBase):
    """Used for functional tests of Pecan controllers.

    Use this when you need to test the actual application and its
    integration with the framework.
    """

    PATH_PREFIX = ''

    def setUp(self):
        super(FunctionalTest, self).setUp()
        self.CONF = service.prepare_service([], [])
        opts.set_defaults(self.CONF)

        self.CONF.set_override('api_paste_config',
                               self.path_get('etc/panko/api_paste.ini'))
        self.app = self._make_app(self.CONF)

    @staticmethod
    def _make_app(conf):
        return webtest.TestApp(app.load_app(conf, appname='panko+noauth'))

    def tearDown(self):
        super(FunctionalTest, self).tearDown()
        rbac.reset()

    def put_json(self, path, params, expect_errors=False, headers=None,
                 extra_environ=None, status=None):
        """Send a simulated HTTP PUT request to the Pecan test app.

        :param path: url path of target service
        :param params: content for wsgi.input of request
        :param expect_errors: boolean value whether an error is expected based
                              on request
        :param headers: A dictionary of headers to send along with the request
        :param extra_environ: A dictionary of environ variables to send along
                              with the request
        :param status: Expected status code of response
        """
        return self.post_json(path=path, params=params,
                              expect_errors=expect_errors,
                              headers=headers, extra_environ=extra_environ,
                              status=status, method="put")

    def post_json(self, path, params, expect_errors=False, headers=None,
                  method="post", extra_environ=None, status=None):
        """Send a simulated HTTP POST request to the Pecan test app.

        :param path: url path of target service
        :param params: content for wsgi.input of request
        :param expect_errors: boolean value whether an error is expected based
                              on request
        :param headers: A dictionary of headers to send along with the request
        :param method: Request method type. Appropriate method function call
                       should be used rather than passing attribute in.
        :param extra_environ: A dictionary of environ variables to send along
                              with the request
        :param status: Expected status code of response
        """
        full_path = self.PATH_PREFIX + path
        response = getattr(self.app, "%s_json" % method)(
            str(full_path),
            params=params,
            headers=headers,
            status=status,
            extra_environ=extra_environ,
            expect_errors=expect_errors
        )
        return response

    def delete(self, path, expect_errors=False, headers=None,
               extra_environ=None, status=None):
        """Send a simulated HTTP DELETE request to the Pecan test app.

        :param path: url path of target service
        :param expect_errors: boolean value whether an error is expected based
                              on request
        :param headers: A dictionary of headers to send along with the request
        :param extra_environ: A dictionary of environ variables to send along
                              with the request
        :param status: Expected status code of response
        """
        full_path = self.PATH_PREFIX + path
        response = self.app.delete(str(full_path),
                                   headers=headers,
                                   status=status,
                                   extra_environ=extra_environ,
                                   expect_errors=expect_errors)
        return response

    def get_json(self, path, expect_errors=False, headers=None,
                 extra_environ=None, q=None, groupby=None, status=None,
                 override_params=None, **params):
        """Send a simulated HTTP GET request to the Pecan test app.

        :param path: url path of target service
        :param expect_errors: boolean value whether an error is expected based
                              on request
        :param headers: A dictionary of headers to send along with the request
        :param extra_environ: A dictionary of environ variables to send along
                              with the request
        :param q: list of queries consisting of: field, value, op, and type
                  keys
        :param groupby: list of fields to group by
        :param status: Expected status code of response
        :param override_params: literally encoded query param string
        :param params: content for wsgi.input of request
        """
        q = q or []
        groupby = groupby or []
        full_path = self.PATH_PREFIX + path
        if override_params:
            all_params = override_params
        else:
            query_params = {'q.field': [],
                            'q.value': [],
                            'q.op': [],
                            'q.type': [],
                            }
            for query in q:
                for name in ['field', 'op', 'value', 'type']:
                    query_params['q.%s' % name].append(query.get(name, ''))
            all_params = {}
            all_params.update(params)
            if q:
                all_params.update(query_params)
            if groupby:
                all_params.update({'groupby': groupby})
        response = self.app.get(full_path,
                                params=all_params,
                                headers=headers,
                                extra_environ=extra_environ,
                                expect_errors=expect_errors,
                                status=status)
        if not expect_errors:
            response = response.json
        return response
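
# Query sketch (field and value are hypothetical): each entry in `q` expands
# into parallel q.field/q.op/q.value/q.type query parameters.
#
#     self.get_json('/events',
#                   q=[{'field': 'event_type',
#                       'op': 'eq',
#                       'value': 'compute.instance.create.start'}])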
@@ -1,20 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from panko.tests.functional import api


class FunctionalTest(api.FunctionalTest):
    PATH_PREFIX = '/v2'
@ -1,165 +0,0 @@
|
|||
#
|
||||
# Copyright 2012 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""Test ACL."""
|
||||
|
||||
import datetime
|
||||
import os
|
||||
|
||||
from keystonemiddleware import fixture as ksm_fixture
|
||||
from oslo_utils import fileutils
|
||||
from oslo_utils import uuidutils
|
||||
import webtest
|
||||
|
||||
from panko.api import app
|
||||
from panko.storage import models
|
||||
from panko.tests.functional.api import v2
|
||||
|
||||
VALID_TOKEN = uuidutils.generate_uuid(dashed=False)
|
||||
VALID_TOKEN2 = uuidutils.generate_uuid(dashed=False)
|
||||
|
||||
|
||||
class TestAPIACL(v2.FunctionalTest):
|
||||
|
||||
def setUp(self):
|
||||
super(TestAPIACL, self).setUp()
|
||||
self.auth_token_fixture = self.useFixture(
|
||||
ksm_fixture.AuthTokenFixture())
|
||||
self.auth_token_fixture.add_token_data(
|
||||
token_id=VALID_TOKEN,
|
||||
# FIXME(morganfainberg): The project-id should be a proper uuid
|
||||
project_id='123i2910',
|
||||
role_list=['admin'],
|
||||
user_name='user_id2',
|
||||
user_id='user_id2',
|
||||
is_v2=True
|
||||
)
|
||||
self.auth_token_fixture.add_token_data(
|
||||
token_id=VALID_TOKEN2,
|
||||
# FIXME(morganfainberg): The project-id should be a proper uuid
|
||||
project_id='project-good',
|
||||
role_list=['Member'],
|
||||
user_name='user_id1',
|
||||
user_id='user_id1',
|
||||
is_v2=True)
|
||||
|
||||
def get_json(self, path, expect_errors=False, headers=None,
|
||||
q=None, **params):
|
||||
return super(TestAPIACL, self).get_json(path,
|
||||
expect_errors=expect_errors,
|
||||
headers=headers,
|
||||
q=q or [],
|
||||
**params)
|
||||
|
||||
@staticmethod
|
||||
def _make_app(conf):
|
||||
return webtest.TestApp(app.load_app(conf, appname='panko+keystone'))
|
||||
|
||||
|
||||
class TestAPIEventACL(TestAPIACL):
|
||||
|
||||
PATH = '/events'
|
||||
|
||||
def test_non_admin_get_event_types(self):
|
||||
data = self.get_json('/event_types', expect_errors=True,
|
||||
headers={"X-Roles": "Member",
|
||||
"X-Auth-Token": VALID_TOKEN2,
|
||||
"X-Project-Id": "project-good"})
|
||||
self.assertEqual(401, data.status_int)
|
||||
|
||||
|
||||
class TestBaseApiEventRBAC(v2.FunctionalTest):
|
||||
|
||||
PATH = '/events'
|
||||
|
||||
def setUp(self):
|
||||
super(TestBaseApiEventRBAC, self).setUp()
|
||||
traits = [models.Trait('project_id', 1, 'project-good'),
|
||||
models.Trait('user_id', 1, 'user-good')]
|
||||
self.message_id = uuidutils.generate_uuid()
|
||||
ev = models.Event(self.message_id, 'event_type',
|
||||
datetime.datetime.now(), traits, {})
|
||||
self.conn.record_events([ev])
|
||||
|
||||
def test_get_events_without_project(self):
|
||||
headers_no_proj = {"X-Roles": "admin", "X-User-Id": "user-good"}
|
||||
resp = self.get_json(self.PATH, expect_errors=True,
|
||||
headers=headers_no_proj, status=403)
|
||||
self.assertEqual(403, resp.status_int)
|
||||
|
||||
def test_get_events_without_user(self):
|
||||
headers_no_user = {"X-Roles": "admin", "X-Project-Id": "project-good"}
|
||||
resp = self.get_json(self.PATH, expect_errors=True,
|
||||
headers=headers_no_user, status=403)
|
||||
self.assertEqual(403, resp.status_int)
|
||||
|
||||
def test_get_events_without_scope(self):
|
||||
headers_no_user_proj = {"X-Roles": "admin"}
|
||||
resp = self.get_json(self.PATH,
|
||||
expect_errors=True,
|
||||
headers=headers_no_user_proj,
|
||||
status=403)
|
||||
self.assertEqual(403, resp.status_int)
|
||||
|
||||
def test_get_events(self):
|
||||
headers = {"X-Roles": "Member", "X-User-Id": "user-good",
|
||||
"X-Project-Id": "project-good"}
|
||||
self.get_json(self.PATH, headers=headers, status=200)
|
||||
|
||||
def test_get_event(self):
|
||||
headers = {"X-Roles": "Member", "X-User-Id": "user-good",
|
||||
"X-Project-Id": "project-good"}
|
||||
self.get_json(self.PATH + "/" + self.message_id, headers=headers,
|
||||
status=200)
|
||||
|
||||
|
||||
class TestApiEventAdminRBAC(TestBaseApiEventRBAC):

    def _make_app(self, conf):
        content = ('{"context_is_admin": "role:admin",'
                   '"telemetry:events:index": "rule:context_is_admin",'
                   '"telemetry:events:show": "rule:context_is_admin"}')
        content = content.encode('utf-8')
        self.tempfile = fileutils.write_to_tempfile(content=content,
                                                    prefix='policy',
                                                    suffix='.json')

        conf.set_override("policy_file", self.tempfile, group='oslo_policy')
        return webtest.TestApp(app.load_app(conf, appname='panko+noauth'))

    def tearDown(self):
        os.remove(self.tempfile)
        super(TestApiEventAdminRBAC, self).tearDown()

    def test_get_events(self):
        headers_rbac = {"X-Roles": "admin", "X-User-Id": "user-good",
                        "X-Project-Id": "project-good"}
        self.get_json(self.PATH, headers=headers_rbac, status=200)

    def test_get_events_bad(self):
        headers_rbac = {"X-Roles": "Member", "X-User-Id": "user-good",
                        "X-Project-Id": "project-good"}
        self.get_json(self.PATH, headers=headers_rbac, status=403)

    def test_get_event(self):
        headers = {"X-Roles": "admin", "X-User-Id": "user-good",
                   "X-Project-Id": "project-good"}
        self.get_json(self.PATH + "/" + self.message_id, headers=headers,
                      status=200)

    def test_get_event_bad(self):
        headers = {"X-Roles": "Member", "X-User-Id": "user-good",
                   "X-Project-Id": "project-good"}
        self.get_json(self.PATH + "/" + self.message_id, headers=headers,
                      status=403)

@@ -1,90 +0,0 @@
#
# Copyright 2013 IBM Corp.
# Copyright 2013 Julien Danjou
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test basic panko-api app."""
from panko.tests.functional.api import v2


class TestApiMiddleware(v2.FunctionalTest):

    no_lang_translated_error = 'No lang translated error'
    en_US_translated_error = 'en-US translated error'

    def _fake_translate(self, message, user_locale):
        if user_locale is None:
            return self.no_lang_translated_error
        else:
            return self.en_US_translated_error
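    # JSON should win negotiation whenever it is acceptable: requested
    # explicitly, tied or ahead on q-values (unqualified media types default
    # to q=1.0), matched by */*, or used as the default with no Accept header.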
    def test_json_parsable_error_middleware_404(self):
        response = self.get_json('/invalid_path',
                                 expect_errors=True,
                                 headers={"Accept":
                                          "application/json"}
                                 )
        self.assertEqual(404, response.status_int)
        self.assertEqual("application/json", response.content_type)
        self.assertTrue(response.json['error_message'])
        response = self.get_json('/invalid_path',
                                 expect_errors=True,
                                 headers={"Accept":
                                          "application/json,application/xml"}
                                 )
        self.assertEqual(404, response.status_int)
        self.assertEqual("application/json", response.content_type)
        self.assertTrue(response.json['error_message'])
        response = self.get_json('/invalid_path',
                                 expect_errors=True,
                                 headers={"Accept":
                                          "application/xml;q=0.8, \
                                          application/json"}
                                 )
        self.assertEqual(404, response.status_int)
        self.assertEqual("application/json", response.content_type)
        self.assertTrue(response.json['error_message'])
        response = self.get_json('/invalid_path',
                                 expect_errors=True
                                 )
        self.assertEqual(404, response.status_int)
        self.assertEqual("application/json", response.content_type)
        self.assertTrue(response.json['error_message'])
        response = self.get_json('/invalid_path',
                                 expect_errors=True,
                                 headers={"Accept":
                                          "text/html,*/*"}
                                 )
        self.assertEqual(404, response.status_int)
        self.assertEqual("application/json", response.content_type)
        self.assertTrue(response.json['error_message'])
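    # XML is served only when the client explicitly prefers it, either by
    # naming it outright or by giving it a higher q-value than JSON.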
    def test_xml_parsable_error_middleware_404(self):
        response = self.get_json('/invalid_path',
                                 expect_errors=True,
                                 headers={"Accept":
                                          "application/xml,*/*"}
                                 )
        self.assertEqual(404, response.status_int)
        self.assertEqual("application/xml", response.content_type)
        self.assertEqual('error_message', response.xml.tag)
        response = self.get_json('/invalid_path',
                                 expect_errors=True,
                                 headers={"Accept":
                                          "application/json;q=0.8 \
                                          ,application/xml"}
                                 )
        self.assertEqual(404, response.status_int)
        self.assertEqual("application/xml", response.content_type)
        self.assertEqual('error_message', response.xml.tag)

@@ -1,30 +0,0 @@
#
# Copyright Ericsson AB 2014. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from panko.tests.functional.api import v2 as tests_api


class TestCapabilitiesController(tests_api.FunctionalTest):

    def setUp(self):
        super(TestCapabilitiesController, self).setUp()
        self.url = '/capabilities'

    def test_capabilities(self):
        data = self.get_json(self.url)
        # check that capabilities data contains both 'api' and
        # 'event_storage' fields
        self.assertIsNotNone(data)
        self.assertNotEqual({}, data)
        self.assertIn('api', data)
        self.assertIn('event_storage', data)
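        # The exact capability payload is backend-specific; the contract
        # checked here is only that both top-level sections are present.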