diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index b474acb..0000000 --- a/.coveragerc +++ /dev/null @@ -1,12 +0,0 @@ -[run] -branch = True -source = watcher -omit = - watcher/tests/* - watcher/hacking/* - -[report] -ignore_errors = True -exclude_lines = - @abc.abstract - raise NotImplementedError diff --git a/.gitignore b/.gitignore deleted file mode 100644 index debf764..0000000 --- a/.gitignore +++ /dev/null @@ -1,74 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg* -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage* -.tox -nosetests.xml -.testrepository -.venv -.idea - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build -doc/source/api/* -doc/source/samples -doc/source/watcher.conf.sample -!doc/source/api/index.rst -!doc/source/api/v1.rst - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? 
- -sftp-config.json -/.idea/ -/cover/ -.settings/ -.eclipse - -cover -/demo/ - - -# Files created by releasenotes build -releasenotes/build - -# Desktop Service Store -*.DS_Store diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 9c9a533..0000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/watcher.git diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6f..0000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 0c9a76a..0000000 --- a/.testr.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./watcher/tests} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 673a5fe..0000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - https://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/watcher diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 225c8a0..0000000 --- a/HACKING.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -========================== -watcher Style Commandments -========================== - -Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a..0000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README b/README new file mode 100644 index 0000000..8fcd2b2 --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. 
+ +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index 289eb91..0000000 --- a/README.rst +++ /dev/null @@ -1,31 +0,0 @@ -======================== -Team and repository tags -======================== - -.. image:: https://governance.openstack.org/badges/watcher.svg - :target: https://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on - -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======= -Watcher -======= - -OpenStack Watcher provides a flexible and scalable resource optimization -service for multi-tenant OpenStack-based clouds. -Watcher provides a robust framework to realize a wide range of cloud -optimization goals, including the reduction of data center -operating costs, increased system performance via intelligent virtual machine -migration, increased energy efficiency-and more! - -* Free software: Apache license -* Wiki: https://wiki.openstack.org/wiki/Watcher -* Source: https://github.com/openstack/watcher -* Bugs: https://bugs.launchpad.net/watcher -* Documentation: https://docs.openstack.org/watcher/latest/ diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb..0000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/devstack/files/apache-watcher-api.template b/devstack/files/apache-watcher-api.template deleted file mode 100644 index 1f9cffb..0000000 --- a/devstack/files/apache-watcher-api.template +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using the -# Watcher API through mod_wsgi. This version assumes you are -# running devstack to configure the software. - -Listen %WATCHER_SERVICE_PORT% - - - WSGIDaemonProcess watcher-api user=%USER% processes=%APIWORKERS% threads=1 display-name=%{GROUP} - WSGIScriptAlias / %WATCHER_WSGI_DIR%/app.wsgi - WSGIApplicationGroup %{GLOBAL} - WSGIProcessGroup watcher-api - WSGIPassAuthorization On - - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/watcher-api.log - CustomLog /var/log/%APACHE_NAME%/watcher-api-access.log combined - - - - WSGIProcessGroup watcher-api - WSGIApplicationGroup %{GLOBAL} - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/devstack/lib/watcher b/devstack/lib/watcher deleted file mode 100644 index a0fd2c4..0000000 --- a/devstack/lib/watcher +++ /dev/null @@ -1,313 +0,0 @@ -#!/bin/bash -# -# lib/watcher -# Functions to control the configuration and operation of the watcher services - -# Dependencies: -# -# - ``functions`` file -# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# - is_watcher_enabled -# - install_watcher -# - configure_watcher -# - create_watcher_conf -# - init_watcher -# - start_watcher -# - stop_watcher -# - cleanup_watcher - -# Save trace setting -_XTRACE_WATCHER=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories 
-WATCHER_REPO=${WATCHER_REPO:-${GIT_BASE}/openstack/watcher.git} -WATCHER_BRANCH=${WATCHER_BRANCH:-master} -WATCHER_DIR=$DEST/watcher - -GITREPO["python-watcherclient"]=${WATCHERCLIENT_REPO:-${GIT_BASE}/openstack/python-watcherclient.git} -GITBRANCH["python-watcherclient"]=${WATCHERCLIENT_BRANCH:-master} -GITDIR["python-watcherclient"]=$DEST/python-watcherclient - -WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher} -WATCHER_AUTH_CACHE_DIR=${WATCHER_AUTH_CACHE_DIR:-/var/cache/watcher} - -WATCHER_CONF_DIR=/etc/watcher -WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf -WATCHER_POLICY_JSON=$WATCHER_CONF_DIR/policy.json - -WATCHER_DEVSTACK_DIR=$WATCHER_DIR/devstack -WATCHER_DEVSTACK_FILES_DIR=$WATCHER_DEVSTACK_DIR/files - -NOVA_CONF_DIR=/etc/nova -NOVA_CONF=$NOVA_CONF_DIR/nova.conf - -if is_ssl_enabled_service "watcher" || is_service_enabled tls-proxy; then - WATCHER_SERVICE_PROTOCOL="https" -fi - -WATCHER_USE_MOD_WSGI=$(trueorfalse True WATCHER_USE_MOD_WSGI) - -if is_suse; then - WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/srv/www/htdocs/watcher} -else - WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/var/www/watcher} -fi -# Public facing bits -WATCHER_SERVICE_HOST=${WATCHER_SERVICE_HOST:-$HOST_IP} -WATCHER_SERVICE_PORT=${WATCHER_SERVICE_PORT:-9322} -WATCHER_SERVICE_PORT_INT=${WATCHER_SERVICE_PORT_INT:-19322} -WATCHER_SERVICE_PROTOCOL=${WATCHER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# Support entry points installation of console scripts -if [[ -d $WATCHER_DIR/bin ]]; then - WATCHER_BIN_DIR=$WATCHER_DIR/bin -else - WATCHER_BIN_DIR=$(get_python_exec_prefix) -fi - -# Entry Points -# ------------ - -# Test if any watcher services are enabled -# is_watcher_enabled -function is_watcher_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"watcher-" ]] && return 0 - return 1 -} - -#_cleanup_watcher_apache_wsgi - Remove wsgi files, -#disable and remove apache vhost file -function _cleanup_watcher_apache_wsgi { - sudo rm -rf $WATCHER_WSGI_DIR - sudo rm -f $(apache_site_config_for watcher-api) - 
restart_apache_server -} - -# cleanup_watcher() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_watcher { - sudo rm -rf $WATCHER_STATE_PATH $WATCHER_AUTH_CACHE_DIR - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - _cleanup_watcher_apache_wsgi - fi -} - -# configure_watcher() - Set config files, create data dirs, etc -function configure_watcher { - # Put config files in ``/etc/watcher`` for everyone to find - sudo install -d -o $STACK_USER $WATCHER_CONF_DIR - - install_default_policy watcher - - # Rebuild the config file from scratch - create_watcher_conf -} - -# create_watcher_accounts() - Set up common required watcher accounts -# -# Project User Roles -# ------------------------------------------------------------------ -# SERVICE_TENANT_NAME watcher service -function create_watcher_accounts { - create_service_user "watcher" "admin" - - local watcher_service=$(get_or_create_service "watcher" \ - "infra-optim" "Watcher Infrastructure Optimization Service") - get_or_create_endpoint $watcher_service \ - "$REGION_NAME" \ - "$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \ - "$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \ - "$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" -} - -# _config_watcher_apache_wsgi() - Set WSGI config files of watcher -function _config_watcher_apache_wsgi { - local watcher_apache_conf - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - sudo mkdir -p $WATCHER_WSGI_DIR - sudo cp $WATCHER_DIR/watcher/api/app.wsgi $WATCHER_WSGI_DIR/app.wsgi - watcher_apache_conf=$(apache_site_config_for watcher-api) - sudo cp $WATCHER_DEVSTACK_FILES_DIR/apache-watcher-api.template $watcher_apache_conf - sudo sed -e " - s|%WATCHER_SERVICE_PORT%|$WATCHER_SERVICE_PORT|g; - s|%WATCHER_WSGI_DIR%|$WATCHER_WSGI_DIR|g; - s|%USER%|$STACK_USER|g; - s|%APIWORKERS%|$API_WORKERS|g; - s|%APACHE_NAME%|$APACHE_NAME|g; 
- " -i $watcher_apache_conf - enable_apache_site watcher-api - tail_log watcher-access /var/log/$APACHE_NAME/watcher-api-access.log - tail_log watcher-api /var/log/$APACHE_NAME/watcher-api.log - fi - -} - -# create_watcher_conf() - Create a new watcher.conf file -function create_watcher_conf { - # (Re)create ``watcher.conf`` - rm -f $WATCHER_CONF - - iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - iniset $WATCHER_CONF DEFAULT control_exchange watcher - - iniset $WATCHER_CONF database connection $(database_connection_url watcher) - iniset $WATCHER_CONF api host "$WATCHER_SERVICE_HOST" - iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT" - - iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_JSON - - iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_userid $RABBIT_USERID - iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD - iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_host $RABBIT_HOST - - iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2" - - iniset $NOVA_CONF oslo_messaging_notifications topics "notifications,watcher_notifications" - iniset $NOVA_CONF notifications notify_on_state_change "vm_and_task_state" - - configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR - configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR "watcher_clients_auth" - - if is_fedora || is_suse; then - # watcher defaults to /usr/local/bin, but fedora and suse pip like to - # install things in /usr/bin - iniset $WATCHER_CONF DEFAULT bindir "/usr/bin" - fi - - if [ -n "$WATCHER_STATE_PATH" ]; then - iniset $WATCHER_CONF DEFAULT state_path "$WATCHER_STATE_PATH" - iniset $WATCHER_CONF oslo_concurrency lock_path "$WATCHER_STATE_PATH" - fi - - if [ "$SYSLOG" != "False" ]; then - iniset $WATCHER_CONF DEFAULT use_syslog "True" - fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $WATCHER_CONF DEFAULT - else - 
# Show user_name and project_name instead of user_id and project_id - iniset $WATCHER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_domain)s %(user_name)s %(project_name)s] %(instance)s%(message)s" - fi - - #config apache files - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - _config_watcher_apache_wsgi - fi - # Register SSL certificates if provided - if is_ssl_enabled_service watcher; then - ensure_certificates WATCHER - - iniset $WATCHER_CONF DEFAULT ssl_cert_file "$WATCHER_SSL_CERT" - iniset $WATCHER_CONF DEFAULT ssl_key_file "$WATCHER_SSL_KEY" - - iniset $WATCHER_CONF DEFAULT enabled_ssl_apis "$WATCHER_ENABLED_APIS" - fi - - if is_service_enabled ceilometer; then - iniset $WATCHER_CONF watcher_messaging notifier_driver "messaging" - fi -} - -# create_watcher_cache_dir() - Part of the init_watcher() process -function create_watcher_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $WATCHER_AUTH_CACHE_DIR - rm -rf $WATCHER_AUTH_CACHE_DIR/* -} - -# init_watcher() - Initialize databases, etc. 
-function init_watcher { - # clean up from previous (possibly aborted) runs - # create required data files - if is_service_enabled $DATABASE_BACKENDS && is_service_enabled watcher-api; then - # (Re)create watcher database - recreate_database watcher - - # Create watcher schema - $WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade - fi - create_watcher_cache_dir -} - -# install_watcherclient() - Collect source and prepare -function install_watcherclient { - if use_library_from_git "python-watcherclient"; then - git_clone_by_name "python-watcherclient" - setup_dev_lib "python-watcherclient" - fi -} - -# install_watcher() - Collect source and prepare -function install_watcher { - git_clone $WATCHER_REPO $WATCHER_DIR $WATCHER_BRANCH - setup_develop $WATCHER_DIR - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - install_apache_wsgi - fi -} - -# start_watcher_api() - Start the API process ahead of other things -function start_watcher_api { - # Get right service port for testing - - local service_port=$WATCHER_SERVICE_PORT - local service_protocol=$WATCHER_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$WATCHER_SERVICE_PORT_INT - service_protocol="http" - fi - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - restart_apache_server - else - run_process watcher-api "$WATCHER_BIN_DIR/watcher-api --config-file $WATCHER_CONF" - fi - echo "Waiting for watcher-api to start..." - if ! 
wait_for_service $SERVICE_TIMEOUT $service_protocol://$WATCHER_SERVICE_HOST:$service_port; then - die $LINENO "watcher-api did not start" - fi - - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT & - start_tls_proxy '*' $EC2_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT & - fi -} - -# start_watcher() - Start running processes, including screen -function start_watcher { - # ``run_process`` checks ``is_service_enabled``, it is not needed here - start_watcher_api - run_process watcher-decision-engine "$WATCHER_BIN_DIR/watcher-decision-engine --config-file $WATCHER_CONF" - run_process watcher-applier "$WATCHER_BIN_DIR/watcher-applier --config-file $WATCHER_CONF" -} - -# stop_watcher() - Stop running processes (non-screen) -function stop_watcher { - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - disable_apache_site watcher-api - else - stop_process watcher-api - fi - for serv in watcher-decision-engine watcher-applier; do - stop_process $serv - done -} - -# Restore xtrace -$_XTRACE_WATCHER - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/devstack/local.conf.compute b/devstack/local.conf.compute deleted file mode 100644 index 79d0aeb..0000000 --- a/devstack/local.conf.compute +++ /dev/null @@ -1,49 +0,0 @@ -# Sample ``local.conf`` for compute node for Watcher development -# NOTE: Copy this file to the root DevStack directory for it to work properly. 
- -[[local|localrc]] - -ADMIN_PASSWORD=nomoresecrete -DATABASE_PASSWORD=stackdb -RABBIT_PASSWORD=stackqueue -SERVICE_PASSWORD=$ADMIN_PASSWORD -SERVICE_TOKEN=azertytoken - -HOST_IP=192.168.42.2 # Change this to this compute node's IP address -FLAT_INTERFACE=eth0 - -FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is -NETWORK_GATEWAY=10.254.1.1 # Change this for your network - -MULTI_HOST=1 - -SERVICE_HOST=192.168.42.1 # Change this to the IP of your controller node -MYSQL_HOST=$SERVICE_HOST -RABBIT_HOST=$SERVICE_HOST -GLANCE_HOSTPORT=${SERVICE_HOST}:9292 - -DATABASE_TYPE=mysql - -# Enable services (including neutron) -ENABLED_SERVICES=n-cpu,n-api-meta,c-vol,q-agt,placement-client - -NOVA_VNC_ENABLED=True -NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" -VNCSERVER_LISTEN=0.0.0.0 -VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP - -NOVA_INSTANCES_PATH=/opt/stack/data/instances - -# Enable the Ceilometer plugin for the compute agent -enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer -disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api - -LOGFILE=$DEST/logs/stack.sh.log -LOGDAYS=2 - -[[post-config|$NOVA_CONF]] -[DEFAULT] -compute_monitors=cpu.virt_driver -notify_on_state_change = vm_and_task_state -[notifications] -notify_on_state_change = vm_and_task_state diff --git a/devstack/local.conf.controller b/devstack/local.conf.controller deleted file mode 100644 index c117d60..0000000 --- a/devstack/local.conf.controller +++ /dev/null @@ -1,59 +0,0 @@ -# Sample ``local.conf`` for controller node for Watcher development -# NOTE: Copy this file to the root DevStack directory for it to work properly. 
- -[[local|localrc]] - -ADMIN_PASSWORD=nomoresecrete -DATABASE_PASSWORD=stackdb -RABBIT_PASSWORD=stackqueue -SERVICE_PASSWORD=$ADMIN_PASSWORD -SERVICE_TOKEN=azertytoken - -HOST_IP=192.168.42.1 # Change this to your controller node IP address -FLAT_INTERFACE=eth0 - -FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is -NETWORK_GATEWAY=10.254.1.1 # Change this for your network - -MULTI_HOST=1 - - -#Set this to FALSE if do not want to run watcher-api behind mod-wsgi -#WATCHER_USE_MOD_WSGI=TRUE - -# This is the controller node, so disable nova-compute -disable_service n-cpu - -# Disable nova-network and use neutron instead -disable_service n-net -ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3,neutron - -# Enable remote console access -enable_service n-cauth - -# Enable the Watcher Dashboard plugin -enable_plugin watcher-dashboard git://git.openstack.org/openstack/watcher-dashboard - -# Enable the Watcher plugin -enable_plugin watcher git://git.openstack.org/openstack/watcher - -# Enable the Ceilometer plugin -enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer - -# This is the controller node, so disable the ceilometer compute agent -disable_service ceilometer-acompute -# Enable the ceilometer api explicitly(bug:1667678) -enable_service ceilometer-api - -# Enable the Gnocchi plugin -enable_plugin gnocchi https://git.openstack.org/openstack/gnocchi - -LOGFILE=$DEST/logs/stack.sh.log -LOGDAYS=2 - -[[post-config|$NOVA_CONF]] -[DEFAULT] -compute_monitors=cpu.virt_driver -notify_on_state_change = vm_and_task_state -[notifications] -notify_on_state_change = vm_and_task_state diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index 2bf726d..0000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -# -# plugin.sh - DevStack plugin script to install watcher - -# Save trace setting -_XTRACE_WATCHER_PLUGIN=$(set +o | grep xtrace) -set -o xtrace - -echo_summary "watcher's plugin.sh was called..." 
-source $DEST/watcher/devstack/lib/watcher - -# Show all of defined environment variables -(set -o posix; set) - -if is_service_enabled watcher-api watcher-decision-engine watcher-applier; then - if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then - echo_summary "Before Installing watcher" - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing watcher" - install_watcher - - LIBS_FROM_GIT="${LIBS_FROM_GIT},python-watcherclient" - - install_watcherclient - cleanup_watcher - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring watcher" - configure_watcher - - if is_service_enabled key; then - create_watcher_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize watcher - init_watcher - - # Start the watcher components - echo_summary "Starting watcher" - start_watcher - fi - - if [[ "$1" == "unstack" ]]; then - stop_watcher - fi - - if [[ "$1" == "clean" ]]; then - cleanup_watcher - fi -fi - -# Restore xtrace -$_XTRACE_WATCHER_PLUGIN diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index 190ae7d..0000000 --- a/devstack/settings +++ /dev/null @@ -1,9 +0,0 @@ -# DevStack settings - -# Make sure rabbit is enabled -enable_service rabbit - -# Enable Watcher services -enable_service watcher-api -enable_service watcher-decision-engine -enable_service watcher-applier diff --git a/doc/ext/__init__.py b/doc/ext/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/doc/ext/term.py b/doc/ext/term.py deleted file mode 100644 index 6bbc55d..0000000 --- a/doc/ext/term.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import importlib -import inspect - -from docutils import nodes -from docutils.parsers import rst -from docutils import statemachine - -from watcher.version import version_info - - -class BaseWatcherDirective(rst.Directive): - - def __init__(self, name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - super(BaseWatcherDirective, self).__init__( - name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine) - self.result = statemachine.ViewList() - - def run(self): - raise NotImplementedError('Must override run() is subclass.') - - def add_line(self, line, *lineno): - """Append one line of generated reST to the output.""" - self.result.append(line, rst.directives.unchanged, *lineno) - - def add_textblock(self, textblock): - for line in textblock.splitlines(): - self.add_line(line) - - def add_object_docstring(self, obj): - obj_raw_docstring = obj.__doc__ or "" - - # Maybe it's within the __init__ - if not obj_raw_docstring and hasattr(obj, "__init__"): - if obj.__init__.__doc__: - obj_raw_docstring = obj.__init__.__doc__ - - if not obj_raw_docstring: - # Raise a warning to make the tests fail wit doc8 - raise self.error("No docstring available for %s!" 
% obj) - - obj_docstring = inspect.cleandoc(obj_raw_docstring) - self.add_textblock(obj_docstring) - - -class WatcherTerm(BaseWatcherDirective): - """Directive to import an RST formatted docstring into the Watcher glossary - - **How to use it** - - # inside your .py file - class DocumentedObject(object): - '''My *.rst* docstring''' - - - # Inside your .rst file - .. watcher-term:: import.path.to.your.DocumentedObject - - This directive will then import the docstring and then interpret it. - """ - - # You need to put an import path as an argument for this directive to work - required_arguments = 1 - - def run(self): - cls_path = self.arguments[0] - - try: - try: - cls = importlib.import_module(cls_path) - except ImportError: - module_name, cls_name = cls_path.rsplit('.', 1) - mod = importlib.import_module(module_name) - cls = getattr(mod, cls_name) - except Exception as exc: - raise self.error(exc) - - self.add_object_docstring(cls) - - node = nodes.paragraph() - node.document = self.state.document - self.state.nested_parse(self.result, 0, node) - return node.children - - -class WatcherFunc(BaseWatcherDirective): - """Directive to import a value returned by a func into the Watcher doc - - **How to use it** - - # inside your .py file - class Bar(object): - - def foo(object): - return foo_string - - - # Inside your .rst file - .. watcher-func:: import.path.to.your.Bar.foo node_classname - - node_classname is decumented here: - http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html - - This directive will then import the value and then interpret it. - """ - - # You need to put an import path as an argument for this directive to work - # required_arguments = 1 - # optional_arguments = 1 - - option_spec = {'format': rst.directives.unchanged} - has_content = True - - def run(self): - if not self.content: - error = self.state_machine.reporter.error( - 'The "%s" directive is empty; content required.' 
% self.name, - nodes.literal_block(self.block_text, self.block_text), - line=self.lineno) - return [error] - - func_path = self.content[0] - try: - cls_path, func_name = func_path.rsplit('.', 1) - module_name, cls_name = cls_path.rsplit('.', 1) - mod = importlib.import_module(module_name) - cls = getattr(mod, cls_name) - except Exception as exc: - raise self.error(exc) - - cls_obj = cls() - func = getattr(cls_obj, func_name) - textblock = func() - if not isinstance(textblock, str): - textblock = str(textblock) - - self.add_textblock(textblock) - - try: - node_class = getattr(nodes, - self.options.get('format', 'paragraph')) - except Exception as exc: - raise self.error(exc) - - node = node_class() - node.document = self.state.document - self.state.nested_parse(self.result, 0, node) - return [node] - - -def setup(app): - app.add_directive('watcher-term', WatcherTerm) - app.add_directive('watcher-func', WatcherFunc) - return {'version': version_info.version_string()} diff --git a/doc/ext/versioned_notifications.py b/doc/ext/versioned_notifications.py deleted file mode 100644 index 1a81079..0000000 --- a/doc/ext/versioned_notifications.py +++ /dev/null @@ -1,133 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This provides a sphinx extension able to list the implemented versioned -notifications into the developer documentation. - -It is used via a single directive in the .rst file - - .. 
versioned_notifications:: - -""" - -from sphinx.util.compat import Directive -from docutils import nodes - -from watcher.notifications import base as notification -from watcher.objects import base - - -class VersionedNotificationDirective(Directive): - - SAMPLE_ROOT = 'doc/notification_samples/' - TOGGLE_SCRIPT = """ - -""" - - def run(self): - notifications = self._collect_notifications() - return self._build_markup(notifications) - - def _collect_notifications(self): - base.WatcherObjectRegistry.register_notification_objects() - notifications = [] - ovos = base.WatcherObjectRegistry.obj_classes() - for name, cls in ovos.items(): - cls = cls[0] - if (issubclass(cls, notification.NotificationBase) and - cls != notification.NotificationBase): - - payload_name = cls.fields['payload'].objname - payload_cls = ovos[payload_name][0] - for sample in cls.samples: - notifications.append((cls.__name__, - payload_cls.__name__, - sample)) - return sorted(notifications) - - def _build_markup(self, notifications): - content = [] - cols = ['Event type', 'Notification class', 'Payload class', 'Sample'] - table = nodes.table() - content.append(table) - group = nodes.tgroup(cols=len(cols)) - table.append(group) - - head = nodes.thead() - group.append(head) - - for _ in cols: - group.append(nodes.colspec(colwidth=1)) - - body = nodes.tbody() - group.append(body) - - # fill the table header - row = nodes.row() - body.append(row) - for col_name in cols: - col = nodes.entry() - row.append(col) - text = nodes.strong(text=col_name) - col.append(text) - - # fill the table content, one notification per row - for name, payload, sample_file in notifications: - event_type = sample_file[0: -5].replace('-', '.') - - row = nodes.row() - body.append(row) - col = nodes.entry() - row.append(col) - text = nodes.literal(text=event_type) - col.append(text) - - col = nodes.entry() - row.append(col) - text = nodes.literal(text=name) - col.append(text) - - col = nodes.entry() - row.append(col) - text = 
nodes.literal(text=payload) - col.append(text) - - col = nodes.entry() - row.append(col) - - with open(self.SAMPLE_ROOT + sample_file, 'r') as f: - sample_content = f.read() - - event_type = sample_file[0: -5] - html_str = self.TOGGLE_SCRIPT % ((event_type, ) * 3) - html_str += ("" % event_type) - html_str += ("
%s
" - % (event_type, sample_content)) - - raw = nodes.raw('', html_str, format="html") - col.append(raw) - - return content - - -def setup(app): - app.add_directive('versioned_notifications', - VersionedNotificationDirective) diff --git a/doc/notification_samples/action-create.json b/doc/notification_samples/action-create.json deleted file mode 100644 index c8dd7a4..0000000 --- a/doc/notification_samples/action-create.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionCreatePayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "PENDING", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "event_type": "action.create", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-delete.json b/doc/notification_samples/action-delete.json deleted file mode 100644 index dbc5ef9..0000000 --- a/doc/notification_samples/action-delete.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionDeletePayload", - "watcher_object.data": { - "uuid": 
"10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "DELETED", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "event_type": "action.delete", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-execution-end.json b/doc/notification_samples/action-execution-end.json deleted file mode 100644 index 479a649..0000000 --- a/doc/notification_samples/action-execution-end.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionExecutionPayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "fault": null, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "SUCCEEDED", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": 
"cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.execution.end", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-execution-error.json b/doc/notification_samples/action-execution-error.json deleted file mode 100644 index 66e2371..0000000 --- a/doc/notification_samples/action-execution-error.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "priority": "ERROR", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionExecutionPayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "fault": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ExceptionPayload", - "watcher_object.data": { - "module_name": "watcher.tests.notifications.test_action_notification", - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": "test_send_action_execution_with_error" - } - }, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "FAILED", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.execution.error", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 
00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-execution-start.json b/doc/notification_samples/action-execution-start.json deleted file mode 100644 index ace78f9..0000000 --- a/doc/notification_samples/action-execution-start.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionExecutionPayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "fault": null, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.execution.start", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-update.json b/doc/notification_samples/action-update.json deleted file mode 100644 index 3f4cbcb..0000000 --- a/doc/notification_samples/action-update.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionUpdatePayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - 
}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionStateUpdatePayload", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - } - }, - "state": "ONGOING", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.update", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action_plan-create.json b/doc/notification_samples/action_plan-create.json deleted file mode 100644 index b3de9b7..0000000 --- a/doc/notification_samples/action_plan-create.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "publisher_id": "infra-optim:node0", - "payload": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "display_name": "test strategy", - "name": "TEST", - "updated_at": null, - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload" - }, - "created_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.version": "1.0", - 
"watcher_object.data": { - "audit_type": "ONESHOT", - "scope": [], - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "parameters": {}, - "interval": null, - "deleted_at": null, - "state": "PENDING", - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "deleted_at": null, - "state": "RECOMMENDED", - "updated_at": null - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanCreatePayload" - }, - "priority": "INFO", - "message_id": "5148bff1-ea06-4ad6-8e4e-8c85ca5eb629", - "event_type": "action_plan.create", - "timestamp": "2016-10-18 09:52:05.219414" -} diff --git a/doc/notification_samples/action_plan-delete.json b/doc/notification_samples/action_plan-delete.json deleted file mode 100644 index 29d0762..0000000 --- a/doc/notification_samples/action_plan-delete.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "publisher_id": "infra-optim:node0", - "timestamp": "2016-10-18 09:52:05.219414", - "payload": { - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "created_at": "2016-10-18T09:52:05Z", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "interval": null, - "audit_type": "ONESHOT", - "scope": [], - "updated_at": null, - "deleted_at": null, - "state": "PENDING", - "created_at": "2016-10-18T09:52:05Z", - "parameters": {} - }, - "watcher_object.version": "1.0", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher" - }, - "global_efficacy": {}, - "updated_at": null, - "deleted_at": null, - 
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.data": { - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "created_at": "2016-10-18T09:52:05Z", - "name": "TEST", - "display_name": "test strategy", - "deleted_at": null, - "updated_at": null, - "parameters_spec": {} - }, - "watcher_object.version": "1.0", - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher" - }, - "state": "DELETED" - }, - "watcher_object.version": "1.0", - "watcher_object.name": "ActionPlanDeletePayload", - "watcher_object.namespace": "watcher" - }, - "event_type": "action_plan.delete", - "message_id": "3d137686-a1fd-4683-ab40-c4210aac2140", - "priority": "INFO" -} diff --git a/doc/notification_samples/action_plan-execution-end.json b/doc/notification_samples/action_plan-execution-end.json deleted file mode 100644 index 2fee7f0..0000000 --- a/doc/notification_samples/action_plan-execution-end.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "event_type": "action_plan.execution.end", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "scope": [], - "audit_type": "ONESHOT", - "state": "SUCCEEDED", - "parameters": {}, - "interval": null, - "updated_at": null - } - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "fault": null, - "state": "ONGOING", - "global_efficacy": {}, - "strategy_uuid": 
"cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "name": "TEST", - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "parameters_spec": {}, - "display_name": "test strategy", - "updated_at": null - } - }, - "updated_at": null - } - }, - "priority": "INFO", - "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", - "timestamp": "2016-10-18 09:52:05.219414", - "publisher_id": "infra-optim:node0" -} diff --git a/doc/notification_samples/action_plan-execution-error.json b/doc/notification_samples/action_plan-execution-error.json deleted file mode 100644 index 466c67f..0000000 --- a/doc/notification_samples/action_plan-execution-error.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "action_plan.execution.error", - "publisher_id": "infra-optim:node0", - "priority": "ERROR", - "message_id": "9a45c5ae-0e21-4300-8fa0-5555d52a66d9", - "payload": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.data": { - "fault": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "ExceptionPayload", - "watcher_object.data": { - "exception_message": "TEST", - "module_name": "watcher.tests.notifications.test_action_plan_notification", - "function_name": "test_send_action_plan_action_with_error", - "exception": "WatcherException" - } - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "created_at": "2016-10-18T09:52:05Z", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload", - "watcher_object.data": { - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "created_at": 
"2016-10-18T09:52:05Z", - "name": "TEST", - "updated_at": null, - "display_name": "test strategy", - "parameters_spec": {}, - "deleted_at": null - } - }, - "updated_at": null, - "deleted_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.data": { - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "created_at": "2016-10-18T09:52:05Z", - "scope": [], - "updated_at": null, - "audit_type": "ONESHOT", - "interval": null, - "deleted_at": null, - "state": "PENDING" - } - }, - "global_efficacy": {}, - "state": "ONGOING" - } - }, - "timestamp": "2016-10-18 09:52:05.219414" -} diff --git a/doc/notification_samples/action_plan-execution-start.json b/doc/notification_samples/action_plan-execution-start.json deleted file mode 100644 index 7045162..0000000 --- a/doc/notification_samples/action_plan-execution-start.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "event_type": "action_plan.execution.start", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "scope": [], - "audit_type": "ONESHOT", - "state": "PENDING", - "parameters": {}, - "interval": null, - "updated_at": 
null - } - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "fault": null, - "state": "ONGOING", - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "name": "TEST", - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "parameters_spec": {}, - "display_name": "test strategy", - "updated_at": null - } - }, - "updated_at": null - } - }, - "priority": "INFO", - "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", - "timestamp": "2016-10-18 09:52:05.219414", - "publisher_id": "infra-optim:node0" -} diff --git a/doc/notification_samples/action_plan-update.json b/doc/notification_samples/action_plan-update.json deleted file mode 100644 index 60f7eec..0000000 --- a/doc/notification_samples/action_plan-update.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "payload": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "audit_type": "ONESHOT", - "scope": [], - "created_at": "2016-10-18T09:52:05Z", - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "interval": null, - "updated_at": null, - "state": "PENDING", - "deleted_at": null, - "parameters": {} - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload" - }, - "created_at": "2016-10-18T09:52:05Z", - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "updated_at": null, - "state_update": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": 
"ActionPlanStateUpdatePayload" - }, - "state": "ONGOING", - "deleted_at": null, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "name": "TEST", - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "display_name": "test strategy", - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "deleted_at": null, - "parameters_spec": {} - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload" - }, - "global_efficacy": {} - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanUpdatePayload" - }, - "publisher_id": "infra-optim:node0", - "priority": "INFO", - "timestamp": "2016-10-18 09:52:05.219414", - "event_type": "action_plan.update", - "message_id": "0a8a7329-fd5a-4ec6-97d7-2b776ce51a4c" -} diff --git a/doc/notification_samples/audit-create.json b/doc/notification_samples/audit-create.json deleted file mode 100644 index dd655ea..0000000 --- a/doc/notification_samples/audit-create.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "PENDING", - "updated_at": null, - "deleted_at": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter 
example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditCreatePayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.create", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-delete.json b/doc/notification_samples/audit-delete.json deleted file mode 100644 index 7527829..0000000 --- a/doc/notification_samples/audit-delete.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "DELETED", - "updated_at": null, - "deleted_at": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - 
"description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditDeletePayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.delete", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-planner-end.json b/doc/notification_samples/audit-planner-end.json deleted file mode 100644 index d3307c0..0000000 --- a/doc/notification_samples/audit-planner-end.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": 
{ - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.planner.end", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-planner-error.json b/doc/notification_samples/audit-planner-error.json deleted file mode 100644 index d3b1635..0000000 --- a/doc/notification_samples/audit-planner-error.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "priority": "ERROR", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": "test_send_audit_action_with_error", - "module_name": "watcher.tests.notifications.test_audit_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - 
"deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.planner.error", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-planner-start.json b/doc/notification_samples/audit-planner-start.json deleted file mode 100644 index 93644dd..0000000 --- a/doc/notification_samples/audit-planner-start.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": 
"bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.planner.start", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-strategy-end.json b/doc/notification_samples/audit-strategy-end.json deleted file mode 100644 index 3874fbf..0000000 --- a/doc/notification_samples/audit-strategy-end.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": 
"bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.strategy.end", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-strategy-error.json b/doc/notification_samples/audit-strategy-error.json deleted file mode 100644 index 4c6fd18..0000000 --- a/doc/notification_samples/audit-strategy-error.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "priority": "ERROR", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - 
"updated_at": null, - "deleted_at": null, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": "test_send_audit_action_with_error", - "module_name": "watcher.tests.notifications.test_audit_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.strategy.error", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff 
--git a/doc/notification_samples/audit-strategy-start.json b/doc/notification_samples/audit-strategy-start.json deleted file mode 100644 index 43322a7..0000000 --- a/doc/notification_samples/audit-strategy-start.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": 
"audit.strategy.start", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-update.json b/doc/notification_samples/audit-update.json deleted file mode 100644 index 3dc4b0b..0000000 --- a/doc/notification_samples/audit-update.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:51:38.722986 ", - "payload": { - "watcher_object.name": "AuditUpdatePayload", - "watcher_object.data": { - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.name": "StrategyPayload", - "watcher_object.data": { - "name": "dummy", - "parameters_spec": { - "properties": { - "para2": { - "default": "hello", - "type": "string", - "description": "string parameter example" - }, - "para1": { - "maximum": 10.2, - "default": 3.2, - "minimum": 1.0, - "description": "number parameter example", - "type": "number" - } - } - }, - "updated_at": null, - "display_name": "Dummy strategy", - "deleted_at": null, - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "created_at": "2016-11-04T16:25:35Z" - }, - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "scope": [], - "created_at": "2016-11-04T16:51:21Z", - "uuid": "f1e0d912-afd9-4bf2-91ef-c99cd08cc1ef", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.name": "GoalPayload", - "watcher_object.data": { - "efficacy_specification": [], - "updated_at": null, - "name": "dummy", - "display_name": "Dummy goal", - "deleted_at": null, - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "created_at": "2016-11-04T16:25:35Z" - }, - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "deleted_at": null, - "state_update": { - "watcher_object.name": "AuditStateUpdatePayload", - "watcher_object.data": { - "state": "ONGOING", - "old_state": "PENDING" - }, - 
"watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "interval": null, - "updated_at": null, - "state": "ONGOING", - "audit_type": "ONESHOT" - }, - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "priority": "INFO", - "event_type": "audit.update", - "message_id": "697fdf55-7252-4b6c-a2c2-5b9e85f6342c" -} diff --git a/doc/notification_samples/infra-optim-exception.json b/doc/notification_samples/infra-optim-exception.json deleted file mode 100644 index 0793312..0000000 --- a/doc/notification_samples/infra-optim-exception.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "event_type": "infra-optim.exception", - "payload": { - "watcher_object.data": { - "exception": "NoAvailableStrategyForGoal", - "exception_message": "No strategy could be found to achieve the server_consolidation goal.", - "function_name": "_aggregate_create_in_db", - "module_name": "watcher.objects.aggregate" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "priority": "ERROR", - "publisher_id": "watcher-api:fake-mini" -} diff --git a/doc/notification_samples/service-update.json b/doc/notification_samples/service-update.json deleted file mode 100644 index 1f61e58..0000000 --- a/doc/notification_samples/service-update.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "payload": { - "watcher_object.name": "ServiceUpdatePayload", - "watcher_object.namespace": "watcher", - "watcher_object.data": { - "status_update": { - "watcher_object.name": "ServiceStatusUpdatePayload", - "watcher_object.namespace": "watcher", - "watcher_object.data": { - "old_state": "ACTIVE", - "state": "FAILED" - }, - "watcher_object.version": "1.0" - }, - "last_seen_up": "2016-09-22T08:32:06Z", - "name": "watcher-service", - "sevice_host": "controller" - }, - "watcher_object.version": "1.0" - }, - "event_type": "service.update", - "priority": "INFO", - "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", - 
"timestamp": "2016-10-18 09:52:05.219414", - "publisher_id": "infra-optim:node0" -} \ No newline at end of file diff --git a/doc/source/admin/apache-mod-wsgi.rst b/doc/source/admin/apache-mod-wsgi.rst deleted file mode 100644 index c0b6347..0000000 --- a/doc/source/admin/apache-mod-wsgi.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - - -Installing API behind mod_wsgi -============================== - -#. Install the Apache Service:: - - Fedora 21/RHEL7/CentOS7: - sudo yum install httpd - - Fedora 22 (or higher): - sudo dnf install httpd - - Debian/Ubuntu: - apt-get install apache2 - -#. Copy ``etc/apache2/watcher.conf`` under the apache sites:: - - Fedora/RHEL7/CentOS7: - sudo cp etc/apache2/watcher /etc/httpd/conf.d/watcher.conf - - Debian/Ubuntu: - sudo cp etc/apache2/watcher /etc/apache2/sites-available/watcher.conf - -#. Edit ``/watcher.conf`` according to installation - and environment. - - * Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and - ``group`` values to appropriate user on your server. - * Modify the ``WSGIScriptAlias`` directive to point to the - watcher/api/app.wsgi script. - * Modify the ``Directory`` directive to set the path to the Watcher API - code. - * Modify the ``ErrorLog and CustomLog`` to redirect the logs to the right - directory. - -#. Enable the apache watcher site and reload:: - - Fedora/RHEL7/CentOS7: - sudo systemctl reload httpd - - Debian/Ubuntu: - sudo a2ensite watcher - sudo service apache2 reload diff --git a/doc/source/admin/conf-files.rst b/doc/source/admin/conf-files.rst deleted file mode 100644 index 792bc2c..0000000 --- a/doc/source/admin/conf-files.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. 
_watcher_sample_configuration_files: - -================================== -Watcher sample configuration files -================================== - -watcher.conf -~~~~~~~~~~~~ - -The ``watcher.conf`` file contains most of the options to configure the -Watcher services. - -.. literalinclude:: ../watcher.conf.sample - :language: ini diff --git a/doc/source/admin/configuration.rst b/doc/source/admin/configuration.rst deleted file mode 100644 index d379156..0000000 --- a/doc/source/admin/configuration.rst +++ /dev/null @@ -1,460 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -=================== -Configuring Watcher -=================== - -This document is continually updated and reflects the latest -available code of the Watcher service. - -Service overview -================ - -The Watcher system is a collection of services that provides support to -optimize your IAAS platform. The Watcher service may, depending upon -configuration, interact with several other OpenStack services. This includes: - -- the OpenStack Identity service (`keystone`_) for request authentication and - to locate other OpenStack services -- the OpenStack Telemetry service (`ceilometer`_) for consuming the resources - metrics -- the OpenStack Compute service (`nova`_) works with the Watcher service and - acts as a user-facing API for instance migration. - -The Watcher service includes the following components: - -- ``watcher-decision-engine``: runs audit on part of your IAAS and return an - action plan in order to optimize resource placement. -- ``watcher-api``: A RESTful API that processes application requests by sending - them to the watcher-decision-engine over RPC. -- ``watcher-applier``: applies the action plan. -- `python-watcherclient`_: A command-line interface (CLI) for interacting with - the Watcher service. 
-- `watcher-dashboard`_: An Horizon plugin for interacting with the Watcher - service. - -Additionally, the Watcher service has certain external dependencies, which -are very similar to other OpenStack services: - -- A database to store audit and action plan information and state. You can set - the database back-end type and location. -- A queue. A central hub for passing messages, such as `RabbitMQ`_. - -Optionally, one may wish to utilize the following associated projects for -additional functionality: - -- `watcher metering`_: an alternative to collect and push metrics to the - Telemetry service. - -.. _`keystone`: https://github.com/openstack/keystone -.. _`ceilometer`: https://github.com/openstack/ceilometer -.. _`nova`: https://github.com/openstack/nova -.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient -.. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard -.. _`watcher metering`: https://github.com/b-com/watcher-metering -.. _`RabbitMQ`: https://www.rabbitmq.com/ - -Install and configure prerequisites -=================================== - -You can configure Watcher services to run on separate nodes or the same node. -In this guide, the components run on one node, typically the Controller node. - -This section shows you how to install and configure the services. - -It assumes that the Identity, Image, Compute, and Networking services -have already been set up. - -.. _identity-service_configuration: - -Configure the Identity service for the Watcher service ------------------------------------------------------- - -#. Create the Watcher service user (eg ``watcher``). The service uses this to - authenticate with the Identity Service. Use the - ``KEYSTONE_SERVICE_PROJECT_NAME`` project (named ``service`` by default in - devstack) and give the user the ``admin`` role: - - .. 
code-block:: bash - - $ keystone user-create --name=watcher --pass=WATCHER_PASSWORD \ - --email=watcher@example.com \ - --tenant=KEYSTONE_SERVICE_PROJECT_NAME - $ keystone user-role-add --user=watcher \ - --tenant=KEYSTONE_SERVICE_PROJECT_NAME --role=admin - - or (by using python-openstackclient 1.8.0+) - - .. code-block:: bash - - $ openstack user create --password WATCHER_PASSWORD --enable \ - --email watcher@example.com watcher \ - --project=KEYSTONE_SERVICE_PROJECT_NAME - $ openstack role add --project KEYSTONE_SERVICE_PROJECT_NAME \ - --user watcher admin - - -#. You must register the Watcher Service with the Identity Service so that - other OpenStack services can locate it. To register the service: - - .. code-block:: bash - - $ keystone service-create --name=watcher --type=infra-optim \ - --description="Infrastructure Optimization service" - - or (by using python-openstackclient 1.8.0+) - - .. code-block:: bash - - $ openstack service create --name watcher infra-optim \ - --description="Infrastructure Optimization service" - -#. Create the endpoints by replacing YOUR_REGION and - ``WATCHER_API_[PUBLIC|ADMIN|INTERNAL]_IP`` with your region and your - Watcher Service's API node IP addresses (or FQDN): - - .. code-block:: bash - - $ keystone endpoint-create \ - --service-id=the_service_id_above \ - --publicurl=http://WATCHER_API_PUBLIC_IP:9322 \ - --internalurl=http://WATCHER_API_INTERNAL_IP:9322 \ - --adminurl=http://WATCHER_API_ADMIN_IP:9322 - - or (by using python-openstackclient 1.8.0+) - - .. code-block:: bash - - $ openstack endpoint create --region YOUR_REGION watcher \ - --publicurl http://WATCHER_API_PUBLIC_IP:9322 \ - --internalurl http://WATCHER_API_INTERNAL_IP:9322 \ - --adminurl http://WATCHER_API_ADMIN_IP:9322 - -.. _watcher-db_configuration: - -Set up the database for Watcher -------------------------------- - -The Watcher service stores information in a database. This guide uses the -MySQL database that is used by other OpenStack services. - -#. 
In MySQL, create a ``watcher`` database that is accessible by the - ``watcher`` user. Replace WATCHER_DBPASSWORD - with the actual password:: - - $ mysql -u root -p - - mysql> CREATE DATABASE watcher CHARACTER SET utf8; - mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \ - IDENTIFIED BY 'WATCHER_DBPASSWORD'; - mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \ - IDENTIFIED BY 'WATCHER_DBPASSWORD'; - - -Configure the Watcher service -============================= - -The Watcher service is configured via its configuration file. This file -is typically located at ``/etc/watcher/watcher.conf``. - -You can easily generate and update a sample configuration file -named :ref:`watcher.conf.sample ` by using -these following commands:: - - $ git clone git://git.openstack.org/openstack/watcher - $ cd watcher/ - $ tox -e genconfig - $ vi etc/watcher/watcher.conf.sample - - -The configuration file is organized into the following sections: - -* ``[DEFAULT]`` - General configuration -* ``[api]`` - API server configuration -* ``[database]`` - SQL driver configuration -* ``[keystone_authtoken]`` - Keystone Authentication plugin configuration -* ``[watcher_clients_auth]`` - Keystone auth configuration for clients -* ``[watcher_applier]`` - Watcher Applier module configuration -* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration -* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration -* ``[ceilometer_client]`` - Ceilometer client configuration -* ``[cinder_client]`` - Cinder client configuration -* ``[glance_client]`` - Glance client configuration -* ``[nova_client]`` - Nova client configuration -* ``[neutron_client]`` - Neutron client configuration - -The Watcher configuration file is expected to be named -``watcher.conf``. When starting Watcher, you can specify a different -configuration file to use with ``--config-file``. 
If you do **not** specify a -configuration file, Watcher will look in the following directories for a -configuration file, in order: - -* ``~/.watcher/`` -* ``~/`` -* ``/etc/watcher/`` -* ``/etc/`` - - -Although some configuration options are mentioned here, it is recommended that -you review all the `available options -`_ -so that the watcher service is configured for your needs. - -#. The Watcher Service stores information in a database. This guide uses the - MySQL database that is used by other OpenStack services. - - Configure the location of the database via the ``connection`` option. In the - following, replace WATCHER_DBPASSWORD with the password of your ``watcher`` - user, and replace DB_IP with the IP address where the DB server is located:: - - [database] - ... - - # The SQLAlchemy connection string used to connect to the - # database (string value) - #connection= - connection = mysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8 - -#. Configure the Watcher Service to use the RabbitMQ message broker by - setting one or more of these options. Replace RABBIT_HOST with the - IP address of the RabbitMQ server, RABBITMQ_USER and RABBITMQ_PASSWORD - by the RabbitMQ server login credentials :: - - [DEFAULT] - - # The messaging driver to use, defaults to rabbit. Other drivers - # include qpid and zmq. (string value) - #rpc_backend = rabbit - - # The default exchange under which topics are scoped. May be - # overridden by an exchange name specified in the transport_url - # option. (string value) - control_exchange = watcher - - ... - - [oslo_messaging_rabbit] - - # The username used by the message broker (string value) - rabbit_userid = RABBITMQ_USER - - # The password of user used by the message broker (string value) - rabbit_password = RABBITMQ_PASSWORD - - # The host where the message brokeris installed (string value) - rabbit_host = RABBIT_HOST - - # The port used bythe message broker (string value) - #rabbit_port = 5672 - - -#. 
Watcher API shall validate the token provided by every incoming request, - via keystonemiddleware, which requires the Watcher service to be configured - with the right credentials for the Identity service. - - In the configuration section here below: - - * replace IDENTITY_IP with the IP of the Identity server - * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` - user - * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created - for OpenStack services (e.g. ``service``) :: - - [keystone_authtoken] - - # Authentication type to load (unknown value) - # Deprecated group/name - [DEFAULT]/auth_plugin - #auth_type = - auth_type = password - - # Authentication URL (unknown value) - #auth_url = - auth_url = http://IDENTITY_IP:35357 - - # Username (unknown value) - # Deprecated group/name - [DEFAULT]/username - #username = - username=watcher - - # User's password (unknown value) - #password = - password = WATCHER_PASSWORD - - # Domain ID containing project (unknown value) - #project_domain_id = - project_domain_id = default - - # User's domain id (unknown value) - #user_domain_id = - user_domain_id = default - - # Project name to scope to (unknown value) - # Deprecated group/name - [DEFAULT]/tenant-name - #project_name = - project_name = KEYSTONE_SERVICE_PROJECT_NAME - -#. Watcher's decision engine and applier interact with other OpenStack - projects through those projects' clients. In order to instantiate these - clients, Watcher needs to request a new session from the Identity service - using the right credentials. - - In the configuration section here below: - - * replace IDENTITY_IP with the IP of the Identity server - * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` - user - * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created - for OpenStack services (e.g. 
``service``) :: - - [watcher_clients_auth] - - # Authentication type to load (unknown value) - # Deprecated group/name - [DEFAULT]/auth_plugin - #auth_type = - auth_type = password - - # Authentication URL (unknown value) - #auth_url = - auth_url = http://IDENTITY_IP:35357 - - # Username (unknown value) - # Deprecated group/name - [DEFAULT]/username - #username = - username=watcher - - # User's password (unknown value) - #password = - password = WATCHER_PASSWORD - - # Domain ID containing project (unknown value) - #project_domain_id = - project_domain_id = default - - # User's domain id (unknown value) - #user_domain_id = - user_domain_id = default - - # Project name to scope to (unknown value) - # Deprecated group/name - [DEFAULT]/tenant-name - #project_name = - project_name = KEYSTONE_SERVICE_PROJECT_NAME - -#. Configure the clients to use a specific version if desired. For example, to - configure Watcher to use a Nova client with version 2.1, use:: - - [nova_client] - - # Version of Nova API to use in novaclient. (string value) - #api_version = 2 - api_version = 2.1 - -#. Create the Watcher Service database tables:: - - $ watcher-db-manage --config-file /etc/watcher/watcher.conf create_schema - -#. Start the Watcher Service:: - - $ watcher-api && watcher-decision-engine && watcher-applier - -Configure Nova compute -====================== - -Please check your hypervisor configuration to correctly handle -`instance migration`_. - -.. _`instance migration`: http://docs.openstack.org/admin-guide/compute-live-migration-usage.html - -Configure Measurements -====================== - -You can configure and install Ceilometer by following the documentation below : - -#. http://docs.openstack.org/developer/ceilometer -#. 
http://docs.openstack.org/kilo/install-guide/install/apt/content/ceilometer-nova.html - -The built-in strategy 'basic_consolidation' provided by watcher requires -"**compute.node.cpu.percent**" and "**cpu_util**" measurements to be collected -by Ceilometer. -The measurements available depend on the hypervisors that OpenStack manages on -the specific implementation. -You can find the measurements available per hypervisor and OpenStack release on -the OpenStack site. -You can use 'ceilometer meter-list' to list the available meters. - -For more information: -http://docs.openstack.org/developer/ceilometer/measurements.html - -Ceilometer is designed to collect measurements from OpenStack services and from -other external components. If you would like to add new meters to the currently -existing ones, you need to follow the documentation below: - -#. http://docs.openstack.org/developer/ceilometer/new_meters.html - -The Ceilometer collector uses a pluggable storage system, meaning that you can -pick any database system you prefer. -The original implementation has been based on MongoDB but you can create your -own storage driver using whatever technology you want. -For more information : https://wiki.openstack.org/wiki/Gnocchi - - -Configure Nova Notifications -============================ - -Watcher can consume notifications generated by the Nova services, in order to -build or update, in real time, its cluster data model related to computing -resources. - -Nova publishes, by default, notifications on ``notifications`` AMQP queue -(configurable) and ``versioned_notifications`` AMQP queue (not -configurable). ``notifications`` queue is mainly used by ceilometer, so we can -not use it. And some events, related to nova-compute service state, are only -sent into the ``versioned_notifications`` queue. - -By default, Watcher listens to AMQP queues named ``watcher_notifications`` -and ``versioned_notifications``. 
So you have to update the Nova -configuration file on controller and compute nodes, in order -for Watcher to receive Nova notifications in ``watcher_notifications`` as well. - - * In the file ``/etc/nova/nova.conf``, update the section - ``[oslo_messaging_notifications]``, by redefining the list of topics - into which Nova services will publish events :: - - [oslo_messaging_notifications] - driver = messagingv2 - topics = notifications,watcher_notifications - - * Restart the Nova services. - - -Workers -======= - -You can define a number of workers for the Decision Engine and the Applier. - -If you want to create and run more audits simultaneously, you have to raise -the number of workers used by the Decision Engine:: - - [watcher_decision_engine] - - ... - - # The maximum number of threads that can be used to execute strategies - # (integer value) - #max_workers = 2 - - -If you want to execute simultaneously more recommended action plans, you -have to raise the number of workers used by the Applier:: - - [watcher_applier] - - ... - - # Number of workers for applier, default value is 1. (integer value) - # Minimum value: 1 - #workers = 1 - diff --git a/doc/source/admin/gmr.rst b/doc/source/admin/gmr.rst deleted file mode 100644 index 31ea4f5..0000000 --- a/doc/source/admin/gmr.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher_gmr: - -======================= -Guru Meditation Reports -======================= - -Watcher contains a mechanism whereby developers and system administrators can -generate a report about the state of a running Watcher service. This report -is called a *Guru Meditation Report* (*GMR* for short). - -Generating a GMR -================ - -A *GMR* can be generated by sending the *USR2* signal to any Watcher process -with support (see below). 
The *GMR* will then be outputted as standard error -for that particular process. - -For example, suppose that ``watcher-api`` has process id ``8675``, and was run -with ``2>/var/log/watcher/watcher-api-err.log``. Then, ``kill -USR2 8675`` -will trigger the Guru Meditation report to be printed to -``/var/log/watcher/watcher-api-err.log``. - -Structure of a GMR -================== - -The *GMR* is designed to be extensible; any particular service may add its -own sections. However, the base *GMR* consists of several sections: - -Package - Shows information about the package to which this process belongs, including - version informations. - -Threads - Shows stack traces and thread ids for each of the threads within this - process. - -Green Threads - Shows stack traces for each of the green threads within this process (green - threads don't have thread ids). - -Configuration - Lists all the configuration options currently accessible via the CONF object - for the current process. - -Plugins - Lists all the plugins currently accessible by the Watcher service. diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 62729ea..0000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -=================== -Administrator Guide -=================== - -.. toctree:: - :maxdepth: 2 - - apache-mod-wsgi - conf-files - configuration - gmr - policy - ways-to-install - ../strategies/index diff --git a/doc/source/admin/policy.rst b/doc/source/admin/policy.rst deleted file mode 100644 index 458c812..0000000 --- a/doc/source/admin/policy.rst +++ /dev/null @@ -1,142 +0,0 @@ -.. - Copyright 2016 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Policies -======== - -Watcher's public API calls may be restricted to certain sets of users using a -policy configuration file. This document explains exactly how policies are -configured and what they apply to. - -A policy is composed of a set of rules that are used in determining if a -particular action may be performed by the authorized tenant. - -Constructing a Policy Configuration File ----------------------------------------- - -A policy configuration file is simply a JSON object that contains sets of -rules. Each top-level key is the name of a rule. Each rule -is a string that describes an action that may be performed in the Watcher API. 
- -The actions that may have a rule enforced on them are: - -* ``strategy:get_all``, ``strategy:detail`` - List available strategies - - * ``GET /v1/strategies`` - * ``GET /v1/strategies/detail`` - -* ``strategy:get`` - Retrieve a specific strategy entity - - * ``GET /v1/strategies/`` - * ``GET /v1/strategies/`` - - -* ``goal:get_all``, ``goal:detail`` - List available goals - - * ``GET /v1/goals`` - * ``GET /v1/goals/detail`` - -* ``goal:get`` - Retrieve a specific goal entity - - * ``GET /v1/goals/`` - * ``GET /v1/goals/`` - - -* ``audit_template:get_all``, ``audit_template:detail`` - List available - audit_templates - - * ``GET /v1/audit_templates`` - * ``GET /v1/audit_templates/detail`` - -* ``audit_template:get`` - Retrieve a specific audit template entity - - * ``GET /v1/audit_templates/`` - * ``GET /v1/audit_templates/`` - -* ``audit_template:create`` - Create an audit template entity - - * ``POST /v1/audit_templates`` - -* ``audit_template:delete`` - Delete an audit template entity - - * ``DELETE /v1/audit_templates/`` - * ``DELETE /v1/audit_templates/`` - -* ``audit_template:update`` - Update an audit template entity - - * ``PATCH /v1/audit_templates/`` - * ``PATCH /v1/audit_templates/`` - - -* ``audit:get_all``, ``audit:detail`` - List available audits - - * ``GET /v1/audits`` - * ``GET /v1/audits/detail`` - -* ``audit:get`` - Retrieve a specific audit entity - - * ``GET /v1/audits/`` - -* ``audit:create`` - Create an audit entity - - * ``POST /v1/audits`` - -* ``audit:delete`` - Delete an audit entity - - * ``DELETE /v1/audits/`` - -* ``audit:update`` - Update an audit entity - - * ``PATCH /v1/audits/`` - - -* ``action_plan:get_all``, ``action_plan:detail`` - List available action plans - - * ``GET /v1/action_plans`` - * ``GET /v1/action_plans/detail`` - -* ``action_plan:get`` - Retrieve a specific action plan entity - - * ``GET /v1/action_plans/`` - -* ``action_plan:delete`` - Delete an action plan entity - - * ``DELETE /v1/action_plans/`` - -* 
``action_plan:update`` - Update an action plan entity - - * ``PATCH /v1/action_plans/`` - - -* ``action:get_all``, ``action:detail`` - List available actions - - * ``GET /v1/actions`` - * ``GET /v1/actions/detail`` - -* ``action:get`` - Retrieve a specific action entity - - * ``GET /v1/actions/`` - - - -To limit an action to a particular role or roles, you list the roles like so :: - - { - "audit:create": ["role:admin", "role:superuser"] - } - -The above would add a rule that only allowed users that had roles of either -"admin" or "superuser" to launch an audit. diff --git a/doc/source/admin/ways-to-install.rst b/doc/source/admin/ways-to-install.rst deleted file mode 100644 index 43bd888..0000000 --- a/doc/source/admin/ways-to-install.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======================= -Ways to install Watcher -======================= - -This document describes some ways to install Watcher in order to use it. -If you are intending to develop on or with Watcher, -please read :doc:`../dev/environment`. - -Prerequisites -------------- - -The source install instructions specifically avoid using platform specific -packages, instead using the source for the code and the Python Package Index -(PyPi_). - -.. _PyPi: https://pypi.python.org/pypi - -It's expected that your system already has python2.7_, latest version of pip_, -and git_ available. - -.. _python2.7: https://www.python.org -.. _pip: https://pip.pypa.io/en/latest/installing/ -.. _git: https://git-scm.com/ - -Your system shall also have some additional system libraries: - - On Ubuntu (tested on 14.04LTS): - - .. code-block:: bash - - $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev - - On Fedora-based distributions e.g., Fedora/RHEL/CentOS/Scientific Linux - (tested on CentOS 7.1): - - .. 
code-block:: bash - - $ sudo yum install gcc python-devel openssl-devel libffi-devel mysql-devel - - -Installing from Source ----------------------- - -Clone the Watcher repository: - -.. code-block:: bash - - $ git clone https://git.openstack.org/openstack/watcher.git - $ cd watcher - -Install the Watcher modules: - -.. code-block:: bash - - # python setup.py install - -The following commands should be available on the command-line path: - -* ``watcher-api`` the Watcher Web service used to handle RESTful requests -* ``watcher-decision-engine`` the Watcher Decision Engine used to build action - plans, according to optimization goals to achieve. -* ``watcher-applier`` the Watcher Applier module, used to apply action plan -* ``watcher-db-manage`` used to bootstrap Watcher data - -You will find sample configuration files in ``etc/watcher``: - -* ``watcher.conf.sample`` - -Install the Watcher modules dependencies: - -.. code-block:: bash - - # pip install -r requirements.txt - -From here, refer to :doc:`configuration` to declare Watcher as a new service -into Keystone and to configure its different modules. Once configured, you -should be able to run the Watcher services by issuing these commands: - -.. code-block:: bash - - $ watcher-api - $ watcher-decision-engine - $ watcher-applier - -By default, this will show logging on the console from which it was started. -Once started, you can use the `Watcher Client`_ to play with Watcher service. - -.. _`Watcher Client`: https://git.openstack.org/cgit/openstack/python-watcherclient - -Installing from packages: PyPI --------------------------------- - -Watcher package is available on PyPI repository. To install Watcher on your -system: - -.. code-block:: bash - - $ sudo pip install python-watcher - -The Watcher services along with its dependencies should then be automatically -installed on your system. 
- -Once installed, you still need to declare Watcher as a new service into -Keystone and to configure its different modules, which you can find described -in :doc:`configuration`. - - -Installing from packages: Debian (experimental) ------------------------------------------------ - -Experimental Debian packages are available on `Debian repositories`_. The best -way to use them is to install them into a Docker_ container. - -Here is single Dockerfile snippet you can use to run your Docker container: - -.. code-block:: bash - - FROM debian:experimental - MAINTAINER David TARDIVEL - - RUN apt-get update - RUN apt-get dist-upgrade -y - RUN apt-get install -y vim net-tools - RUN apt-get install -yt experimental watcher-api - - CMD ["/usr/bin/watcher-api"] - -Build your container from this Dockerfile: - -.. code-block:: bash - - $ docker build -t watcher/api . - -To run your container, execute this command: - -.. code-block:: bash - - $ docker run -d -p 9322:9322 watcher/api - -Check in your logs Watcher API is started - -.. code-block:: bash - - $ docker logs - -You can run similar container with Watcher Decision Engine (package -``watcher-decision-engine``) and with the Watcher Applier (package -``watcher-applier``). - -.. _Docker: https://www.docker.com/ -.. _`Debian repositories`: https://packages.debian.org/experimental/allpackages - - - - - diff --git a/doc/source/api/index.rst b/doc/source/api/index.rst deleted file mode 100644 index fd0ecfd..0000000 --- a/doc/source/api/index.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - v1 diff --git a/doc/source/api/v1.rst b/doc/source/api/v1.rst deleted file mode 100644 index b702137..0000000 --- a/doc/source/api/v1.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -==================== -RESTful Web API (v1) -==================== - -Goals -===== - -.. rest-controller:: watcher.api.controllers.v1.goal:GoalsController - :webprefix: /v1/goal - -.. autotype:: watcher.api.controllers.v1.goal.GoalCollection - :members: - -.. autotype:: watcher.api.controllers.v1.goal.Goal - :members: - -Strategies -========== - -.. rest-controller:: watcher.api.controllers.v1.strategy:StrategiesController - :webprefix: /v1/strategies - -.. autotype:: watcher.api.controllers.v1.strategy.StrategyCollection - :members: - -.. autotype:: watcher.api.controllers.v1.strategy.Strategy - :members: - -Audit Templates -=============== - -.. rest-controller:: watcher.api.controllers.v1.audit_template:AuditTemplatesController - :webprefix: /v1/audit_templates - -.. autotype:: watcher.api.controllers.v1.audit_template.AuditTemplateCollection - :members: - -.. autotype:: watcher.api.controllers.v1.audit_template.AuditTemplate - :members: - -Audits -====== - -.. rest-controller:: watcher.api.controllers.v1.audit:AuditsController - :webprefix: /v1/audits - -.. autotype:: watcher.api.controllers.v1.audit.AuditCollection - :members: - -.. autotype:: watcher.api.controllers.v1.audit.Audit - :members: - -Links -===== - -.. autotype:: watcher.api.controllers.link.Link - :members: - -Action Plans -============ - -.. rest-controller:: watcher.api.controllers.v1.action_plan:ActionPlansController - :webprefix: /v1/action_plans - -.. autotype:: watcher.api.controllers.v1.action_plan.ActionPlanCollection - :members: - -.. autotype:: watcher.api.controllers.v1.action_plan.ActionPlan - :members: - - -Actions -======= - -.. rest-controller:: watcher.api.controllers.v1.action:ActionsController - :webprefix: /v1/actions - -.. autotype:: watcher.api.controllers.v1.action.ActionCollection - :members: - -.. 
autotype:: watcher.api.controllers.v1.action.Action - :members: diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst deleted file mode 100644 index b903a97..0000000 --- a/doc/source/architecture.rst +++ /dev/null @@ -1,464 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _architecture: - -=================== -System Architecture -=================== - - -This page presents the current technical Architecture of the Watcher system. - -.. _architecture_overview: - -Overview -======== - -Below you will find a diagram, showing the main components of Watcher: - -.. image:: ./images/architecture.svg - :width: 110% - - -.. _components_definition: - -Components -========== - -.. _amqp_bus_definition: - -AMQP Bus --------- - -The AMQP message bus handles internal asynchronous communications between the -different Watcher components. - -.. _cluster_datasource_definition: - -Datasource ----------- - -This component stores the metrics related to the cluster. - -It can potentially rely on any appropriate storage system (InfluxDB, OpenTSDB, -MongoDB,...) but will probably be more performant when using -`Time Series Databases `_ -which are optimized for handling time series data, which are arrays of numbers -indexed by time (a datetime or a datetime range). - -.. _archi_watcher_api_definition: - -Watcher API ------------ - -This component implements the REST API provided by the Watcher system to the -external world. - -It enables the :ref:`Administrator ` of a -:ref:`Cluster ` to control and monitor the Watcher system -via any interaction mechanism connected to this API: - -- :ref:`CLI ` -- Horizon plugin -- Python SDK - -You can also read the detailed description of `Watcher API`_. - -.. 
_archi_watcher_applier_definition: - -Watcher Applier ---------------- - -This component is in charge of executing the -:ref:`Action Plan ` built by the -:ref:`Watcher Decision Engine `. - -It connects to the :ref:`message bus ` and launches the -:ref:`Action Plan ` whenever a triggering message is -received on a dedicated AMQP queue. - -The triggering message contains the Action Plan UUID. - -It then gets the detailed information about the -:ref:`Action Plan ` from the -:ref:`Watcher Database ` which contains the list -of :ref:`Actions ` to launch. - -It then loops on each :ref:`Action `, gets the associated -class and calls the execute() method of this class. -Most of the time, this method will first request a token to the Keystone API -and if it is allowed, sends a request to the REST API of the OpenStack service -which handles this kind of :ref:`atomic Action `. - -Note that as soon as :ref:`Watcher Applier ` starts -handling a given :ref:`Action ` from the list, a -notification message is sent on the :ref:`message bus ` -indicating that the state of the action has changed to **ONGOING**. - -If the :ref:`Action ` is successful, -the :ref:`Watcher Applier ` sends a notification -message on :ref:`the bus ` informing the other components -of this. - - -If the :ref:`Action ` fails, the -:ref:`Watcher Applier ` tries to rollback to the -previous state of the :ref:`Managed resource ` -(i.e. before the command was sent to the underlying OpenStack service). - -.. _archi_watcher_cli_definition: - -Watcher CLI ------------ - -The watcher command-line interface (CLI) can be used to interact with the -Watcher system in order to control it or to know its current status. - -Please, read `the detailed documentation about Watcher CLI -`_. - -.. _archi_watcher_dashboard_definition: - -Watcher Dashboard ------------------ - -The Watcher Dashboard can be used to interact with the Watcher system through -Horizon in order to control it or to know its current status. 
- -Please, read `the detailed documentation about Watcher Dashboard -`_. - -.. _archi_watcher_database_definition: - -Watcher Database ----------------- - -This database stores all the Watcher domain objects which can be requested -by the :ref:`Watcher API ` or the -:ref:`Watcher CLI `: - -- :ref:`Goals ` -- :ref:`Strategies ` -- :ref:`Audit templates ` -- :ref:`Audits ` -- :ref:`Action plans ` -- :ref:`Efficacy indicators ` via the Action - Plan API. -- :ref:`Actions ` - -The Watcher domain being here "*optimization of some resources provided by an -OpenStack system*". - -.. _archi_watcher_decision_engine_definition: - -Watcher Decision Engine ------------------------ - -This component is responsible for computing a set of potential optimization -:ref:`Actions ` in order to fulfill -the :ref:`Goal ` of an :ref:`Audit `. - -It first reads the parameters of the :ref:`Audit ` to know -the :ref:`Goal ` to achieve. - -Unless specified, it then selects the most appropriate :ref:`strategy -` from the list of available strategies achieving this -goal. - -The :ref:`Strategy ` is then dynamically loaded (via -`stevedore `_). The -:ref:`Watcher Decision Engine ` executes -the strategy. - -In order to compute the potential :ref:`Solution ` for the -Audit, the :ref:`Strategy ` relies on different sets of -data: - -- :ref:`Cluster data models ` that are - periodically synchronized through pluggable cluster data model collectors. - These models contain the current state of various - :ref:`Managed resources ` (e.g., the data stored - in the Nova database). These models gives a strategy the ability to reason on - the current state of a given :ref:`cluster `. -- The data stored in the :ref:`Cluster Datasource - ` which provides information about the past of - the :ref:`Cluster `. - -Here below is a sequence diagram showing how the Decision Engine builds and -maintains the :ref:`cluster data models ` that -are used by the strategies. - -.. 
image:: ./images/sequence_architecture_cdmc_sync.png - :width: 100% - -The execution of a strategy then yields a solution composed of a set of -:ref:`Actions ` as well as a set of :ref:`efficacy -indicators `. - -These :ref:`Actions ` are scheduled in time by the -:ref:`Watcher Planner ` (i.e., it generates an -:ref:`Action Plan `). - -.. _data_model: - -Data model -========== - -The following diagram shows the data model of Watcher, especially the -functional dependency of objects from the actors (Admin, Customer) point of -view (Goals, Audits, Action Plans, ...): - -.. image:: ./images/functional_data_model.svg - :width: 100% - -Here below is a diagram representing the main objects in Watcher from a -database perspective: - -.. image:: ./images/watcher_db_schema_diagram.png - - -.. _sequence_diagrams: - -Sequence diagrams -================= - -The following paragraph shows the messages exchanged between the different -components of Watcher for the most often used scenarios. - -.. _sequence_diagrams_create_audit_template: - -Create a new Audit Template ---------------------------- - -The :ref:`Administrator ` first creates an -:ref:`Audit template ` providing at least the -following parameters: - -- A name -- A goal to achieve -- An optional strategy - -.. image:: ./images/sequence_create_audit_template.png - :width: 100% - -The `Watcher API`_ makes sure that both the specified goal (mandatory) and -its associated strategy (optional) are registered inside the :ref:`Watcher -Database ` before storing a new audit template in -the :ref:`Watcher Database `. - -.. _sequence_diagrams_create_and_launch_audit: - -Create and launch a new Audit ------------------------------ - -The :ref:`Administrator ` can then launch a new -:ref:`Audit ` by providing at least the unique UUID of the -previously created :ref:`Audit template `: - -.. 
image:: ./images/sequence_create_and_launch_audit.png - :width: 100% - -The :ref:`Administrator ` also can specify type of -Audit and interval (in case of CONTINUOUS type). There is two types of Audit: -ONESHOT and CONTINUOUS. Oneshot Audit is launched once and if it succeeded -executed new action plan list will be provided. Continuous Audit creates -action plans with specified interval (in seconds); if action plan -has been created, all previous action plans get CANCELLED state. - -A message is sent on the :ref:`AMQP bus ` which triggers -the Audit in the -:ref:`Watcher Decision Engine `: - -.. image:: ./images/sequence_trigger_audit_in_decision_engine.png - :width: 100% - -The :ref:`Watcher Decision Engine ` reads -the Audit parameters from the -:ref:`Watcher Database `. It instantiates the -appropriate :ref:`strategy ` (using entry points) -given both the :ref:`goal ` and the strategy associated to the -parent :ref:`audit template ` of the :ref:`audit -`. If no strategy is associated to the audit template, the -strategy is dynamically selected by the Decision Engine. - -The :ref:`Watcher Decision Engine ` also -builds the :ref:`Cluster Data Model `. This -data model is needed by the :ref:`Strategy ` to know the -current state and topology of the audited -:ref:`OpenStack cluster `. - -The :ref:`Watcher Decision Engine ` calls -the **execute()** method of the instantiated -:ref:`Strategy ` and provides the data model as an input -parameter. This method computes a :ref:`Solution ` to -achieve the goal and returns it to the -:ref:`Decision Engine `. At this point, -actions are not scheduled yet. - -The :ref:`Watcher Decision Engine ` -dynamically loads the :ref:`Watcher Planner ` -implementation which is configured in Watcher (via entry points) and calls the -**schedule()** method of this class with the solution as an input parameter. 
-This method finds an appropriate scheduling of -:ref:`Actions ` taking into account some scheduling rules -(such as priorities between actions). -It generates a new :ref:`Action Plan ` with status -**RECOMMENDED** and saves it into the :ref:`Watcher Database -`. The saved action plan is now a scheduled flow -of actions to which a global efficacy is associated alongside a number of -:ref:`Efficacy Indicators ` as specified by the -related :ref:`goal `. - -If every step executed successfully, the -:ref:`Watcher Decision Engine ` updates -the current status of the Audit to **SUCCEEDED** in the -:ref:`Watcher Database ` and sends a notification -on the bus to inform other components that the :ref:`Audit ` -was successful. - -This internal workflow the Decision Engine follows to conduct an audit can be -seen in the sequence diagram here below: - -.. image:: ./images/sequence_from_audit_execution_to_actionplan_creation.png - :width: 100% - -.. _sequence_diagrams_launch_action_plan: - -Launch Action Plan ------------------- - -The :ref:`Administrator ` can then launch the -recommended :ref:`Action Plan `: - -.. image:: ./images/sequence_launch_action_plan.png - :width: 100% - -A message is sent on the :ref:`AMQP bus ` which triggers -the :ref:`Action Plan ` in the -:ref:`Watcher Applier `: - -.. image:: ./images/sequence_launch_action_plan_in_applier.png - :width: 100% - -The :ref:`Watcher Applier ` will get the -description of the flow of :ref:`Actions ` from the -:ref:`Watcher Database ` and for each -:ref:`Action ` it will instantiate a corresponding -:ref:`Action ` handler python class. - -The :ref:`Watcher Applier ` will then call the -following methods of the :ref:`Action ` handler: - -- **validate_parameters()**: this method will make sure that all the - provided input parameters are valid: - - - If all parameters are valid, the Watcher Applier moves on to the next - step. - - If it is not, an error is raised and the action is not executed. 
A - notification is sent on the bus informing other components of the - failure. - -- **preconditions()**: this method will make sure that all conditions are met - before executing the action (for example, it makes sure that an instance - still exists before trying to migrate it). -- **execute()**: this method is what triggers real commands on other - OpenStack services (such as Nova, ...) in order to change target resource - state. If the action is successfully executed, a notification message is - sent on the bus indicating that the new state of the action is - **SUCCEEDED**. - -If every action of the action flow has been executed successfully, a -notification is sent on the bus to indicate that the whole -:ref:`Action Plan ` has **SUCCEEDED**. - - -.. _state_machine_diagrams: - -State Machine diagrams -====================== - -.. _audit_state_machine: - -Audit State Machine -------------------- - -An :ref:`Audit ` has a life-cycle and its current state may -be one of the following: - -- **PENDING** : a request for an :ref:`Audit ` has been - submitted (either manually by the - :ref:`Administrator ` or automatically via some - event handling mechanism) and is in the queue for being processed by the - :ref:`Watcher Decision Engine ` -- **ONGOING** : the :ref:`Audit ` is currently being - processed by the - :ref:`Watcher Decision Engine ` -- **SUCCEEDED** : the :ref:`Audit ` has been executed - successfully and at least one solution was found -- **FAILED** : an error occurred while executing the - :ref:`Audit ` -- **DELETED** : the :ref:`Audit ` is still stored in the - :ref:`Watcher database ` but is not returned - any more through the Watcher APIs. 
-- **CANCELLED** : the :ref:`Audit ` was in **PENDING** or - **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** - state and was suspended by the - :ref:`Administrator ` - -The following diagram shows the different possible states of an -:ref:`Audit ` and what event makes the state change to a new -value: - -.. image:: ./images/audit_state_machine.png - :width: 100% - -.. _action_plan_state_machine: - -Action Plan State Machine -------------------------- - -An :ref:`Action Plan ` has a life-cycle and its current -state may be one of the following: - -- **RECOMMENDED** : the :ref:`Action Plan ` is waiting - for a validation from the :ref:`Administrator ` -- **PENDING** : a request for an :ref:`Action Plan ` - has been submitted (due to an - :ref:`Administrator ` executing an - :ref:`Audit `) and is in the queue for - being processed by the :ref:`Watcher Applier ` -- **ONGOING** : the :ref:`Action Plan ` is currently - being processed by the :ref:`Watcher Applier ` -- **SUCCEEDED** : the :ref:`Action Plan ` has been - executed successfully (i.e. all :ref:`Actions ` that it - contains have been executed successfully) -- **FAILED** : an error occurred while executing the - :ref:`Action Plan ` -- **DELETED** : the :ref:`Action Plan ` is still - stored in the :ref:`Watcher database ` but is - not returned any more through the Watcher APIs. -- **CANCELLED** : the :ref:`Action Plan ` was in - **RECOMMENDED**, **PENDING** or **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUPERSEDED** : the :ref:`Action Plan ` was in - RECOMMENDED state and was automatically superseded by Watcher, due to an - expiration delay or an update of the - :ref:`Cluster data model ` - - -The following diagram shows the different possible states of an -:ref:`Action Plan ` and what event makes the state -change to a new value: - -.. image:: ./images/action_plan_state_machine.png - :width: 100% - - - -.. 
_Watcher API: webapi/v1.html diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100755 index b73eca8..0000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from watcher import version as watcher_version -from watcher import objects - -objects.register_all() - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = [ - 'oslo_config.sphinxconfiggen', - 'openstackdocstheme', - 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'sphinxcontrib.httpdomain', - 'sphinxcontrib.pecanwsme.rest', - 'stevedore.sphinxext', - 'wsmeext.sphinxext', - 'ext.term', - 'ext.versioned_notifications', -] - -wsme_protocols = ['restjson'] -config_generator_config_file = '../../etc/watcher/watcher-config-generator.conf' -sample_config_basename = 'watcher' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Watcher' -copyright = u'OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -# The full version, including alpha/beta/rc tags. -release = watcher_version.version_info.release_string() -# The short X.Y version. -version = watcher_version.version_info.version_string() - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['watcher.'] - -exclude_patterns = [ - # The man directory includes some snippet files that are included - # in other documents during the build but that should not be - # included in the toctree themselves, so tell Sphinx to ignore - # them when scanning for input files. - 'man/footer.rst', - 'man/general-options.rst', - 'strategies/strategy-template.rst', - 'image_src/plantuml/README.rst', -] - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). 
-add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output -------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - -man_pages = [ - ('man/watcher-api', 'watcher-api', u'Watcher API Server', - [u'OpenStack'], 1), - ('man/watcher-applier', 'watcher-applier', u'Watcher Applier', - [u'OpenStack'], 1), - ('man/watcher-db-manage', 'watcher-db-manage', - u'Watcher Db Management Utility', [u'OpenStack'], 1), - ('man/watcher-decision-engine', 'watcher-decision-engine', - u'Watcher Decision Engine', [u'OpenStack'], 1), -] - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -html_theme = 'openstackdocs' -# html_static_path = ['static'] -# html_theme_options = {} - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -#openstackdocstheme options -repository_name = 'openstack/watcher' -bug_project = 'watcher' -bug_tag = '' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-# intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/config-generator.conf b/doc/source/config-generator.conf deleted file mode 100644 index e704259..0000000 --- a/doc/source/config-generator.conf +++ /dev/null @@ -1 +0,0 @@ -../../etc/watcher/watcher-config-generator.conf diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst deleted file mode 100644 index ec0a284..0000000 --- a/doc/source/contributor/contributing.rst +++ /dev/null @@ -1,72 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _contributing: - -======================= -Contributing to Watcher -======================= - -If you're interested in contributing to the Watcher project, -the following will help get you started. - -Contributor License Agreement ------------------------------ - -.. index:: - single: license; agreement - -In order to contribute to the Watcher project, you need to have -signed OpenStack's contributor's agreement. - -.. seealso:: - - * http://docs.openstack.org/infra/manual/developers.html - * http://wiki.openstack.org/CLA - -LaunchPad Project ------------------ - -Most of the tools used for OpenStack depend on a launchpad.net ID for -authentication. After signing up for a launchpad account, join the -"openstack" team to have access to the mailing list and receive -notifications of important events. - -.. 
seealso:: - - * http://launchpad.net - * http://launchpad.net/watcher - * http://launchpad.net/~openstack - - -Project Hosting Details ------------------------ - -Bug tracker - http://launchpad.net/watcher - -Mailing list (prefix subjects with ``[watcher]`` for faster responses) - http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev - -Wiki - http://wiki.openstack.org/Watcher - -Code Hosting - https://git.openstack.org/cgit/openstack/watcher - -Code Review - https://review.openstack.org/#/q/status:open+project:openstack/watcher,n,z - -IRC Channel - ``#openstack-watcher`` (changelog_) - -Weekly Meetings - On Wednesdays at 14:00 UTC on even weeks in the ``#openstack-meeting-4`` - IRC channel, 13:00 UTC on odd weeks in the ``#openstack-meeting-alt`` - IRC channel (`meetings logs`_) - -.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/ -.. _meetings logs: http://eavesdrop.openstack.org/meetings/watcher/ diff --git a/doc/source/contributor/devstack.rst b/doc/source/contributor/devstack.rst deleted file mode 100644 index d27f6a7..0000000 --- a/doc/source/contributor/devstack.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -============================================= -Set up a development environment via DevStack -============================================= - -Watcher is currently able to optimize compute resources - specifically Nova -compute hosts - via operations such as live migrations. In order for you to -fully be able to exercise what Watcher can do, it is necessary to have a -multinode environment to use. - -You can set up the Watcher services quickly and easily using a Watcher -DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin -model. 
To enable the Watcher plugin with DevStack, add the following to the -`[[local|localrc]]` section of your controller's `local.conf` to enable the -Watcher plugin:: - - enable_plugin watcher git://git.openstack.org/openstack/watcher - -For more detailed instructions, see `Detailed DevStack Instructions`_. Check -out the `DevStack documentation`_ for more information regarding DevStack. - -.. _PluginModelDocs: http://docs.openstack.org/developer/devstack/plugins.html -.. _DevStack documentation: http://docs.openstack.org/developer/devstack/ - -Detailed DevStack Instructions -============================== - -#. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack). - One of these servers will be the controller node while the others will be - compute nodes. N is preferably >= 3 so that you have at least 2 compute - nodes, but in order to stand up the Watcher services only 1 server is - needed (i.e., no computes are needed if you want to just experiment with - the Watcher services). These servers can be VMs running on your local - machine via VirtualBox if you prefer. DevStack currently recommends that - you use Ubuntu 14.04 LTS. The servers should also have connections to the - same network such that they are all able to communicate with one another. - -#. For each server, clone the DevStack repository and create the stack user:: - - sudo apt-get update - sudo apt-get install git - git clone https://git.openstack.org/openstack-dev/devstack - sudo ./devstack/tools/create-stack-user.sh - - Now you have a stack user that is used to run the DevStack processes. You - may want to give your stack user a password to allow SSH via a password:: - - sudo passwd stack - -#. Switch to the stack user and clone the DevStack repo again:: - - sudo su stack - cd ~ - git clone https://git.openstack.org/openstack-dev/devstack - -#. For each compute node, copy the provided `local.conf.compute`_ example file - to the compute node's system at ~/devstack/local.conf. 
Make sure the - HOST_IP and SERVICE_HOST values are changed appropriately - i.e., HOST_IP - is set to the IP address of the compute node and SERVICE_HOST is set to the - IP address of the controller node. - - If you need specific metrics collected (or want to use something other - than Ceilometer), be sure to configure it. For example, in the - `local.conf.compute`_ example file, the appropriate ceilometer plugins and - services are enabled and disabled. If you were using something other than - Ceilometer, then you would likely want to configure it likewise. The - example file also sets the compute monitors nova configuration option to - use the CPU virt driver. If you needed other metrics, it may be necessary - to configure similar configuration options for the projects providing those - metrics. - -#. For the controller node, copy the provided `local.conf.controller`_ example - file to the controller node's system at ~/devstack/local.conf. Make sure - the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP - address of the controller node. - - Note: if you want to use another Watcher git repository (such as a local - one), then change the enable plugin line:: - - enable_plugin watcher [optional_branch] - - If you do this, then the Watcher DevStack plugin will try to pull the - python-watcherclient repo from /../, so either make - sure that is also available or specify WATCHERCLIENT_REPO in the local.conf - file. - - Note: if you want to use a specific branch, specify WATCHER_BRANCH in the - local.conf file. By default it will use the master branch. - - Note: watcher-api will default run under apache/httpd, set the variable - WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd. - For development environment it is suggested to set WATHCER_USE_MOD_WSGI - to FALSE. For Production environment it is suggested to keep it at the - default TRUE value. - -#. Start stacking from the controller node:: - - ./devstack/stack.sh - -#. 
Start stacking on each of the compute nodes using the same command. - -#. Configure the environment for live migration via NFS. See the - `Multi-Node DevStack Environment`_ section for more details. - -.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller -.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute - -Multi-Node DevStack Environment -=============================== - -Since deploying Watcher with only a single compute node is not very useful, a -few tips are given here for enabling a multi-node environment with live -migration. - -Configuring NFS Server ----------------------- - -If you would like to use live migration for shared storage, then the controller -can serve as the NFS server if needed:: - - sudo apt-get install nfs-kernel-server - sudo mkdir -p /nfs/instances - sudo chown stack:stack /nfs/instances - -Add an entry to `/etc/exports` with the appropriate gateway and netmask -information:: - - /nfs/instances /(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash) - -Export the NFS directories:: - - sudo exportfs -ra - -Make sure the NFS server is running:: - - sudo service nfs-kernel-server status - -If the server is not running, then start it:: - - sudo service nfs-kernel-server start - -Configuring NFS on Compute Node -------------------------------- - -Each compute node needs to use the NFS server to hold the instance data:: - - sudo apt-get install rpcbind nfs-common - mkdir -p /opt/stack/data/instances - sudo mount :/nfs/instances /opt/stack/data/instances - -If you would like to have the NFS directory automatically mounted on reboot, -then add the following to `/etc/fstab`:: - - :/nfs/instances /opt/stack/data/instances nfs auto 0 0 - -Edit `/etc/libvirt/libvirtd.conf` to make sure the following values are set:: - - listen_tls = 0 - listen_tcp = 1 - auth_tcp = "none" - -Edit `/etc/default/libvirt-bin`:: - - libvirtd_opts="-d -l" - 
-Restart the libvirt service:: - - sudo service libvirt-bin restart - -Setting up SSH keys between compute nodes to enable live migration ------------------------------------------------------------------- - -In order for live migration to work, SSH keys need to be exchanged between -each compute node: - -1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub) - needs to be in the DESTINATION stack user's authorized_keys file - (~stack/.ssh/authorized_keys). This can be accomplished by manually - copying the contents from the file on the SOURCE to the DESTINATION. If - you have a password configured for the stack user, then you can use the - following command to accomplish the same thing:: - - ssh-copy-id -i /root/.ssh/id_rsa.pub stack@DESTINATION - -2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub) - needs to be in the SOURCE root user's known_hosts file - (/root/.ssh/known_hosts). This can be accomplished by running the - following on the SOURCE machine (hostname must be used):: - - ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts - -In essence, this means that every compute node's root user's public RSA key -must exist in every other compute node's stack user's authorized_keys file and -every compute node's public ECDSA key needs to be in every other compute -node's root user's known_hosts file. - -Disable serial console ----------------------- - -Serial console needs to be disabled for live migration to work. - -On both the controller and compute node, in /etc/nova/nova.conf - -[serial_console] -enabled = False - -Alternatively, in devstack's local.conf: - -[[post-config|$NOVA_CONF]] -[serial_console] -#enabled=false - - -VNC server configuration ------------------------- - -The VNC server listening parameter needs to be set to any address so -that the server can accept connections from all of the compute nodes. 
- -On both the controller and compute node, in /etc/nova/nova.conf - -vncserver_listen = 0.0.0.0 - -Alternatively, in devstack's local.conf: - -VNCSERVER_LISTEN=0.0.0.0 - - -Environment final checkup -------------------------- - -If you are willing to make sure everything is in order in your DevStack -environment, you can run the Watcher Tempest tests which will validate its API -but also that you can perform the typical Watcher workflows. To do so, have a -look at the :ref:`Tempest tests ` section which will explain to -you how to run them. diff --git a/doc/source/contributor/environment.rst b/doc/source/contributor/environment.rst deleted file mode 100644 index 7404a7b..0000000 --- a/doc/source/contributor/environment.rst +++ /dev/null @@ -1,275 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher_developement_environment: - -========================================= -Set up a development environment manually -========================================= - -This document describes getting the source from watcher `Git repository`_ -for development purposes. - -To install Watcher from packaging, refer instead to Watcher `User -Documentation`_. - -.. _`Git Repository`: https://git.openstack.org/cgit/openstack/watcher -.. _`User Documentation`: https://docs.openstack.org/watcher/latest/ - -Prerequisites -============= - -This document assumes you are using Ubuntu or Fedora, and that you have the -following tools available on your system: - -- Python_ 2.7 and 3.4 -- git_ -- setuptools_ -- pip_ -- msgfmt (part of the gettext package) -- virtualenv and virtualenvwrapper_ - -**Reminder**: If you're successfully using a different platform, or a -different version of the above, please document your configuration here! - -.. _Python: https://www.python.org/ -.. _git: https://git-scm.com/ -.. 
_setuptools: https://pypi.python.org/pypi/setuptools -.. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/install.html - -Getting the latest code -======================= - -Make a clone of the code from our `Git repository`: - -.. code-block:: bash - - $ git clone https://git.openstack.org/openstack/watcher.git - -When that is complete, you can: - -.. code-block:: bash - - $ cd watcher - -Installing dependencies -======================= - -Watcher maintains two lists of dependencies:: - - requirements.txt - test-requirements.txt - -The first is the list of dependencies needed for running Watcher, the second -list includes dependencies used for active development and testing of Watcher -itself. - -These dependencies can be installed from PyPi_ using the Python tool pip_. - -.. _PyPi: http://pypi.python.org/ -.. _pip: http://pypi.python.org/pypi/pip - -However, your system *may* need additional dependencies that `pip` (and by -extension, PyPi) cannot satisfy. These dependencies should be installed -prior to using `pip`, and the installation method may vary depending on -your platform. - -* Ubuntu 14.04:: - - $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev - -* Fedora 19+:: - - $ sudo yum install openssl-devel libffi-devel mysql-devel - -* CentOS 7:: - - $ sudo yum install gcc python-devel libxml2-devel libxslt-devel mariadb-devel - -PyPi Packages and VirtualEnv ----------------------------- - -We recommend establishing a virtualenv to run Watcher within. virtualenv -limits the Python environment to just what you're installing as dependencies, -useful to keep a clean environment for working on Watcher. - -.. 
code-block:: bash - - $ mkvirtualenv watcher - $ git clone https://git.openstack.org/openstack/watcher - - # Use 'python setup.py' to link Watcher into Python's site-packages - $ cd watcher && python setup.py install - - # Install the dependencies for running Watcher - $ pip install -r ./requirements.txt - - # Install the dependencies for developing, testing, and running Watcher - $ pip install -r ./test-requirements.txt - -This will create a local virtual environment in the directory ``$WORKON_HOME``. -The virtual environment can be disabled using the command: - -.. code-block:: bash - - $ deactivate - -You can re-activate this virtualenv for your current shell using: - -.. code-block:: bash - - $ workon watcher - -For more information on virtual environments, see virtualenv_. - -.. _virtualenv: http://www.virtualenv.org/ - - - -Verifying Watcher is set up -=========================== - -Once set up, either directly or within a virtualenv, you should be able to -invoke Python and import the libraries. If you're using a virtualenv, don't -forget to activate it: - -.. code-block:: bash - - $ workon watcher - -You should then be able to `import watcher` using Python without issue: - -.. code-block:: bash - - $ python -c "import watcher" - -If you can import watcher without a traceback, you should be ready to develop. - -Run Watcher tests -================= - -Watcher provides both :ref:`unit tests ` and -:ref:`functional/tempest tests `. Please refer to :doc:`testing` -to understand how to run them. - - -Build the Watcher documentation -=============================== - -You can easily build the HTML documentation from ``doc/source`` files, by using -``tox``: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ cd watcher - (watcher) $ tox -edocs - -The HTML files are available into ``doc/build`` directory. - - -Configure the Watcher services -============================== - -Watcher services require a configuration file. 
Use tox to generate -a sample configuration file that can be used to get started: - -.. code-block:: bash - - $ tox -e genconfig - $ cp etc/watcher.conf.sample etc/watcher.conf - -Most of the default configuration should be enough to get you going, but you -still need to configure the following sections: - -- The ``[database]`` section to configure the - :ref:`Watcher database ` -- The ``[keystone_authtoken]`` section to configure the - :ref:`Identity service ` i.e. Keystone -- The ``[watcher_messaging]`` section to configure the OpenStack AMQP-based - message bus - -So if you need some more details on how to configure one or more of these -sections, please do have a look at :doc:`../deploy/configuration` before -continuing. - - -Create Watcher SQL database -=========================== - -When initially getting set up, after you've configured which databases to use, -you're probably going to need to run the following to your database schema in -place: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-db-manage create_schema - - -Running Watcher services -======================== - -To run the Watcher API service, use: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-api - -To run the Watcher Decision Engine service, use: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-decision-engine - -To run the Watcher Applier service, use: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-applier - -Default configuration of these services are available into ``/etc/watcher`` -directory. See :doc:`../deploy/configuration` for details on how Watcher is -configured. By default, Watcher is configured with SQL backends. - - -Interact with Watcher -===================== - -You can also interact with Watcher through its REST API. 
There is a Python -Watcher client library `python-watcherclient`_ which interacts exclusively -through the REST API, and which Watcher itself uses to provide its command-line -interface. - -.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient - -There is also an Horizon plugin for Watcher `watcher-dashboard`_ which -allows to interact with Watcher through a web-based interface. - -.. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard - - -Exercising the Watcher Services locally -======================================= - -If you would like to exercise the Watcher services in isolation within a local -virtual environment, you can do this without starting any other OpenStack -services. For example, this is useful for rapidly prototyping and debugging -interactions over the RPC channel, testing database migrations, and so forth. - -You will find in the `watcher-tools`_ project, Ansible playbooks and Docker -template files to easily play with Watcher services within a minimal OpenStack -isolated environment (Identity, Message Bus, SQL database, Horizon, ...). - -.. _`watcher-tools`: https://github.com/b-com/watcher-tools diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index eddf972..0000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - environment - devstack - notifications - testing - rally_link diff --git a/doc/source/contributor/notifications.rst b/doc/source/contributor/notifications.rst deleted file mode 100644 index 6ee7339..0000000 --- a/doc/source/contributor/notifications.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher_notifications: - -======================== -Notifications in Watcher -======================== - -.. 
versioned_notifications:: diff --git a/doc/source/contributor/plugin/action-plugin.rst b/doc/source/contributor/plugin/action-plugin.rst deleted file mode 100644 index 29e9bcc..0000000 --- a/doc/source/contributor/plugin/action-plugin.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_action_plugin: - -================== -Build a new action -================== - -Watcher Applier has an external :ref:`action ` plugin -interface which gives anyone the ability to integrate an external -:ref:`action ` in order to extend the initial set of actions -Watcher provides. - -This section gives some guidelines on how to implement and integrate custom -actions with Watcher. - - -Creating a new plugin -===================== - -First of all you have to extend the base :py:class:`BaseAction` class which -defines a set of abstract methods and/or properties that you will have to -implement: - - - The :py:attr:`~.BaseAction.schema` is an abstract property that you have to - implement. This is the first function to be called by the - :ref:`applier ` before any further processing - and its role is to validate the input parameters that were provided to it. - - The :py:meth:`~.BaseAction.pre_condition` is called before the execution of - an action. This method is a hook that can be used to perform some - initializations or to make some more advanced validation on its input - parameters. If you wish to block the execution based on this factor, you - simply have to ``raise`` an exception. - - The :py:meth:`~.BaseAction.post_condition` is called after the execution of - an action. As this function is called regardless of whether an action - succeeded or not, this can prove itself useful to perform cleanup - operations. - - The :py:meth:`~.BaseAction.execute` is the main component of an action. 
- This is where you should implement the logic of your action. - - The :py:meth:`~.BaseAction.revert` allows you to roll back the targeted - resource to its original state following a faulty execution. Indeed, this - method is called by the workflow engine whenever an action raises an - exception. - -Here is an example showing how you can write a plugin called ``DummyAction``: - -.. code-block:: python - - # Filepath = /thirdparty/dummy.py - # Import path = thirdparty.dummy - import voluptuous - - from watcher.applier.actions import base - - - class DummyAction(base.BaseAction): - - @property - def schema(self): - return voluptuous.Schema({}) - - def execute(self): - # Does nothing - pass # Only returning False is considered as a failure - - def revert(self): - # Does nothing - pass - - def pre_condition(self): - # No pre-checks are done here - pass - - def post_condition(self): - # Nothing done here - pass - - -This implementation is the most basic one. So in order to get a better -understanding on how to implement a more advanced action, have a look at the -:py:class:`~watcher.applier.actions.migration.Migrate` class. - -Input validation ----------------- - -As you can see in the previous example, we are using `Voluptuous`_ to validate -the input parameters of an action. So if you want to learn more about how to -work with `Voluptuous`_, you can have a look at their `documentation`_: - -.. _Voluptuous: https://github.com/alecthomas/voluptuous -.. _documentation: https://github.com/alecthomas/voluptuous/blob/master/README.md - - -Define configuration parameters -=============================== - -At this point, you have a fully functional action. However, in more complex -implementation, you may want to define some configuration options so one can -tune the action to its needs. To do so, you can implement the -:py:meth:`~.Loadable.get_config_opts` class method as followed: - -.. 
code-block:: python - - from oslo_config import cfg - - class DummyAction(base.BaseAction): - - # [...] - - def execute(self): - assert self.config.test_opt == 0 - - @classmethod - def get_config_opts(cls): - return super( - DummyAction, cls).get_config_opts() + [ - cfg.StrOpt('test_opt', help="Demo Option.", default=0), - # Some more options ... - ] - - -The configuration options defined within this class method will be included -within the global ``watcher.conf`` configuration file under a section named by -convention: ``{namespace}.{plugin_name}``. In our case, the ``watcher.conf`` -configuration would have to be modified as followed: - -.. code-block:: ini - - [watcher_actions.dummy] - # Option used for testing. - test_opt = test_value - -Then, the configuration options you define within this method will then be -injected in each instantiated object via the ``config`` parameter of the -:py:meth:`~.BaseAction.__init__` method. - - -Abstract Plugin Class -===================== - -Here below is the abstract ``BaseAction`` class that every single action -should implement: - -.. autoclass:: watcher.applier.actions.base.BaseAction - :members: - :special-members: __init__ - :noindex: - - .. py:attribute:: schema - - Defines a Schema that the input parameters shall comply to - - :returns: A schema declaring the input parameters this action should be - provided along with their respective constraints - (e.g. type, value range, ...) - :rtype: :py:class:`voluptuous.Schema` instance - - -Register a new entry point -========================== - -In order for the Watcher Applier to load your new action, the -action must be registered as a named entry point under the -``watcher_actions`` entry point of your ``setup.py`` file. If you are using -pbr_, this entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique. - -Here below is how you would proceed to register ``DummyAction`` using pbr_: - -.. 
code-block:: ini - - [entry_points] - watcher_actions = - dummy = thirdparty.dummy:DummyAction - -.. _pbr: http://docs.openstack.org/developer/pbr/ - - -Using action plugins -==================== - -The Watcher Applier service will automatically discover any installed plugins -when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. - -At this point, you can use your new action plugin in your :ref:`strategy plugin -` if you reference it via the use of the -:py:meth:`~.Solution.add_action` method: - -.. code-block:: python - - # [...] - self.solution.add_action( - action_type="dummy", # Name of the entry point we registered earlier - applies_to="", - input_parameters={}) - -By doing so, your action will be saved within the Watcher Database, ready to be -processed by the planner for creating an action plan which can then be executed -by the Watcher Applier via its workflow engine. - -At the last, remember to add the action into the weights in ``watcher.conf``, -otherwise you will get an error when the action be referenced in a strategy. - - -Scheduling of an action plugin -============================== - -Watcher provides a basic built-in :ref:`planner ` -which is only able to process the Watcher built-in actions. Therefore, you will -either have to use an existing third-party planner or :ref:`implement another -planner ` that will be able to take into account your -new action plugin. diff --git a/doc/source/contributor/plugin/base-setup.rst b/doc/source/contributor/plugin/base-setup.rst deleted file mode 100644 index 3e829f5..0000000 --- a/doc/source/contributor/plugin/base-setup.rst +++ /dev/null @@ -1,100 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. 
_plugin-base_setup: - -======================================= -Create a third-party plugin for Watcher -======================================= - -Watcher provides a plugin architecture which allows anyone to extend the -existing functionalities by implementing third-party plugins. This process can -be cumbersome so this documentation is there to help you get going as quickly -as possible. - - -Pre-requisites -============== - -We assume that you have set up a working Watcher development environment. So if -this not already the case, you can check out our documentation which explains -how to set up a :ref:`development environment -`. - -.. _development environment: - -Third party project scaffolding -=============================== - -First off, we need to create the project structure. To do so, we can use -`cookiecutter`_ and the `OpenStack cookiecutter`_ project scaffolder to -generate the skeleton of our project:: - - $ virtualenv thirdparty - $ source thirdparty/bin/activate - $ pip install cookiecutter - $ cookiecutter https://github.com/openstack-dev/cookiecutter - -The last command will ask you for many information, and If you set -``module_name`` and ``repo_name`` as ``thirdparty``, you should end up with a -structure that looks like this:: - - $ cd thirdparty - $ tree . - . - ├── babel.cfg - ├── CONTRIBUTING.rst - ├── doc - │   └── source - │   ├── conf.py - │   ├── contributing.rst - │   ├── index.rst - │   ├── installation.rst - │   ├── readme.rst - │   └── usage.rst - ├── HACKING.rst - ├── LICENSE - ├── MANIFEST.in - ├── README.rst - ├── requirements.txt - ├── setup.cfg - ├── setup.py - ├── test-requirements.txt - ├── thirdparty - │   ├── __init__.py - │   └── tests - │   ├── base.py - │   ├── __init__.py - │   └── test_thirdparty.py - └── tox.ini - -**Note:** You should add `python-watcher`_ as a dependency in the -requirements.txt file:: - - # Watcher-specific requirements - python-watcher - -.. 
_cookiecutter: https://github.com/audreyr/cookiecutter -.. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter -.. _python-watcher: https://pypi.python.org/pypi/python-watcher - -Implementing a plugin for Watcher -================================= - -Now that the project skeleton has been created, you can start the -implementation of your plugin. As of now, you can implement the following -plugins for Watcher: - -- A :ref:`goal plugin ` -- A :ref:`strategy plugin ` -- An :ref:`action plugin ` -- A :ref:`planner plugin ` -- A workflow engine plugin -- A :ref:`cluster data model collector plugin - ` - -If you want to learn more on how to implement them, you can refer to their -dedicated documentation. diff --git a/doc/source/contributor/plugin/cdmc-plugin.rst b/doc/source/contributor/plugin/cdmc-plugin.rst deleted file mode 100644 index 179c627..0000000 --- a/doc/source/contributor/plugin/cdmc-plugin.rst +++ /dev/null @@ -1,272 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_cluster_data_model_collector_plugin: - -======================================== -Build a new cluster data model collector -======================================== - -Watcher Decision Engine has an external cluster data model (CDM) plugin -interface which gives anyone the ability to integrate an external cluster data -model collector (CDMC) in order to extend the initial set of cluster data model -collectors Watcher provides. - -This section gives some guidelines on how to implement and integrate custom -cluster data model collectors within Watcher. - - -Creating a new plugin -===================== - -In order to create a new cluster data model collector, you have to: - -- Extend the :py:class:`~.base.BaseClusterDataModelCollector` class. 
-- Implement its :py:meth:`~.BaseClusterDataModelCollector.execute` abstract - method to return your entire cluster data model that this method should - build. -- Implement its :py:meth:`~.Goal.notification_endpoints` abstract property to - return the list of all the :py:class:`~.base.NotificationEndpoint` instances - that will be responsible for handling incoming notifications in order to - incrementally update your cluster data model. - -First of all, you have to extend the :class:`~.BaseClusterDataModelCollector` -base class which defines the :py:meth:`~.BaseClusterDataModelCollector.execute` -abstract method you will have to implement. This method is responsible for -building an entire cluster data model. - -Here is an example showing how you can write a plugin called -``DummyClusterDataModelCollector``: - -.. code-block:: python - - # Filepath = /thirdparty/dummy.py - # Import path = thirdparty.dummy - - from watcher.decision_engine.model import model_root - from watcher.decision_engine.model.collector import base - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = model_root.ModelRoot() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [] - -This implementation is the most basic one. So in order to get a better -understanding on how to implement a more advanced cluster data model collector, -have a look at the :py:class:`~.NovaClusterDataModelCollector` class. - -Define a custom model -===================== - -As you may have noticed in the above example, we are reusing an existing model -provided by Watcher. However, this model can be easily customized by -implementing a new class that would implement the :py:class:`~.Model` abstract -base class. Here below is simple example on how to proceed in implementing a -custom Model: - -.. 
code-block:: python - - # Filepath = /thirdparty/dummy.py - # Import path = thirdparty.dummy - - from watcher.decision_engine.model import base as modelbase - from watcher.decision_engine.model.collector import base - - - class MyModel(modelbase.Model): - - def to_string(self): - return 'MyModel' - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = MyModel() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [] - -Here below is the abstract ``Model`` class that every single cluster data model -should implement: - -.. autoclass:: watcher.decision_engine.model.base.Model - :members: - :special-members: __init__ - :noindex: - -Define configuration parameters -=============================== - -At this point, you have a fully functional cluster data model collector. -By default, cluster data model collectors define a ``period`` option (see -:py:meth:`~.BaseClusterDataModelCollector.get_config_opts`) that corresponds -to the interval of time between each synchronization of the in-memory model. - -However, in more complex implementation, you may want to define some -configuration options so one can tune the cluster data model collector to your -needs. To do so, you can implement the :py:meth:`~.Loadable.get_config_opts` -class method as followed: - -.. code-block:: python - - from oslo_config import cfg - from watcher.decision_engine.model import model_root - from watcher.decision_engine.model.collector import base - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = model_root.ModelRoot() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [] - - @classmethod - def get_config_opts(cls): - return super( - DummyClusterDataModelCollector, cls).get_config_opts() + [ - cfg.StrOpt('test_opt', help="Demo Option.", default=0), - # Some more options ... 
- ] - -The configuration options defined within this class method will be included -within the global ``watcher.conf`` configuration file under a section named by -convention: ``{namespace}.{plugin_name}`` (see section :ref:`Register a new -entry point `). The namespace for CDMC plugins is -``watcher_cluster_data_model_collectors``, so in our case, the ``watcher.conf`` -configuration would have to be modified as follows: - -.. code-block:: ini - - [watcher_cluster_data_model_collectors.dummy] - # Option used for testing. - test_opt = test_value - -Then, the configuration options you define within this method will be -injected in each instantiated object via the ``config`` parameter of the -:py:meth:`~.BaseClusterDataModelCollector.__init__` method. - - -Abstract Plugin Class -===================== - -Here below is the abstract ``BaseClusterDataModelCollector`` class that every -single cluster data model collector should implement: - -.. autoclass:: watcher.decision_engine.model.collector.base.BaseClusterDataModelCollector - :members: - :special-members: __init__ - :noindex: - - -.. _register_new_cdmc_entrypoint: - -Register a new entry point -========================== - -In order for the Watcher Decision Engine to load your new cluster data model -collector, the latter must be registered as a named entry point under the -``watcher_cluster_data_model_collectors`` entry point namespace of your -``setup.py`` file. If you are using pbr_, this entry point should be placed in -your ``setup.cfg`` file. - -The name you give to your entry point has to be unique. - -Here below is how to register ``DummyClusterDataModelCollector`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_cluster_data_model_collectors = - dummy = thirdparty.dummy:DummyClusterDataModelCollector - -..
_pbr: http://docs.openstack.org/developer/pbr/ - - -Add new notification endpoints -============================== - -At this point, you have a fully functional cluster data model collector. -However, this CDMC is only refreshed periodically via a background scheduler. -As you may sometimes execute a strategy with a stale CDM due to a high activity -on your infrastructure, you can define some notification endpoints that will be -responsible for incrementally updating the CDM based on notifications emitted -by other services such as Nova. To do so, you can implement and register a new -``DummyEndpoint`` notification endpoint regarding a ``dummy`` event as shown -below: - -.. code-block:: python - - from watcher.decision_engine.model import model_root - from watcher.decision_engine.model.collector import base - - - class DummyNotification(base.NotificationEndpoint): - - @property - def filter_rule(self): - return filtering.NotificationFilter( - publisher_id=r'.*', - event_type=r'^dummy$', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - # Do some CDM modifications here... - pass - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = model_root.ModelRoot() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [DummyNotification(self)] - - -Note that if the event you are trying to listen to is published by a new -service, you may have to also add a new topic Watcher will have to subscribe to -in the ``notification_topics`` option of the ``[watcher_decision_engine]`` -section. - - -Using cluster data model collector plugins -========================================== - -The Watcher Decision Engine service will automatically discover any installed -plugins when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. 
- -At this point, you can use your new cluster data model plugin in your -:ref:`strategy plugin ` by using the -:py:attr:`~.BaseStrategy.collector_manager` property as followed: - -.. code-block:: python - - # [...] - dummy_collector = self.collector_manager.get_cluster_model_collector( - "dummy") # "dummy" is the name of the entry point we declared earlier - dummy_model = dummy_collector.get_latest_cluster_data_model() - # Do some stuff with this model diff --git a/doc/source/contributor/plugin/goal-plugin.rst b/doc/source/contributor/plugin/goal-plugin.rst deleted file mode 100644 index f0e7dd8..0000000 --- a/doc/source/contributor/plugin/goal-plugin.rst +++ /dev/null @@ -1,215 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_goal_plugin: - -================ -Build a new goal -================ - -Watcher Decision Engine has an external :ref:`goal ` -plugin interface which gives anyone the ability to integrate an external -goal which can be achieved by a :ref:`strategy `. - -This section gives some guidelines on how to implement and integrate custom -goals with Watcher. If you wish to create a third-party package for your -plugin, you can refer to our :ref:`documentation for third-party package -creation `. - - -Pre-requisites -============== - -Before using any goal, please make sure that none of the existing goals fit -your needs. Indeed, the underlying value of defining a goal is to be able to -compare the efficacy of the action plans resulting from the various strategies -satisfying the same goal. By doing so, Watcher can assist the administrator -in his choices. - - -Create a new plugin -=================== - -In order to create a new goal, you have to: - -- Extend the :py:class:`~.base.Goal` class. 
-- Implement its :py:meth:`~.Goal.get_name` class method to return the - **unique** ID of the new goal you want to create. This unique ID should - be the same as the name of :ref:`the entry point you will declare later on - `. -- Implement its :py:meth:`~.Goal.get_display_name` class method to - return the translated display name of the goal you want to create. - Note: Do not use a variable to return the translated string so it can be - automatically collected by the translation tool. -- Implement its :py:meth:`~.Goal.get_translatable_display_name` - class method to return the translation key (actually the English display - name) of your new goal. The value returned should be the same as the - string translated in :py:meth:`~.Goal.get_display_name`. -- Implement its :py:meth:`~.Goal.get_efficacy_specification` method to return - the :ref:`efficacy specification ` for - your goal. - -Here is an example showing how you can define a new ``NewGoal`` goal plugin: - -.. code-block:: python - - # filepath: thirdparty/new.py - # import path: thirdparty.new - - from watcher._i18n import _ - from watcher.decision_engine.goal import base - from watcher.decision_engine.goal.efficacy import specs - - class NewGoal(base.Goal): - - @classmethod - def get_name(cls): - return "new_goal" # Will be the name of the entry point - - @classmethod - def get_display_name(cls): - return _("New Goal") - - @classmethod - def get_translatable_display_name(cls): - return "New Goal" - - @classmethod - def get_efficacy_specification(cls): - return specs.Unclassified() - - -As you may have noticed, the :py:meth:`~.Goal.get_efficacy_specification` -method returns an :py:meth:`~.Unclassified` instance which -is provided by Watcher. This efficacy specification is useful during the -development process of your goal as it corresponds to an empty specification.
-If you want to learn more about what efficacy specifications are used for or to -define your own efficacy specification, please refer to the :ref:`related -section below `. - - -Abstract Plugin Class -===================== - -Here below is the abstract :py:class:`~.base.Goal` class: - -.. autoclass:: watcher.decision_engine.goal.base.Goal - :members: - :noindex: - -.. _goal_plugin_add_entrypoint: - -Add a new entry point -===================== - -In order for the Watcher Decision Engine to load your new goal, the -goal must be registered as a named entry point under the ``watcher_goals`` -entry point namespace of your ``setup.py`` file. If you are using pbr_, this -entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique and should be the same -as the value returned by the :py:meth:`~.base.Goal.get_name` class method of -your goal. - -Here below is how you would proceed to register ``NewGoal`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_goals = - new_goal = thirdparty.new:NewGoal - - -To get a better understanding on how to implement a more advanced goal, -have a look at the :py:class:`~.ServerConsolidation` class. - -.. _pbr: http://docs.openstack.org/developer/pbr/ - -.. _implement_efficacy_specification: - -Implement a customized efficacy specification -============================================= - -What is it for? ---------------- - -Efficacy specifications define a set of specifications for a given goal. -These specifications actually define a list of indicators which are to be used -to compute a global efficacy that outlines how well a strategy performed when -trying to achieve the goal it is associated to. - -The idea behind such specification is to give the administrator the possibility -to run an audit using different strategies satisfying the same goal and be able -to judge how they performed at a glance. 
- - -Implementation --------------- - -In order to create a new efficacy specification, you have to: - -- Extend the :py:class:`~.EfficacySpecification` class. -- Implement :py:meth:`~.EfficacySpecification.get_indicators_specifications` - by returning a list of :py:class:`~.IndicatorSpecification` instances. - - * Each :py:class:`~.IndicatorSpecification` instance should actually extend - the latter. - * Each indicator specification should have a **unique name** which should be - a valid Python variable name. - * They should implement the :py:attr:`~.EfficacySpecification.schema` - abstract property by returning a :py:class:`~.voluptuous.Schema` instance. - This schema is the contract the strategy will have to comply with when - setting the value associated to the indicator specification within its - solution (see the :ref:`architecture of Watcher - ` for more information on - the audit execution workflow). - -- Implement the :py:meth:`~.EfficacySpecification.get_global_efficacy` method: - it should compute the global efficacy for the goal it achieves based on the - efficacy indicators you just defined. - -Here below is an example of an efficacy specification containing one indicator -specification: - -.. 
code-block:: python - - from watcher._i18n import _ - from watcher.decision_engine.goal.efficacy import base as efficacy_base - from watcher.decision_engine.goal.efficacy import indicators - from watcher.decision_engine.solution import efficacy - - - class IndicatorExample(IndicatorSpecification): - def __init__(self): - super(IndicatorExample, self).__init__( - name="indicator_example", - description=_("Example of indicator specification."), - unit=None, - ) - - @property - def schema(self): - return voluptuous.Schema(voluptuous.Range(min=0), required=True) - - - class UnclassifiedStrategySpecification(efficacy_base.EfficacySpecification): - - def get_indicators_specifications(self): - return [IndicatorExample()] - - def get_global_efficacy(self, indicators_map): - return efficacy.Indicator( - name="global_efficacy_indicator", - description="Example of global efficacy indicator", - unit="%", - value=indicators_map.indicator_example % 100) - - -To get a better understanding on how to implement an efficacy specification, -have a look at :py:class:`~.ServerConsolidationSpecification`. - -Also, if you want to see a concrete example of an indicator specification, -have a look at :py:class:`~.ReleasedComputeNodesCount`. diff --git a/doc/source/contributor/plugin/index.rst b/doc/source/contributor/plugin/index.rst deleted file mode 100644 index 4fedb01..0000000 --- a/doc/source/contributor/plugin/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - base-setup - action-plugin - cdmc-plugin - goal-plugin - planner-plugin - scoring-engine-plugin - strategy-plugin - plugins diff --git a/doc/source/contributor/plugin/planner-plugin.rst b/doc/source/contributor/plugin/planner-plugin.rst deleted file mode 100644 index de2e7b1..0000000 --- a/doc/source/contributor/plugin/planner-plugin.rst +++ /dev/null @@ -1,174 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_planner_plugin: - -=================== -Build a new planner -=================== - -Watcher :ref:`Decision Engine ` has an -external :ref:`planner ` plugin interface which -gives anyone the ability to integrate an external :ref:`planner -` in order to extend the initial set of planners -Watcher provides. - -This section gives some guidelines on how to implement and integrate custom -planners with Watcher. - -.. _Decision Engine: watcher_decision_engine_definition - -Creating a new plugin -===================== - -First of all you have to extend the base :py:class:`~.BasePlanner` class which -defines an abstract method that you will have to implement. The -:py:meth:`~.BasePlanner.schedule` is the method being called by the Decision -Engine to schedule a given solution (:py:class:`~.BaseSolution`) into an -:ref:`action plan ` by ordering/sequencing an unordered -set of actions contained in the proposed solution (for more details, see -:ref:`definition of a solution `). - -Here is an example showing how you can write a planner plugin called -``DummyPlanner``: - -.. 
code-block:: python - - # Filepath = third-party/third_party/dummy.py - # Import path = third_party.dummy - from oslo_utils import uuidutils - from watcher.decision_engine.planner import base - - - class DummyPlanner(base.BasePlanner): - - def _create_action_plan(self, context, audit_id): - action_plan_dict = { - 'uuid': uuidutils.generate_uuid(), - 'audit_id': audit_id, - 'first_action_id': None, - 'state': objects.action_plan.State.RECOMMENDED - } - - new_action_plan = objects.ActionPlan(context, **action_plan_dict) - new_action_plan.create(context) - new_action_plan.save() - return new_action_plan - - def schedule(self, context, audit_id, solution): - # Empty action plan - action_plan = self._create_action_plan(context, audit_id) - # todo: You need to create the workflow of actions here - # and attach it to the action plan - return action_plan - -This implementation is the most basic one. So if you want to have more advanced -examples, have a look at the implementation of planners already provided by -Watcher like :py:class:`~.DefaultPlanner`. A list with all available planner -plugins can be found :ref:`here `. - - -Define configuration parameters -=============================== - -At this point, you have a fully functional planner. However, in more complex -implementation, you may want to define some configuration options so one can -tune the planner to its needs. To do so, you can implement the -:py:meth:`~.Loadable.get_config_opts` class method as followed: - -.. code-block:: python - - from oslo_config import cfg - - class DummyPlanner(base.BasePlanner): - - # [...] - - def schedule(self, context, audit_uuid, solution): - assert self.config.test_opt == 0 - # [...] - - @classmethod - def get_config_opts(cls): - return super( - DummyPlanner, cls).get_config_opts() + [ - cfg.StrOpt('test_opt', help="Demo Option.", default=0), - # Some more options ... 
- ] - -The configuration options defined within this class method will be included -within the global ``watcher.conf`` configuration file under a section named by -convention: ``{namespace}.{plugin_name}``. In our case, the ``watcher.conf`` -configuration would have to be modified as followed: - -.. code-block:: ini - - [watcher_planners.dummy] - # Option used for testing. - test_opt = test_value - -Then, the configuration options you define within this method will then be -injected in each instantiated object via the ``config`` parameter of the -:py:meth:`~.BasePlanner.__init__` method. - - -Abstract Plugin Class -===================== - -Here below is the abstract ``BasePlanner`` class that every single planner -should implement: - -.. autoclass:: watcher.decision_engine.planner.base.BasePlanner - :members: - :special-members: __init__ - :noindex: - - -Register a new entry point -========================== - -In order for the Watcher Decision Engine to load your new planner, the -latter must be registered as a new entry point under the -``watcher_planners`` entry point namespace of your ``setup.py`` file. If you -are using pbr_, this entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique. - -Here below is how you would proceed to register ``DummyPlanner`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_planners = - dummy = third_party.dummy:DummyPlanner - -.. _pbr: http://docs.openstack.org/developer/pbr/ - - -Using planner plugins -===================== - -The :ref:`Watcher Decision Engine ` service -will automatically discover any installed plugins when it is started. This -means that if Watcher is already running when you install your plugin, you will -have to restart the related Watcher services. If a Python package containing a -custom plugin is installed within the same environment as Watcher, Watcher will -automatically make that plugin available for use. 
- -At this point, Watcher will use your new planner if you referenced it in the -``planner`` option under the ``[watcher_planner]`` section of your -``watcher.conf`` configuration file when you started it. For example, if you -want to use the ``dummy`` planner you just installed, you would have to -select it as followed: - -.. code-block:: ini - - [watcher_planner] - planner = dummy - -As you may have noticed, only a single planner implementation can be activated -at a time, so make sure it is generic enough to support all your strategies -and actions. diff --git a/doc/source/contributor/plugin/plugins.rst b/doc/source/contributor/plugin/plugins.rst deleted file mode 100644 index 6eeb7a1..0000000 --- a/doc/source/contributor/plugin/plugins.rst +++ /dev/null @@ -1,76 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - - -================= -Available Plugins -================= - -In this section we present all the plugins that are shipped along with Watcher. -If you want to know which plugins your Watcher services have access to, you can -use the :ref:`Guru Meditation Reports ` to display them. - -.. _watcher_goals: - -Goals -===== - -.. list-plugins:: watcher_goals - :detailed: - -.. _watcher_scoring_engines: - -Scoring Engines -=============== - -.. list-plugins:: watcher_scoring_engines - :detailed: - -.. _watcher_scoring_engine_containers: - -Scoring Engine Containers -========================= - -.. list-plugins:: watcher_scoring_engine_containers - :detailed: - -.. _watcher_strategies: - -Strategies -========== - -.. list-plugins:: watcher_strategies - :detailed: - -.. _watcher_actions: - -Actions -======= - -.. list-plugins:: watcher_actions - :detailed: - -.. _watcher_workflow_engines: - -Workflow Engines -================ - -.. list-plugins:: watcher_workflow_engines - :detailed: - -.. 
_watcher_planners: - -Planners -======== - -.. list-plugins:: watcher_planners - :detailed: - -Cluster Data Model Collectors -============================= - -.. list-plugins:: watcher_cluster_data_model_collectors - :detailed: diff --git a/doc/source/contributor/plugin/scoring-engine-plugin.rst b/doc/source/contributor/plugin/scoring-engine-plugin.rst deleted file mode 100644 index 728fbcf..0000000 --- a/doc/source/contributor/plugin/scoring-engine-plugin.rst +++ /dev/null @@ -1,210 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_scoring_engine_plugin: - -========================== -Build a new scoring engine -========================== - -Watcher Decision Engine has an external :ref:`scoring engine -` plugin interface which gives anyone the ability -to integrate an external scoring engine in order to make use of it in a -:ref:`strategy `. - -This section gives some guidelines on how to implement and integrate custom -scoring engines with Watcher. If you wish to create a third-party package for -your plugin, you can refer to our :ref:`documentation for third-party package -creation `. - - -Pre-requisites -============== - -Because scoring engines execute purely mathematical tasks, they typically do -not have any additional dependencies. Additional requirements might be defined -by specific scoring engine implementations. For example, some scoring engines -might require preparing learning data, which has to be loaded during the -scoring engine startup. Some others might require some external services to be -available (e.g. if the scoring infrastructure is running in the cloud).
- - -Create a new scoring engine plugin -================================== - -In order to create a new scoring engine you have to: - -- Extend the :py:class:`~.ScoringEngine` class -- Implement its :py:meth:`~.ScoringEngine.get_name` method to return the - **unique** ID of the new scoring engine you want to create. This unique ID - should be the same as the name of :ref:`the entry point we will declare later - on `. -- Implement its :py:meth:`~.ScoringEngine.get_description` method to return the - user-friendly description of the implemented scoring engine. It might contain - information about algorithm used, learning data etc. -- Implement its :py:meth:`~.ScoringEngine.get_metainfo` method to return the - machine-friendly metadata about this scoring engine. For example, it could be - a JSON formatted text with information about the data model used, its input - and output data format, column names, etc. -- Implement its :py:meth:`~.ScoringEngine.calculate_score` method to return the - result calculated by this scoring engine. - -Here is an example showing how you can write a plugin called ``NewScorer``: - -.. code-block:: python - - # filepath: thirdparty/new.py - # import path: thirdparty.new - from watcher.decision_engine.scoring import base - - - class NewScorer(base.ScoringEngine): - - def get_name(self): - return 'new_scorer' - - def get_description(self): - return '' - - def get_metainfo(self): - return """{ - "feature_columns": [ - "column1", - "column2", - "column3"], - "result_columns": [ - "value", - "probability"] - }""" - - def calculate_score(self, features): - return '[12, 0.83]' - -As you can see in the above example, the -:py:meth:`~.ScoringEngine.calculate_score` method returns a string. Both this -class and the client (caller) should perform all the necessary serialization -or deserialization. 
- - -(Optional) Create a new scoring engine container plugin -======================================================= - -Optionally, it's possible to implement a container plugin, which can return a -list of scoring engines. This list can be re-evaluated multiple times during -the lifecycle of :ref:`Watcher Decision Engine -` and synchronized with :ref:`Watcher -Database ` using the ``watcher-sync`` command line -tool. - -Below is an example of a container using some scoring engine implementation -that is simply made of a client responsible for communicating with a real -scoring engine deployed as a web service on external servers: - -.. code-block:: python - - class NewScoringContainer(base.ScoringEngineContainer): - - @classmethod - def get_scoring_engine_list(self): - return [ - RemoteScoringEngine( - name='scoring_engine1', - description='Some remote Scoring Engine 1', - remote_url='http://engine1.example.com/score'), - RemoteScoringEngine( - name='scoring_engine2', - description='Some remote Scoring Engine 2', - remote_url='http://engine2.example.com/score'), - ] - - -Abstract Plugin Class -===================== - -Here below is the abstract :py:class:`~.ScoringEngine` class: - -.. autoclass:: watcher.decision_engine.scoring.base.ScoringEngine - :members: - :special-members: __init__ - :noindex: - - -Abstract Plugin Container Class -=============================== - -Here below is the abstract :py:class:`~.ScoringContainer` class: - -.. autoclass:: watcher.decision_engine.scoring.base.ScoringEngineContainer - :members: - :special-members: __init__ - :noindex: - - -.. _scoring_engine_plugin_add_entrypoint: - -Add a new entry point -===================== - -In order for the Watcher Decision Engine to load your new scoring engine, it -must be registered as a named entry point under the ``watcher_scoring_engines`` -entry point of your ``setup.py`` file. If you are using pbr_, this entry point -should be placed in your ``setup.cfg`` file. 
- -The name you give to your entry point has to be unique and should be the same -as the value returned by the :py:meth:`~.ScoringEngine.get_name` method of your -strategy. - -Here below is how you would proceed to register ``NewScorer`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_scoring_engines = - new_scorer = thirdparty.new:NewScorer - - -To get a better understanding on how to implement a more advanced scoring -engine, have a look at the :py:class:`~.DummyScorer` class. This implementation -is not really using machine learning, but other than that it contains all the -pieces which the "real" implementation would have. - -In addition, for some use cases there is a need to register a list (possibly -dynamic, depending on the implementation and configuration) of scoring engines -in a single plugin, so there is no need to restart :ref:`Watcher Decision -Engine ` every time such list changes. For -these cases, an additional ``watcher_scoring_engine_containers`` entry point -can be used. - -For the example how to use scoring engine containers, please have a look at -the :py:class:`~.DummyScoringContainer` and the way it is configured in -``setup.cfg``. For new containers it could be done like this: - -.. code-block:: ini - - [entry_points] - watcher_scoring_engine_containers = - new_scoring_container = thirdparty.new:NewContainer - -.. _pbr: http://docs.openstack.org/developer/pbr/ - - -Using scoring engine plugins -============================ - -The Watcher Decision Engine service will automatically discover any installed -plugins when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. - -At this point, Watcher will scan and register inside the :ref:`Watcher Database -` all the scoring engines you implemented upon -restarting the :ref:`Watcher Decision Engine -`. 
- -In addition, ``watcher-sync`` tool can be used to trigger :ref:`Watcher -Database ` synchronization. This might be used for -"dynamic" scoring containers, which can return different scoring engines based -on some external configuration (if they support that). diff --git a/doc/source/contributor/plugin/strategy-plugin.rst b/doc/source/contributor/plugin/strategy-plugin.rst deleted file mode 100644 index b285dff..0000000 --- a/doc/source/contributor/plugin/strategy-plugin.rst +++ /dev/null @@ -1,314 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_strategy_plugin: - -================================= -Build a new optimization strategy -================================= - -Watcher Decision Engine has an external :ref:`strategy ` -plugin interface which gives anyone the ability to integrate an external -strategy in order to make use of placement algorithms. - -This section gives some guidelines on how to implement and integrate custom -strategies with Watcher. If you wish to create a third-party package for your -plugin, you can refer to our :ref:`documentation for third-party package -creation `. - - -Pre-requisites -============== - -Before using any strategy, you should make sure you have your Telemetry service -configured so that it would provide you all the metrics you need to be able to -use your strategy. - - -Create a new strategy plugin -============================ - -In order to create a new strategy, you have to: - -- Extend the :py:class:`~.UnclassifiedStrategy` class -- Implement its :py:meth:`~.BaseStrategy.get_name` class method to return the - **unique** ID of the new strategy you want to create. This unique ID should - be the same as the name of :ref:`the entry point we will declare later on - `. 
-- Implement its :py:meth:`~.BaseStrategy.get_display_name` class method to - return the translated display name of the strategy you want to create. - Note: Do not use a variable to return the translated string so it can be - automatically collected by the translation tool. -- Implement its :py:meth:`~.BaseStrategy.get_translatable_display_name` - class method to return the translation key (actually the English display - name) of your new strategy. The value returned should be the same as the - string translated in :py:meth:`~.BaseStrategy.get_display_name`. -- Implement its :py:meth:`~.BaseStrategy.execute` method to return the - solution you computed within your strategy. - -Here is an example showing how you can write a plugin called ``NewStrategy``: - -.. code-block:: python - - # filepath: thirdparty/new.py - # import path: thirdparty.new - import abc - - import six - - from watcher._i18n import _ - from watcher.decision_engine.strategy.strategies import base - - - class NewStrategy(base.UnclassifiedStrategy): - - def __init__(self, osc=None): - super(NewStrategy, self).__init__(osc) - - def execute(self, original_model): - self.solution.add_action(action_type="nop", - input_parameters=parameters) - # Do some more stuff here ... - return self.solution - - @classmethod - def get_name(cls): - return "new_strategy" - - @classmethod - def get_display_name(cls): - return _("New strategy") - - @classmethod - def get_translatable_display_name(cls): - return "New strategy" - - -As you can see in the above example, the :py:meth:`~.BaseStrategy.execute` method returns a :py:class:`~.BaseSolution` instance as required. This solution is what wraps the abstract set of actions the strategy recommends to you. This solution is then processed by a :ref:`planner ` to produce an action plan which contains the sequenced flow of actions to be executed by the :ref:`Watcher Applier `.
This -solution also contains the various :ref:`efficacy indicators -` alongside its computed :ref:`global efficacy -`. - -Please note that your strategy class will expect to find the same constructor signature as BaseStrategy to instantiate your strategy. Therefore, you should ensure that your ``__init__`` signature is identical to the -:py:class:`~.BaseStrategy` one. - - -Strategy efficacy -================= - -As stated before, the ``NewStrategy`` class extends a class called -:py:class:`~.UnclassifiedStrategy`. This class actually implements a set of abstract methods which are defined within the :py:class:`~.BaseStrategy` parent class. - -One thing this :py:class:`~.UnclassifiedStrategy` class defines is that our ``NewStrategy`` achieves the ``unclassified`` goal. This goal is a peculiar one as it does not contain any indicator nor does it calculate a global efficacy. This proves itself to be quite useful during the development of a new strategy for which the goal has yet to be defined or in case a :ref:`new goal -` has yet to be implemented. - - -Define Strategy Parameters -========================== - -For each new added strategy, you can add parameters spec so that an operator can input strategy parameters when creating an audit to control the -:py:meth:`~.BaseStrategy.execute` behavior of strategy. This is useful to define some threshold for your strategy, and tune them at runtime. - -To define parameters, just implement :py:meth:`~.BaseStrategy.get_schema` to return parameters spec with `jsonschema -`_ format. It is strongly encouraged to provide a default value for each parameter, or else the reference fails if the operator specifies no parameters. - -Here is an example showing how you can define 2 parameters for -``DummyStrategy``: - -..
code-block:: python - - class DummyStrategy(base.DummyBaseStrategy): - - @classmethod - def get_schema(cls): - return { - "properties": { - "para1": { - "description": "number parameter example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - "maximum": 10.2, - }, - "para2": { - "description": "string parameter example", - "type": "string", - "default": "hello", - }, - }, - } - - -You can reference parameters in :py:meth:`~.BaseStrategy.execute`: - -.. code-block:: python - - class DummyStrategy(base.DummyBaseStrategy): - - def execute(self): - para1 = self.input_parameters.para1 - para2 = self.input_parameters.para2 - - if para1 > 5: - ... - - -Operators can specify parameters with the following command: - -.. code:: bash - - $ watcher audit create -a -p para1=6.0 -p para2=hi - -Please check the user guide for details. - - -Abstract Plugin Class -===================== - -Here below is the abstract :py:class:`~.BaseStrategy` class: - -.. autoclass:: watcher.decision_engine.strategy.strategies.base.BaseStrategy - :members: - :special-members: __init__ - :noindex: - -.. _strategy_plugin_add_entrypoint: - -Add a new entry point -===================== - -In order for the Watcher Decision Engine to load your new strategy, the strategy must be registered as a named entry point under the ``watcher_strategies`` entry point of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique and should be the same as the value returned by the :py:meth:`~.BaseStrategy.get_name` class method of your strategy. - -Here below is how you would proceed to register ``NewStrategy`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_strategies = - new_strategy = thirdparty.new:NewStrategy - - -To get a better understanding on how to implement a more advanced strategy, have a look at the :py:class:`~.BasicConsolidation` class. - -..
_pbr: http://docs.openstack.org/developer/pbr/ - -Using strategy plugins -====================== - -The Watcher Decision Engine service will automatically discover any installed -plugins when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. - -At this point, Watcher will scan and register inside the :ref:`Watcher Database -` all the strategies (alongside the goals they -should satisfy) you implemented upon restarting the :ref:`Watcher Decision -Engine `. - -You should take care when installing strategy plugins. By their very nature, -there are no guarantees that utilizing them as is will be supported, as -they may require a set of metrics which is not yet available within the -Telemetry service. In such a case, please do make sure that you first -check/configure the latter so your new strategy can be fully functional. - -Querying metrics ----------------- - -A large set of metrics, generated by OpenStack modules, can be used in your -strategy implementation. To collect these metrics, Watcher provides a -`Helper`_ for two data sources which are `Ceilometer`_ and `Monasca`_. If you -wish to query metrics from a different data source, you can implement your own -and directly use it from within your new strategy. Indeed, strategies in -Watcher have the cluster data models decoupled from the data sources which -means that you may keep the former while changing the latter. -The recommended way for you to support a new data source is to implement a new -helper that would encapsulate within separate methods the queries you need to -perform. To then use it, you would just have to instantiate it within your -strategy. - -If you want to use Ceilometer but with your own metrics database backend, -please refer to the `Ceilometer developer guide`_. The list of the available -Ceilometer backends is located here_. 
The `Ceilosca`_ project is a good example -of how to create your own pluggable backend. Moreover, if your strategy -requires new metrics not covered by Ceilometer, you can add them through a -`Ceilometer plugin`_. - - -.. _`Helper`: https://github.com/openstack/watcher/blob/master/watcher/decision_engine/cluster/history/ceilometer.py -.. _`Ceilometer developer guide`: http://docs.openstack.org/developer/ceilometer/architecture.html#storing-the-data -.. _`Ceilometer`: http://docs.openstack.org/developer/ceilometer/ -.. _`Monasca`: https://github.com/openstack/monasca-api/blob/master/docs/monasca-api-spec.md -.. _`here`: http://docs.openstack.org/developer/ceilometer/install/dbreco.html#choosing-a-database-backend -.. _`Ceilometer plugin`: http://docs.openstack.org/developer/ceilometer/plugins.html -.. _`Ceilosca`: https://github.com/openstack/monasca-ceilometer/blob/master/ceilosca/ceilometer/storage/impl_monasca.py - -Read usage metrics using the Watcher Datasource Helper ------------------------------------------------------- - -The following code snippet shows how to invoke a Datasource Helper class: - -.. code-block:: py - - from watcher.datasource import ceilometer as ceil - from watcher.datasource import monasca as mon - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @property - def monasca(self): - if self._monasca is None: - self._monasca = mon.MonascaHelper(osc=self.osc) - return self._monasca - -Using that you can now query the values for that specific metric: - -.. 
code-block:: py - - if self.config.datasource == "ceilometer": - resource_id = "%s_%s" % (node.uuid, node.hostname) - return self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name='compute.node.cpu.percent', - period="7200", - aggregate='avg', - ) - elif self.config.datasource == "monasca": - statistics = self.monasca.statistic_aggregation( - meter_name='compute.node.cpu.percent', - dimensions=dict(hostname=node.uuid), - period=7200, - aggregate='avg' - ) diff --git a/doc/source/contributor/rally_link.rst b/doc/source/contributor/rally_link.rst deleted file mode 100644 index 7fd02e3..0000000 --- a/doc/source/contributor/rally_link.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../rally-jobs/README.rst diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst deleted file mode 100644 index ab0675f..0000000 --- a/doc/source/contributor/testing.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======= -Testing -======= - -.. _unit_tests: - -Unit tests -========== - -All unit tests should be run using `tox`_. To run the same unit tests that are -executing onto `Gerrit`_ which includes ``py35``, ``py27`` and ``pep8``, you -can issue the following command:: - - $ workon watcher - (watcher) $ pip install tox - (watcher) $ cd watcher - (watcher) $ tox - -If you want to only run one of the aforementioned, you can then issue one of -the following:: - - $ workon watcher - (watcher) $ tox -e py35 - (watcher) $ tox -e py27 - (watcher) $ tox -e pep8 - -.. _tox: https://tox.readthedocs.org/ -.. _Gerrit: http://review.openstack.org/ - -You may pass options to the test programs using positional arguments. To run a -specific unit test, you can pass extra options to `os-testr`_ after putting -the ``--`` separator. 
So using the ``-r`` option followed by a regex string, -you can run the desired test:: - - $ workon watcher - (watcher) $ tox -e py27 -- -r watcher.tests.api - -.. _os-testr: http://docs.openstack.org/developer/os-testr/ - -When you're done, deactivate the virtualenv:: - - $ deactivate - -.. include:: ../../../watcher_tempest_plugin/README.rst diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst deleted file mode 100644 index 7ab0602..0000000 --- a/doc/source/glossary.rst +++ /dev/null @@ -1,386 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======== -Glossary -======== - -.. glossary:: - :sorted: - -This page explains the different terms used in the Watcher system. - -They are sorted in alphabetical order. - -.. _action_definition: - -Action -====== - -.. watcher-term:: watcher.api.controllers.v1.action - -.. _action_plan_definition: - -Action Plan -=========== - -.. watcher-term:: watcher.api.controllers.v1.action_plan - -.. _administrator_definition: - -Administrator -============= - -The :ref:`Administrator ` is any user who has admin -access on the OpenStack cluster. This user is allowed to create new projects -for tenants, create new users and assign roles to each user. - -The :ref:`Administrator ` usually has remote access -to any host of the cluster in order to change the configuration and restart any -OpenStack service, including Watcher. 
- -In the context of Watcher, the :ref:`Administrator ` -is a role for users which allows them to run any Watcher commands, such as: - -- Create/Delete an :ref:`Audit Template ` -- Launch an :ref:`Audit ` -- Get the :ref:`Action Plan ` -- Launch a recommended :ref:`Action Plan ` manually -- Archive previous :ref:`Audits ` and - :ref:`Action Plans ` - - -The :ref:`Administrator ` is also allowed to modify -any Watcher configuration files and to restart Watcher services. - -.. _audit_definition: - -Audit -===== - -.. watcher-term:: watcher.api.controllers.v1.audit - -.. _audit_template_definition: - -Audit Template -============== - -.. watcher-term:: watcher.api.controllers.v1.audit_template - -.. _availability_zone_definition: - -Availability Zone -================= - -Please, read `the official OpenStack definition of an Availability Zone `_. - -.. _cluster_definition: - -Cluster -======= - -A :ref:`Cluster ` is a set of physical machines which -provide compute, storage and networking resources and are managed by the same -OpenStack Controller node. -A :ref:`Cluster ` represents a set of resources that a -cloud provider is able to offer to his/her -:ref:`customers `. - -A data center may contain several clusters. - -The :ref:`Cluster ` may be divided in one or several -:ref:`Availability Zone(s) `. - -.. _cluster_data_model_definition: - -Cluster Data Model (CDM) -======================== - -.. watcher-term:: watcher.decision_engine.model.collector.base - - -.. _controller_node_definition: - -Controller Node -=============== - -A controller node is a machine that typically runs the following core OpenStack -services: - -- Keystone: for identity and service management -- Cinder scheduler: for volumes management -- Glance controller: for image management -- Neutron controller: for network management -- Nova controller: for global compute resources management with services - such as nova-scheduler, nova-conductor and nova-network. 
- -In many configurations, Watcher will reside on a controller node even if it -can potentially be hosted on a dedicated machine. - -.. _compute_node_definition: - -Compute node -============ - -Please, read `the official OpenStack definition of a Compute Node -`_. - -.. _customer_definition: - -Customer -======== - -A :ref:`Customer ` is the person or company which -subscribes to the cloud provider offering. A customer may have several -:ref:`Project(s) ` -hosted on the same :ref:`Cluster ` or dispatched on -different clusters. - -In the private cloud context, the :ref:`Customers ` are -different groups within the same organization (different departments, project -teams, branch offices and so on). Cloud infrastructure includes the ability to -precisely track each customer's service usage so that it can be charged back to -them, or at least reported to them. - -.. _goal_definition: - -Goal -==== - -.. watcher-term:: watcher.api.controllers.v1.goal - - -.. _host_aggregates_definition: - -Host Aggregate -============== - -Please, read `the official OpenStack definition of a Host Aggregate -`_. - -.. _instance_definition: - -Instance -======== - -A running virtual machine, or a virtual machine in a known state such as -suspended, that can be used like a hardware server. - -.. _managed_resource_definition: - -Managed resource -================ - -A :ref:`Managed resource ` is one instance of -:ref:`Managed resource type ` in a topology -with particular properties and dependencies on other -:ref:`Managed resources ` (relationships). - -For example, a :ref:`Managed resource ` can be one -virtual machine (i.e., an :ref:`instance `) hosted on a -:ref:`compute node ` and connected to another virtual -machine through a network link (represented also as a -:ref:`Managed resource ` in the -:ref:`Cluster Data Model `). - -.. 
_managed_resource_type_definition: - -Managed resource type -===================== - -A :ref:`Managed resource type ` is a type of -hardware or software element of the :ref:`Cluster ` that -the Watcher system can act on. - -Here are some examples of -:ref:`Managed resource types `: - -- `Nova Host Aggregates `_ -- `Nova Servers `_ -- `Cinder Volumes `_ -- `Neutron Routers `_ -- `Neutron Networks `_ -- `Neutron load-balancers `_ -- `Sahara Hadoop Cluster `_ -- ... - -It can be any of the `the official list of available resource types defined in -OpenStack for HEAT -`_. - -.. _efficacy_indicator_definition: - -Efficacy Indicator -================== - -.. watcher-term:: watcher.api.controllers.v1.efficacy_indicator - -.. _efficacy_specification_definition: - -Efficacy Specification -====================== - -.. watcher-term:: watcher.decision_engine.goal.efficacy.base - -.. _efficacy_definition: - -Optimization Efficacy -===================== - -The :ref:`Optimization Efficacy ` is the objective -measure of how much of the :ref:`Goal ` has been achieved in -respect with constraints and :ref:`SLAs ` defined by the -:ref:`Customer `. - -The way efficacy is evaluated will depend on the :ref:`Goal ` -to achieve. - -Of course, the efficacy will be relevant only as long as the -:ref:`Action Plan ` is relevant -(i.e., the current state of the :ref:`Cluster ` -has not changed in a way that a new :ref:`Audit ` would need -to be launched). - -For example, if the :ref:`Goal ` is to lower the energy -consumption, the :ref:`Efficacy ` will be computed -using several :ref:`efficacy indicators ` -(KPIs): - -- the percentage of energy gain (which must be the highest possible) -- the number of :ref:`SLA violations ` - (which must be the lowest possible) -- the number of virtual machine migrations (which must be the lowest possible) - -All those indicators are computed within a given timeframe, which is the -time taken to execute the whole :ref:`Action Plan `. 
- -The efficacy also enables the :ref:`Administrator ` -to objectively compare different :ref:`Strategies ` for -the same goal and same workload of the :ref:`Cluster `. - -.. _project_definition: - -Project -======= - -:ref:`Projects ` represent the base unit of “ownership” -in OpenStack, in that all :ref:`resources ` in -OpenStack should be owned by a specific :ref:`project `. -In OpenStack Identity, a :ref:`project ` must be owned by a -specific domain. - -Please, read `the official OpenStack definition of a Project -`_. - -.. _scoring_engine_definition: - -Scoring Engine -============== - -.. watcher-term:: watcher.api.controllers.v1.scoring_engine - -.. _sla_definition: - -SLA -=== - -:ref:`SLA ` means Service Level Agreement. - -The resources are negotiated between the :ref:`Customer ` -and the Cloud Provider in a contract. - -Most of the time, this contract is composed of two documents: - -- :ref:`SLA ` : Service Level Agreement -- :ref:`SLO ` : Service Level Objectives - -Note that the :ref:`SLA ` is more general than the -:ref:`SLO ` in the sense that the former specifies what service -is to be provided, how it is supported, times, locations, costs, performance, -and responsibilities of the parties involved while the -:ref:`SLO ` focuses on more measurable characteristics such as -availability, throughput, frequency, response time or quality. - -You can also read `the Wikipedia page for SLA `_ -which provides a good definition. - -.. _sla_violation_definition: - -SLA violation -============= - -A :ref:`SLA violation ` happens when a -:ref:`SLA ` defined with a given -:ref:`Customer ` could not be respected by the -cloud provider within the timeframe defined by the official contract document. - -.. _slo_definition: - -SLO -=== - -A Service Level Objective (SLO) is a key element of a -:ref:`SLA ` between a service provider and a -:ref:`Customer `. 
SLOs are agreed as a means of measuring -the performance of the Service Provider and are outlined as a way of avoiding -disputes between the two parties based on misunderstanding. - -You can also read `the Wikipedia page for SLO `_ -which provides a good definition. - -.. _solution_definition: - -Solution -======== - -.. watcher-term:: watcher.decision_engine.solution.base - -.. _strategy_definition: - -Strategy -======== - -.. watcher-term:: watcher.api.controllers.v1.strategy - -.. _watcher_applier_definition: - -Watcher Applier -=============== - -.. watcher-term:: watcher.applier.base - -.. _watcher_database_definition: - -Watcher Database -================ - -This database stores all the Watcher domain objects which can be requested -by the Watcher API or the Watcher CLI: - -- Audit templates -- Audits -- Action plans -- Actions -- Goals - -The Watcher domain being here "*optimization of some resources provided by an -OpenStack system*". - -See :doc:`architecture` for more details on this component. - -.. _watcher_decision_engine_definition: - -Watcher Decision Engine -======================= - -.. watcher-term:: watcher.decision_engine.manager - -.. _watcher_planner_definition: - -Watcher Planner -=============== - -.. 
watcher-term:: watcher.decision_engine.planner.base diff --git a/doc/source/image_src/dia/architecture.dia b/doc/source/image_src/dia/architecture.dia deleted file mode 100644 index cf98ea9..0000000 Binary files a/doc/source/image_src/dia/architecture.dia and /dev/null differ diff --git a/doc/source/image_src/dia/functional_data_model.dia b/doc/source/image_src/dia/functional_data_model.dia deleted file mode 100644 index ae7654d..0000000 Binary files a/doc/source/image_src/dia/functional_data_model.dia and /dev/null differ diff --git a/doc/source/image_src/plantuml/README.rst b/doc/source/image_src/plantuml/README.rst deleted file mode 100644 index e73c9c0..0000000 --- a/doc/source/image_src/plantuml/README.rst +++ /dev/null @@ -1,14 +0,0 @@ -plantuml -======== - - -To build an image from a source file, you have to download the plantuml JAR file available on http://plantuml.com/download.html. -After, just run this command to build your image: - -.. code-block:: shell - - $ cd doc/source/images - $ java -jar /path/to/plantuml.jar doc/source/image_src/plantuml/my_image.txt - $ ls doc/source/images/ - my_image.png diff --git a/doc/source/image_src/plantuml/action_plan_state_machine.txt b/doc/source/image_src/plantuml/action_plan_state_machine.txt deleted file mode 100644 index 0eab6d0..0000000 --- a/doc/source/image_src/plantuml/action_plan_state_machine.txt +++ /dev/null @@ -1,18 +0,0 @@ -@startuml - -[*] --> RECOMMENDED: The Watcher Planner\ncreates the Action Plan -RECOMMENDED --> PENDING: Administrator launches\nthe Action Plan -PENDING --> ONGOING: The Watcher Applier receives the request\nto launch the Action Plan -ONGOING --> FAILED: Something failed while executing\nthe Action Plan in the Watcher Applier -ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action Plan successfully -FAILED --> DELETED : Administrator removes\nAction Plan -SUCCEEDED --> DELETED : Administrator removes\nAction Plan -ONGOING --> CANCELLED : Administrator cancels\nAction Plan
-RECOMMENDED --> CANCELLED : Administrator cancels\nAction Plan -RECOMMENDED --> SUPERSEDED : The Watcher Decision Engine supersedes\nAction Plan -PENDING --> CANCELLED : Administrator cancels\nAction Plan -CANCELLED --> DELETED -SUPERSEDED --> DELETED -DELETED --> [*] - -@enduml diff --git a/doc/source/image_src/plantuml/audit_state_machine.txt b/doc/source/image_src/plantuml/audit_state_machine.txt deleted file mode 100644 index 860a202..0000000 --- a/doc/source/image_src/plantuml/audit_state_machine.txt +++ /dev/null @@ -1,17 +0,0 @@ -@startuml - -[*] --> PENDING: Audit requested by Administrator -PENDING --> ONGOING: Audit request is received\nby the Watcher Decision Engine -ONGOING --> FAILED: Audit fails\n(no solution found, technical error, ...) -ONGOING --> SUCCEEDED: The Watcher Decision Engine\ncould find at least one Solution -ONGOING --> SUSPENDED: Administrator wants to\nsuspend the Audit -SUSPENDED --> ONGOING: Administrator wants to\nresume the Audit -FAILED --> DELETED : Administrator wants to\narchive/delete the Audit -SUCCEEDED --> DELETED : Administrator wants to\narchive/delete the Audit -PENDING --> CANCELLED : Administrator cancels\nthe Audit -ONGOING --> CANCELLED : Administrator cancels\nthe Audit -CANCELLED --> DELETED : Administrator wants to\narchive/delete the Audit -SUSPENDED --> DELETED: Administrator wants to\narchive/delete the Audit -DELETED --> [*] - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt b/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt deleted file mode 100644 index 6792b1c..0000000 --- a/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt +++ /dev/null @@ -1,41 +0,0 @@ -@startuml -skinparam maxMessageSize 100 - -actor "Administrator" - -== Initialization == - -"Administrator" -> "Decision Engine" : Start all services -"Decision Engine" -> "Background Task Scheduler" : Start - -activate "Background Task Scheduler" -"Background Task Scheduler" -> 
"Cluster Model Collector Loader"\ -: List available cluster data models -"Cluster Model Collector Loader" --> "Background Task Scheduler"\ -: list of BaseClusterModelCollector instances - -loop for every available cluster data model collector - "Background Task Scheduler" -> "Background Task Scheduler"\ - : add periodic synchronization job - create "Jobs Pool" - "Background Task Scheduler" -> "Jobs Pool" : Create sync job -end -deactivate "Background Task Scheduler" - -hnote over "Background Task Scheduler" : Idle - -== Job workflow == - -"Background Task Scheduler" -> "Jobs Pool" : Trigger synchronization job -"Jobs Pool" -> "Nova Cluster Data Model Collector" : synchronize - -activate "Nova Cluster Data Model Collector" - "Nova Cluster Data Model Collector" -> "Nova API"\ - : Fetch needed data to build the cluster data model - "Nova API" --> "Nova Cluster Data Model Collector" : Needed data - "Nova Cluster Data Model Collector" -> "Nova Cluster Data Model Collector"\ - : Build an in-memory cluster data model - ]o<-- "Nova Cluster Data Model Collector" : Done -deactivate "Nova Cluster Data Model Collector" - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt b/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt deleted file mode 100644 index fd6035f..0000000 --- a/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt +++ /dev/null @@ -1,24 +0,0 @@ -@startuml - - -actor Administrator - -Administrator -> "Watcher CLI" : watcher audit create -a - -"Watcher CLI" -> "Watcher API" : POST audit(parameters) -"Watcher API" -> "Watcher Database" : create new audit in database (status=PENDING) - -"Watcher API" <-- "Watcher Database" : new audit uuid -"Watcher CLI" <-- "Watcher API" : return new audit URL - -Administrator <-- "Watcher CLI" : new audit uuid - -"Watcher API" -> "AMQP Bus" : trigger_audit(new_audit.uuid) -"AMQP Bus" -> "Watcher Decision Engine" : trigger_audit(new_audit.uuid) (status=ONGOING) - 
-ref over "Watcher Decision Engine" - Trigger audit in the - Watcher Decision Engine -end ref - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_create_audit_template.txt b/doc/source/image_src/plantuml/sequence_create_audit_template.txt deleted file mode 100644 index ef422a5..0000000 --- a/doc/source/image_src/plantuml/sequence_create_audit_template.txt +++ /dev/null @@ -1,22 +0,0 @@ -@startuml - -actor Administrator - -Administrator -> "Watcher CLI" : watcher audittemplate create \ -[--strategy-uuid ] -"Watcher CLI" -> "Watcher API" : POST audit_template(parameters) - -"Watcher API" -> "Watcher Database" : Request if goal exists in database -"Watcher API" <-- "Watcher Database" : OK - -"Watcher API" -> "Watcher Database" : Request if strategy exists in database (if provided) -"Watcher API" <-- "Watcher Database" : OK - -"Watcher API" -> "Watcher Database" : Create new audit_template in database -"Watcher API" <-- "Watcher Database" : New audit template UUID - -"Watcher CLI" <-- "Watcher API" : Return new audit template URL in HTTP Location Header -Administrator <-- "Watcher CLI" : New audit template UUID - -@enduml - diff --git a/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt b/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt deleted file mode 100644 index d36274c..0000000 --- a/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt +++ /dev/null @@ -1,44 +0,0 @@ -@startuml - -skinparam maxMessageSize 200 - -"Decision Engine" -> "Decision Engine" : Execute audit -activate "Decision Engine" -"Decision Engine" -> "Decision Engine" : Set the audit state to ONGOING - -"Decision Engine" -> "Strategy selector" : Select strategy -activate "Strategy selector" -alt A specific strategy is provided -"Strategy selector" -> "Strategy selector" : Load strategy and inject the \ -cluster data model -else Only a goal is specified -"Strategy selector" -> 
"Strategy selector" : select strategy -"Strategy selector" -> "Strategy selector" : Load strategy and inject the \ -cluster data model -end -"Strategy selector" -> "Decision Engine" : Return loaded Strategy -deactivate "Strategy selector" - -"Decision Engine" -> "Strategy" : Execute the strategy -activate "Strategy" -"Strategy" -> "Strategy" : **pre_execute()**Checks if the strategy \ -pre-requisites are all set. -"Strategy" -> "Strategy" : **do_execute()**Contains the logic of the strategy -"Strategy" -> "Strategy" : **post_execute()** Set the efficacy indicators -"Strategy" -> "Strategy" : Compute the global efficacy of the solution \ -based on the provided efficacy indicators -"Strategy" -> "Decision Engine" : Return the solution -deactivate "Strategy" - -"Decision Engine" -> "Planner" : Plan the solution that was computed by the \ -strategy -activate "Planner" -"Planner" -> "Planner" : Store the planned solution as an action plan with its \ -related actions and efficacy indicators -"Planner" --> "Decision Engine" : Done -deactivate "Planner" -"Decision Engine" -> "Decision Engine" : Update the audit state to SUCCEEDED - -deactivate "Decision Engine" - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_launch_action_plan.txt b/doc/source/image_src/plantuml/sequence_launch_action_plan.txt deleted file mode 100644 index 8c60727..0000000 --- a/doc/source/image_src/plantuml/sequence_launch_action_plan.txt +++ /dev/null @@ -1,23 +0,0 @@ -@startuml - -actor Administrator - -Administrator -> "Watcher CLI" : watcher actionplan start - -"Watcher CLI" -> "Watcher API" : PATCH action_plan(state=PENDING) -"Watcher API" -> "Watcher Database" : action_plan.state=PENDING - -"Watcher CLI" <-- "Watcher API" : HTTP 200 - -Administrator <-- "Watcher CLI" : OK - -"Watcher API" -> "AMQP Bus" : launch_action_plan(action_plan.uuid) -"AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid) - -ref over "Watcher Applier" - Launch Action Plan in the - Watcher 
Applier -end ref - -@enduml - diff --git a/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt b/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt deleted file mode 100644 index fe9caab..0000000 --- a/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt +++ /dev/null @@ -1,31 +0,0 @@ -@startuml - -"AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid) -"Watcher Applier" -> "Watcher Database" : action_plan.state=ONGOING -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = ONGOING -"Watcher Applier" -> "Watcher Database" : get_action_list(action_plan.uuid) -"Watcher Applier" <-- "Watcher Database" : actions -loop for each action of the action flow -create Action -"Watcher Applier" -> Action : instantiate Action object with target resource id\n and input parameters -"Watcher Applier" -> Action : validate_parameters() -"Watcher Applier" <-- Action : OK -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action state = ONGOING -"Watcher Applier" -> Action : preconditions() -"Watcher Applier" <-- Action : OK -"Watcher Applier" -> Action : execute() -alt action is "migrate instance" -Action -> "Nova API" : migrate(instance_id, dest_host_id) -Action <-- "Nova API" : OK -else action is "disable hypervisor" -Action -> "Nova API" : host-update(host_id, maintenance=true) -Action <-- "Nova API" : OK -end -"Watcher Applier" <-- Action : OK -"Watcher Applier" -> "Watcher Database" : action.state=SUCCEEDED -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action state = SUCCEEDED -end -"Watcher Applier" -> "Watcher Database" : action_plan.state=SUCCEEDED -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = SUCCEEDED - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt b/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt deleted file mode 100644 index 36c526f..0000000 --- 
a/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt +++ /dev/null @@ -1,37 +0,0 @@ -@startuml - -actor Administrator - -== Create some Audit settings == - -Administrator -> Watcher : create new Audit Template (i.e. Audit settings : goal, scope, ...) -Watcher -> Watcher : save Audit Template in database -Administrator <-- Watcher : Audit Template UUID - -== Launch a new Audit == - -Administrator -> Watcher : launch new Audit of the Openstack infrastructure resources\nwith a previously created Audit Template -Administrator <-- Watcher : Audit UUID -Administrator -> Watcher : get the Audit state -Administrator <-- Watcher : ONGOING -Watcher -> Watcher : compute a solution to achieve optimization goal -Administrator -> Watcher : get the Audit state -Administrator <-- Watcher : SUCCEEDED - -== Get the result of the Audit == - -Administrator -> Watcher : get Action Plan -Administrator <-- Watcher : recommended Action Plan and estimated efficacy -Administrator -> Administrator : verify the recommended actions\nand evaluate the estimated gain vs aggressiveness of the solution - -== Launch the recommended Action Plan == - -Administrator -> Watcher : launch the Action Plan -Administrator <-- Watcher : Action Plan has been launched -Watcher -> Watcher : trigger Actions on Openstack services -Administrator -> Watcher : get the Action Plan state -Administrator <-- Watcher : ONGOING -Administrator -> Watcher : get the Action Plan state -Administrator <-- Watcher : SUCCEEDED - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt b/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt deleted file mode 100644 index 3bfc815..0000000 --- a/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt +++ /dev/null @@ -1,50 +0,0 @@ -@startuml - -skinparam maxMessageSize 100 - -"AMQP Bus" -> "Decision Engine" : trigger audit - -activate "Decision Engine" - -"Decision Engine" -> "Database" 
: update audit.state = ONGOING -"AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = ONGOING -"Decision Engine" -> "Database" : get audit parameters (goal, strategy, ...) -"Decision Engine" <-- "Database" : audit parameters (goal, strategy, ...) -"Decision Engine" --> "Decision Engine"\ -: select appropriate optimization strategy (via the Strategy Selector) -create Strategy -"Decision Engine" -> "Strategy" : execute strategy -activate "Strategy" - "Strategy" -> "Cluster Data Model Collector" : get cluster data model - "Cluster Data Model Collector" --> "Strategy"\ - : copy of the in-memory cluster data model - loop while enough history data for the strategy - "Strategy" -> "Ceilometer API" : get necessary metrics - "Strategy" <-- "Ceilometer API" : aggregated metrics - end - "Strategy" -> "Strategy"\ - : compute/set needed actions for the solution so it achieves its goal - "Strategy" -> "Strategy" : compute/set efficacy indicators for the solution - "Strategy" -> "Strategy" : compute/set the solution global efficacy - "Decision Engine" <-- "Strategy"\ - : solution (unordered actions, efficacy indicators and global efficacy) -deactivate "Strategy" - -create "Planner" -"Decision Engine" -> "Planner" : load actions scheduler -"Planner" --> "Decision Engine" : planner plugin -"Decision Engine" -> "Planner" : schedule actions -activate "Planner" - "Planner" -> "Planner"\ - : schedule actions according to scheduling rules/policies - "Decision Engine" <-- "Planner" : new action plan -deactivate "Planner" -"Decision Engine" -> "Database" : save new action plan in database -"Decision Engine" -> "Database" : update audit.state = SUCCEEDED -"AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = SUCCEEDED - -deactivate "Decision Engine" - -hnote over "Decision Engine" : Idle - -@enduml diff --git a/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt b/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt deleted file mode 100644 index 
8b56c4a..0000000 --- a/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt +++ /dev/null @@ -1,153 +0,0 @@ -@startuml -!define table(x) class x << (T,#FFAAAA) >> -!define primary_key(x) x -!define foreign_key(x) x -hide methods -hide stereotypes - -table(goals) { - primary_key(id: Integer) - uuid : String[36] - name : String[63] - display_name : String[63] - efficacy_specification : JSONEncodedList, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(strategies) { - primary_key(id: Integer) - foreign_key(goal_id : Integer) - uuid : String[36] - name : String[63] - display_name : String[63] - parameters_spec : JSONEncodedDict, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(audit_templates) { - primary_key(id: Integer) - foreign_key("goal_id : Integer") - foreign_key("strategy_id : Integer, nullable") - uuid : String[36] - name : String[63], nullable - description : String[255], nullable - scope : JSONEncodedList - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(audits) { - primary_key(id: Integer) - foreign_key("goal_id : Integer") - foreign_key("strategy_id : Integer, nullable") - uuid : String[36] - audit_type : String[20] - state : String[20], nullable - interval : Integer, nullable - parameters : JSONEncodedDict, nullable - scope : JSONEncodedList, nullable - auto_trigger: Boolean - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(action_plans) { - primary_key(id: Integer) - foreign_key("audit_id : Integer, nullable") - foreign_key("strategy_id : Integer") - uuid : String[36] - state : String[20], nullable - global_efficacy : JSONEncodedDict, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(actions) { - primary_key(id: Integer) - 
foreign_key("action_plan_id : Integer") - uuid : String[36] - action_type : String[255] - input_parameters : JSONEncodedDict, nullable - state : String[20], nullable - parents : JSONEncodedList, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(efficacy_indicators) { - primary_key(id: Integer) - foreign_key("action_plan_id : Integer") - uuid : String[36] - name : String[63] - description : String[255], nullable - unit : String[63], nullable - value : Numeric - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - -table(scoring_engines) { - primary_key(id: Integer) - uuid : String[36] - name : String[63] - description : String[255], nullable - metainfo : Text, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - -table(service) { - primary_key(id: Integer) - name: String[255] - host: String[255] - last_seen_up: DateTime - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - "goals" <.. "strategies" : Foreign Key - "goals" <.. "audit_templates" : Foreign Key - "strategies" <.. "audit_templates" : Foreign Key - "goals" <.. "audits" : Foreign Key - "strategies" <.. "audits" : Foreign Key - "action_plans" <.. "actions" : Foreign Key - "action_plans" <.. "efficacy_indicators" : Foreign Key - "strategies" <.. "action_plans" : Foreign Key - "audits" <.. 
"action_plans" : Foreign Key - -@enduml diff --git a/doc/source/images/action_plan_state_machine.png b/doc/source/images/action_plan_state_machine.png deleted file mode 100644 index 4101853..0000000 Binary files a/doc/source/images/action_plan_state_machine.png and /dev/null differ diff --git a/doc/source/images/architecture.svg b/doc/source/images/architecture.svg deleted file mode 100644 index cd72b88..0000000 --- a/doc/source/images/architecture.svg +++ /dev/null @@ -1,1407 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - watcher decision engine - - - - - - watcherdb - - - - - - message bus - - watcher applier - - - nova - - - - glance - - - ceilometer - - - monasca - - - - - - datasourcedrivers - - - - - - modeldrivers - - - - - - actiondrivers - - - - - - plannerdrivers - - - - - - strategydrivers - - - - goaldrivers - - - - - - - - - - - - - watcher api - - watcherdashboard - - watcher cli - - - - - - - - - - - scoring enginedrivers - - - - - - - - - API call - - - RPC cast - - - - notification - - - - - - - extensions - - - - - - - - - - - - - - - workflowdrivers - - - - - - - - - gnocchi - - - - - diff --git a/doc/source/images/audit_state_machine.png b/doc/source/images/audit_state_machine.png deleted file mode 100644 index afe21c9..0000000 Binary files a/doc/source/images/audit_state_machine.png and /dev/null differ diff --git a/doc/source/images/functional_data_model.svg b/doc/source/images/functional_data_model.svg deleted file mode 100644 index f6e71dd..0000000 --- a/doc/source/images/functional_data_model.svg +++ /dev/null @@ -1,600 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/images/sequence_architecture_cdmc_sync.png b/doc/source/images/sequence_architecture_cdmc_sync.png deleted file mode 100644 index 16e0000..0000000 Binary files a/doc/source/images/sequence_architecture_cdmc_sync.png and /dev/null differ diff --git a/doc/source/images/sequence_create_and_launch_audit.png b/doc/source/images/sequence_create_and_launch_audit.png deleted file mode 100644 index f697192..0000000 Binary files a/doc/source/images/sequence_create_and_launch_audit.png and /dev/null differ diff --git a/doc/source/images/sequence_create_audit_template.png b/doc/source/images/sequence_create_audit_template.png deleted file mode 100644 index afc9839..0000000 Binary files a/doc/source/images/sequence_create_audit_template.png and /dev/null differ diff --git a/doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png b/doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png deleted file mode 100644 index f1721b3..0000000 Binary files 
a/doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png and /dev/null differ diff --git a/doc/source/images/sequence_launch_action_plan.png b/doc/source/images/sequence_launch_action_plan.png deleted file mode 100644 index ca5496a..0000000 Binary files a/doc/source/images/sequence_launch_action_plan.png and /dev/null differ diff --git a/doc/source/images/sequence_launch_action_plan_in_applier.png b/doc/source/images/sequence_launch_action_plan_in_applier.png deleted file mode 100644 index 0ae17d8..0000000 Binary files a/doc/source/images/sequence_launch_action_plan_in_applier.png and /dev/null differ diff --git a/doc/source/images/sequence_overview_watcher_usage.png b/doc/source/images/sequence_overview_watcher_usage.png deleted file mode 100644 index cc88a78..0000000 Binary files a/doc/source/images/sequence_overview_watcher_usage.png and /dev/null differ diff --git a/doc/source/images/sequence_trigger_audit_in_decision_engine.png b/doc/source/images/sequence_trigger_audit_in_decision_engine.png deleted file mode 100644 index 33ddfd3..0000000 Binary files a/doc/source/images/sequence_trigger_audit_in_decision_engine.png and /dev/null differ diff --git a/doc/source/images/watcher_db_schema_diagram.png b/doc/source/images/watcher_db_schema_diagram.png deleted file mode 100644 index 336603a..0000000 Binary files a/doc/source/images/watcher_db_schema_diagram.png and /dev/null differ diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 16e258e..0000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,120 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -================================ -Welcome to Watcher documentation -================================ - -OpenStack Watcher provides a flexible and scalable resource optimization -service for multi-tenant OpenStack-based clouds. -Watcher provides a complete optimization loop—including everything from a -metrics receiver, complex event processor and profiler, optimization processor -and an action plan applier. This provides a robust framework to realize a wide -range of cloud optimization goals, including the reduction of data center -operating costs, increased system performance via intelligent virtual machine -migration, increased energy efficiency—and more! - -Watcher project consists of several source code repositories: - -* `watcher`_ - is the main repository. It contains code for Watcher API server, - Watcher Decision Engine and Watcher Applier. -* `python-watcherclient`_ - Client library and CLI client for Watcher. -* `watcher-dashboard`_ - Watcher Horizon plugin. - -The documentation provided here is continually kept up-to-date based -on the latest code, and may not represent the state of the project at any -specific prior release. - -.. _watcher: https://git.openstack.org/cgit/openstack/watcher/ -.. _python-watcherclient: https://git.openstack.org/cgit/openstack/python-watcherclient/ -.. _watcher-dashboard: https://git.openstack.org/cgit/openstack/watcher-dashboard/ - -Developer Guide -=============== - -Introduction ------------- - -.. toctree:: - :maxdepth: 1 - - glossary - architecture - contributor/contributing - - -Getting Started ---------------- - -.. toctree:: - :maxdepth: 1 - - contributor/index - -API References --------------- - -.. toctree:: - :maxdepth: 1 - - api/index - -Plugins -------- - -.. toctree:: - :maxdepth: 1 - - contributor/plugin/index - -Installation -============ -.. toctree:: - :maxdepth: 2 - - install/index - -Admin Guide -=========== - -.. 
toctree:: - :maxdepth: 1 - - admin/index - -User Guide -========== - -.. toctree:: - :maxdepth: 2 - - user/index - -Watcher Manual Pages -==================== - -.. toctree:: - :glob: - :maxdepth: 1 - - man/index - -.. # NOTE(mriedem): This is the section where we hide things that we don't - # actually want in the table of contents but sphinx build would fail if - # they aren't in the toctree somewhere. For example, we hide api/autoindex - # since that's already covered with modindex below. -.. toctree:: - :hidden: - - api/autoindex - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/source/install/common_configure.rst b/doc/source/install/common_configure.rst deleted file mode 100644 index b34d5de..0000000 --- a/doc/source/install/common_configure.rst +++ /dev/null @@ -1,71 +0,0 @@ -2. Edit the ``/etc/watcher/watcher.conf`` file and complete the following - actions: - - * In the ``[database]`` section, configure database access: - - .. code-block:: ini - - [database] - ... - connection = mysql+pymysql://watcher:WATCHER_DBPASS@controller/watcher?charset=utf8 - - * In the `[DEFAULT]` section, configure the transport url for RabbitMQ message broker. - - .. code-block:: ini - - [DEFAULT] - ... - control_exchange = watcher - transport_url = rabbit://openstack:RABBIT_PASS@controller - - Replace the RABBIT_PASS with the password you chose for OpenStack user in RabbitMQ. - - * In the `[keystone_authtoken]` section, configure Identity service access. - - .. code-block:: ini - - [keystone_authtoken] - ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = watcher - password = WATCHER_PASS - - Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service. 
- - * Watcher interacts with other OpenStack projects via project clients, in order to instantiate these - clients, Watcher requests new session from Identity service. In the `[watcher_client_auth]` section, - configure the identity service access to interact with other OpenStack project clients. - - .. code-block:: ini - - [watcher_client_auth] - ... - auth_type = password - auth_url = http://controller:35357 - username = watcher - password = WATCHER_PASS - project_domain_name = default - user_domain_name = default - project_name = service - - Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service. - - * In the `[oslo_messaging_notifications]` section, configure the messaging driver. - - .. code-block:: ini - - [oslo_messaging_notifications] - ... - driver = messagingv2 - -3. Populate watcher database: - - .. code-block:: ini - - su -s /bin/sh -c "watcher-db-manage" watcher diff --git a/doc/source/install/common_prerequisites.rst b/doc/source/install/common_prerequisites.rst deleted file mode 100644 index c367894..0000000 --- a/doc/source/install/common_prerequisites.rst +++ /dev/null @@ -1,139 +0,0 @@ -Prerequisites -------------- - -Before you install and configure the Infrastructure Optimization service, -you must create a database, service credentials, and API endpoints. - -1. Create the database, complete these steps: - - * Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - * Create the ``watcher`` database: - - .. code-block:: console - - CREATE DATABASE watcher CHARACTER SET utf8; - - * Grant proper access to the ``watcher`` database: - - .. code-block:: console - - GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \ - IDENTIFIED BY 'WATCHER_DBPASS'; - GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \ - IDENTIFIED BY 'WATCHER_DBPASS'; - - Replace ``WATCHER_DBPASS`` with a suitable password. 
- - * Exit the database access client. - - .. code-block:: console - - exit; - -2. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -3. To create the service credentials, complete these steps: - - * Create the ``watcher`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt watcher - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | b18ee38e06034b748141beda8fc8bfad | - | name | watcher | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - - * Add the ``admin`` role to the ``watcher`` user: - - .. code-block:: console - - $ openstack role add --project service --user watcher admin - - .. note:: - - This command produces no output. - - * Create the watcher service entities: - - .. code-block:: console - - $ openstack service create --name watcher --description "Infrastructure Optimization" infra-optim - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Infrastructure Optimization | - | enabled | True | - | id | d854f6fff0a64f77bda8003c8dedfada | - | name | watcher | - | type | infra-optim | - +-------------+----------------------------------+ - - -4. Create the Infrastructure Optimization service API endpoints: - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - infra-optim public http://controller:9322 - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Infrastructure Optimization | - | enabled | True | - | id | d854f6fff0a64f77bda8003c8dedfada | - | name | watcher | - | type | infra-optim | - +-------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - infra-optim internal http://controller:9322 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 225aef8465ef4df48a341aaaf2b0a390 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | d854f6fff0a64f77bda8003c8dedfada | - | service_name | watcher | - | service_type | infra-optim | - | url | http://controller:9322 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - infra-optim admin http://controller:9322 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 375eb5057fb546edbdf3ee4866179672 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | d854f6fff0a64f77bda8003c8dedfada | - | service_name | watcher | - | service_type | infra-optim | - | url | http://controller:9322 | - +--------------+----------------------------------+ diff --git a/doc/source/install/get_started.rst b/doc/source/install/get_started.rst deleted file mode 100644 index 44a85ad..0000000 --- a/doc/source/install/get_started.rst +++ /dev/null @@ -1,27 +0,0 @@ -============================================ -Infrastructure Optimization service overview -============================================ -The Infrastructure Optimization service provides 
flexible and scalable -optimization service for multi-tenant OpenStack based clouds. - -The Infrastructure Optimization service consists of the following components: - -``watcher`` command-line client - A CLI to communicate with ``watcher-api`` to optimize the cloud. - -``watcher-api`` service - An OpenStack-native REST API that accepts and responds to end-user calls - by processing them and forwarding to appropriate underlying watcher - services via AMQP. - -``watcher-decision-engine`` service - It runs audit and return an action plan to achieve optimization goal - specified by the end-user in audit. - -``watcher-applier`` service - It executes action plan built by watcher-decision-engine. It interacts with - other OpenStack components like nova to execute the given action - plan. - -``watcher-dashboard`` - Watcher UI implemented as a plugin for the OpenStack Dashboard. diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 38e4c88..0000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,39 +0,0 @@ -=================================== -Infrastructure Optimization service -=================================== - -.. toctree:: - :maxdepth: 2 - - get_started.rst - install.rst - verify.rst - next-steps.rst - -The Infrastructure Optimization service (watcher) provides -flexible and scalable resource optimization service for -multi-tenant OpenStack-based clouds. - -Watcher provides a complete optimization loop including -everything from a metrics receiver, complex event processor -and profiler, optimization processor and an action plan -applier. This provides a robust framework to realize a wide -range of cloud optimization goals, including the reduction -of data center operating costs, increased system performance -via intelligent virtual machine migration, increased energy -efficiency—and more! 
- -Watcher also supports a pluggable architecture by which custom -optimization algorithms, data metrics and data profilers can be -developed and inserted into the Watcher framework. - -Check the documentation for watcher optimization strategies at -https://docs.openstack.org/watcher/latest/strategies/index.html - -Check watcher glossary at -https://docs.openstack.org/watcher/latest/glossary.html - - -This chapter assumes a working setup of OpenStack following the -`OpenStack Installation Tutorial -`_. diff --git a/doc/source/install/install-obs.rst b/doc/source/install/install-obs.rst deleted file mode 100644 index 6b3a48a..0000000 --- a/doc/source/install/install-obs.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. _install-obs: - - -Install and configure for openSUSE and SUSE Linux Enterprise -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Infrastructure -Optimization service for openSUSE Leap 42.1 and -SUSE Linux Enterprise Server 12 SP1. - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # zypper --quiet --non-interactive install - -.. include:: common_configure.rst - - -Finalize installation ---------------------- - -Start the Infrastructure Optimization services and configure them to start when -the system boots: - -.. code-block:: console - - # systemctl enable openstack-watcher-api.service - - # systemctl start openstack-watcher-api.service diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst deleted file mode 100644 index 47105d9..0000000 --- a/doc/source/install/install-rdo.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. 
_install-rdo: - -Install and configure for Red Hat Enterprise Linux and CentOS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -This section describes how to install and configure the Infrastructure -Optimization service for Red Hat Enterprise Linux 7 and CentOS 7. - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -1. Install the packages: - - .. code-block:: console - - # sudo yum install openstack-watcher-api openstack-watcher-applier \ - openstack-watcher-decision-engine - -.. include:: common_configure.rst - -Finalize installation ---------------------- - -Start the Infrastructure Optimization services and configure them to start when -the system boots: - -.. code-block:: console - - # systemctl enable openstack-watcher-api.service \ - openstack-watcher-decision-engine.service \ - openstack-watcher-applier.service - - # systemctl start openstack-watcher-api.service \ - openstack-watcher-decision-engine.service \ - openstack-watcher-applier.service diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst deleted file mode 100644 index e475a89..0000000 --- a/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. _install-ubuntu: - -Install and configure for Ubuntu -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Infrastructure -Optimization service for Ubuntu 14.04 (LTS). - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -1. Install the packages: - - .. code-block:: console - - # apt install watcher-api watcher-decision-engine \ - watcher-applier - - # apt install python-watcherclient - -.. include:: common_configure.rst - -Finalize installation ---------------------- - -Restart the Infrastructure Optimization services: - -.. 
code-block:: console - - # service watcher-api restart - # service watcher-decision-engine restart - # service watcher-applier restart diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst deleted file mode 100644 index e6c8883..0000000 --- a/doc/source/install/install.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _install: - -Install and configure -~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Infrastructure -Optimization service, code-named watcher, on the controller node. - -This section assumes that you already have a working OpenStack -environment with at least the following components installed: -Identity Service, Compute Service, Telemetry data collection service. - -Note that installation and configuration vary by distribution. - -.. toctree:: - :maxdepth: 2 - - install-obs.rst - install-rdo.rst - install-ubuntu.rst diff --git a/doc/source/install/next-steps.rst b/doc/source/install/next-steps.rst deleted file mode 100644 index c07654b..0000000 --- a/doc/source/install/next-steps.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _next-steps: - -Next steps -~~~~~~~~~~ - -Your OpenStack environment now includes the watcher service. - -To add additional services, see -https://docs.openstack.org/project-install-guide/ocata/. diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index e901146..0000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. _verify: - -Verify operation -~~~~~~~~~~~~~~~~ - -Verify operation of the Infrastructure Optimization service. - -.. note:: - - Perform these commands on the controller node. - -1. Source the ``admin`` project credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -2. List service components to verify successful launch and registration - of each process: - - .. 
code-block:: console - - $ openstack optimize service list - +----+-------------------------+------------+--------+ - | ID | Name | Host | Status | - +----+-------------------------+------------+--------+ - | 1 | watcher-decision-engine | controller | ACTIVE | - | 2 | watcher-applier | controller | ACTIVE | - +----+-------------------------+------------+--------+ - -3. List goals and strategies: - - .. code-block:: console - - $ openstack optimize goal list - +--------------------------------------+----------------------+----------------------+ - | UUID | Name | Display name | - +--------------------------------------+----------------------+----------------------+ - | a8cd6d1a-008b-4ff0-8dbc-b30493fcc5b9 | dummy | Dummy goal | - | 03953f2f-02d0-42b5-9a12-7ba500a54395 | workload_balancing | Workload Balancing | - | de0f8714-984b-4d6b-add1-9cad8120fbce | server_consolidation | Server Consolidation | - | f056bc80-c6d1-40dc-b002-938ccade9385 | thermal_optimization | Thermal Optimization | - | e7062856-892e-4f0f-b84d-b828464b3fd0 | airflow_optimization | Airflow Optimization | - | 1f038da9-b36c-449f-9f04-c225bf3eb478 | unclassified | Unclassified | - +--------------------------------------+----------------------+----------------------+ - - $ openstack optimize strategy list - +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ - | UUID | Name | Display name | Goal | - +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ - | 98ae84c8-7c9b-4cbd-8d9c-4bd7c6b106eb | dummy | Dummy strategy | dummy | - | 02a170b6-c72e-479d-95c0-8a4fdd4cc1ef | dummy_with_scorer | Dummy Strategy using sample Scoring Engines | dummy | - | 8bf591b8-57e5-4a9e-8c7d-c37bda735a45 | outlet_temperature | Outlet temperature based strategy | thermal_optimization | - | 8a0810fb-9d9a-47b9-ab25-e442878abc54 | vm_workload_consolidation | VM 
Workload Consolidation Strategy | server_consolidation | - | 1718859c-3eb5-45cb-9220-9cb79fe42fa5 | basic | Basic offline consolidation | server_consolidation | - | b5e7f5f1-4824-42c7-bb52-cf50724f67bf | workload_stabilization | Workload stabilization | workload_balancing | - | f853d71e-9286-4df3-9d3e-8eaf0f598e07 | workload_balance | Workload Balance Migration Strategy | workload_balancing | - | 58bdfa89-95b5-4630-adf6-fd3af5ff1f75 | uniform_airflow | Uniform airflow migration strategy | airflow_optimization | - | 66fde55d-a612-4be9-8cb0-ea63472b420b | dummy_with_resize | Dummy strategy with resize | dummy | - +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ - -4. Run an action plan by creating an audit with dummy goal: - - .. code-block:: console - - $ openstack optimize audit create --goal dummy - +--------------+--------------------------------------+ - | Field | Value | - +--------------+--------------------------------------+ - | UUID | e94d4826-ad4e-44df-ad93-dff489fde457 | - | Created At | 2017-05-23T11:46:58.763394+00:00 | - | Updated At | None | - | Deleted At | None | - | State | PENDING | - | Audit Type | ONESHOT | - | Parameters | {} | - | Interval | None | - | Goal | dummy | - | Strategy | auto | - | Audit Scope | [] | - | Auto Trigger | False | - +--------------+--------------------------------------+ - - $ openstack optimize audit list - +--------------------------------------+------------+-----------+-------+----------+--------------+ - | UUID | Audit Type | State | Goal | Strategy | Auto Trigger | - +--------------------------------------+------------+-----------+-------+----------+--------------+ - | e94d4826-ad4e-44df-ad93-dff489fde457 | ONESHOT | SUCCEEDED | dummy | auto | False | - +--------------------------------------+------------+-----------+-------+----------+--------------+ - - $ openstack optimize actionplan list - 
+--------------------------------------+--------------------------------------+-------------+------------+-----------------+ - | UUID | Audit | State | Updated At | Global efficacy | - +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ - | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | RECOMMENDED | None | None | - +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ - - $ openstack optimize actionplan start ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | UUID | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | - | Created At | 2017-05-23T11:46:58+00:00 | - | Updated At | 2017-05-23T11:53:12+00:00 | - | Deleted At | None | - | Audit | e94d4826-ad4e-44df-ad93-dff489fde457 | - | Strategy | dummy | - | State | ONGOING | - | Efficacy indicators | [] | - | Global efficacy | {} | - +---------------------+--------------------------------------+ - - $ openstack optimize actionplan list - +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ - | UUID | Audit | State | Updated At | Global efficacy | - +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ - | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | SUCCEEDED | 2017-05-23T11:53:16+00:00 | None | - +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ diff --git a/doc/source/man/footer.rst b/doc/source/man/footer.rst deleted file mode 100644 index fc8c28e..0000000 --- a/doc/source/man/footer.rst +++ /dev/null @@ -1,5 +0,0 @@ -BUGS -==== - 
-* Watcher bugs are tracked in Launchpad at `OpenStack Watcher - `__ diff --git a/doc/source/man/general-options.rst b/doc/source/man/general-options.rst deleted file mode 100644 index 71aa842..0000000 --- a/doc/source/man/general-options.rst +++ /dev/null @@ -1,66 +0,0 @@ - **-h, --help** - Show the help message and exit - - **--version** - Print the version number and exit - - **-v, --verbose** - Print more verbose output - - **--noverbose** - Disable verbose output - - **-d, --debug** - Print debugging output (set logging level to DEBUG instead of - default WARNING level) - - **--nodebug** - Disable debugging output - - **--use-syslog** - Use syslog for logging - - **--nouse-syslog** - Disable the use of syslog for logging - - **--syslog-log-facility SYSLOG_LOG_FACILITY** - syslog facility to receive log lines - - **--config-dir DIR** - Path to a config directory to pull \*.conf files from. This - file set is sorted, to provide a predictable parse order - if individual options are over-ridden. The set is parsed after - the file(s) specified via previous --config-file arguments; hence - over-ridden options in the directory take precedence. This means - that configuration from files in a specified config-dir will - always take precedence over configuration from files specified - by --config-file, regardless of argument order. - - **--config-file PATH** - Path to a config file to use. Multiple config files can be - specified by using this flag multiple times, for example, - --config-file --config-file . Values in latter - files take precedence. - - **--log-config-append PATH** **--log-config PATH** - The name of logging configuration file. It does not - disable existing loggers, but just appends specified - logging configuration to any other existing logging - options. Please see the Python logging module documentation - for details on logging configuration files. The log-config - name for this option is deprecated. 
- - **--log-format FORMAT** - A logging.Formatter log message format string which may use any - of the available logging.LogRecord attributes. Default: None - - **--log-date-format DATE_FORMAT** - Format string for %(asctime)s in log records. Default: None - - **--log-file PATH, --logfile PATH** - (Optional) Name of log file to output to. If not set, logging - will go to stdout. - - **--log-dir LOG_DIR, --logdir LOG_DIR** - (Optional) The directory to keep log files in (will be prepended - to --log-file) diff --git a/doc/source/man/index.rst b/doc/source/man/index.rst deleted file mode 100644 index 74469af..0000000 --- a/doc/source/man/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. toctree:: - :glob: - :maxdepth: 1 - - footer.rst - general-options - watcher-api - watcher-applier - watcher-db-manage - watcher-decision-engine \ No newline at end of file diff --git a/doc/source/man/watcher-api.rst b/doc/source/man/watcher-api.rst deleted file mode 100644 index d2f7fa8..0000000 --- a/doc/source/man/watcher-api.rst +++ /dev/null @@ -1,39 +0,0 @@ -=========== -watcher-api -=========== - ---------------------------- -Service for the Watcher API ---------------------------- - -:Author: openstack@lists.launchpad.net -:Date: -:Copyright: OpenStack Foundation -:Version: -:Manual section: 1 -:Manual group: cloud computing - -SYNOPSIS -======== - -watcher-api [options] - -DESCRIPTION -=========== - -watcher-api is a server daemon that serves the Watcher API - -OPTIONS -======= - - **General options** - - .. include:: general-options.rst - -FILES -===== - - **/etc/watcher/watcher.conf** - Default configuration file for Watcher API - -.. 
include:: footer.rst diff --git a/doc/source/man/watcher-applier.rst b/doc/source/man/watcher-applier.rst deleted file mode 100644 index 7f22b76..0000000 --- a/doc/source/man/watcher-applier.rst +++ /dev/null @@ -1,39 +0,0 @@ -=============== -watcher-applier -=============== - -------------------------------- -Service for the Watcher Applier -------------------------------- - -:Author: openstack@lists.launchpad.net -:Date: -:Copyright: OpenStack Foundation -:Version: -:Manual section: 1 -:Manual group: cloud computing - -SYNOPSIS -======== - -watcher-applier [options] - -DESCRIPTION -=========== - -:ref:`Watcher Applier ` - -OPTIONS -======= - - **General options** - - .. include:: general-options.rst - -FILES -===== - - **/etc/watcher/watcher.conf** - Default configuration file for Watcher Applier - -.. include:: footer.rst diff --git a/doc/source/man/watcher-db-manage.rst b/doc/source/man/watcher-db-manage.rst deleted file mode 100644 index a7b933d..0000000 --- a/doc/source/man/watcher-db-manage.rst +++ /dev/null @@ -1,260 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher-db-manage: - -================= -watcher-db-manage -================= - -The :command:`watcher-db-manage` utility is used to create the database schema -tables that the watcher services will use for storage. It can also be used to -upgrade (or downgrade) existing database tables when migrating between -different versions of watcher. - -The `Alembic library `_ is used to perform -the database migrations. - -Options -======= - -This is a partial list of the most useful options. To see the full list, -run the following:: - - watcher-db-manage --help - -.. program:: watcher-db-manage - -.. option:: -h, --help - - Show help message and exit. - -.. option:: --config-dir - - Path to a config directory with configuration files. - -.. 
option:: --config-file - - Path to a configuration file to use. - -.. option:: -d, --debug - - Print debugging output. - -.. option:: -v, --verbose - - Print more verbose output. - -.. option:: --version - - Show the program's version number and exit. - -.. option:: upgrade, downgrade, stamp, revision, version, create_schema, purge - - The :ref:`command ` to run. - -Usage -===== - -Options for the various :ref:`commands ` for -:command:`watcher-db-manage` are listed when the :option:`-h` or -:option:`--help` -option is used after the command. - -For example:: - - watcher-db-manage create_schema --help - -Information about the database is read from the watcher configuration file -used by the API server and conductor services. This file must be specified -with the :option:`--config-file` option:: - - watcher-db-manage --config-file /path/to/watcher.conf create_schema - -The configuration file defines the database backend to use with the -*connection* database option:: - - [database] - connection=mysql://root@localhost/watcher - -If no configuration file is specified with the :option:`--config-file` option, -:command:`watcher-db-manage` assumes an SQLite database. - -.. _db-manage_cmds: - -Command Options -=============== - -:command:`watcher-db-manage` is given a command that tells the utility -what actions to perform. -These commands can take arguments. Several commands are available: - -.. _create_schema: - -create_schema -------------- - -.. program:: create_schema - -.. option:: -h, --help - - Show help for create_schema and exit. - -This command will create database tables based on the most current version. -It assumes that there are no existing tables. - -An example of creating database tables with the most recent version:: - - watcher-db-manage --config-file=/etc/watcher/watcher.conf create_schema - -downgrade ---------- - -.. program:: downgrade - -.. option:: -h, --help - - Show help for downgrade and exit. - -.. 
option:: --revision - - The revision number you want to downgrade to. - -This command will revert existing database tables to a previous version. -The version can be specified with the :option:`--revision` option. - -An example of downgrading to table versions at revision 2581ebaf0cb2:: - - watcher-db-manage --config-file=/etc/watcher/watcher.conf downgrade --revision 2581ebaf0cb2 - -revision --------- - -.. program:: revision - -.. option:: -h, --help - - Show help for revision and exit. - -.. option:: -m , --message - - The message to use with the revision file. - -.. option:: --autogenerate - - Compares table metadata in the application with the status of the database - and generates migrations based on this comparison. - -This command will create a new revision file. You can use the -:option:`--message` option to comment the revision. - -This is really only useful for watcher developers making changes that require -database changes. This revision file is used during database migration and -will specify the changes that need to be made to the database tables. Further -discussion is beyond the scope of this document. - -stamp ------ - -.. program:: stamp - -.. option:: -h, --help - - Show help for stamp and exit. - -.. option:: --revision - - The revision number. - -This command will 'stamp' the revision table with the version specified with -the :option:`--revision` option. It will not run any migrations. - -upgrade -------- - -.. program:: upgrade - -.. option:: -h, --help - - Show help for upgrade and exit. - -.. option:: --revision - - The revision number to upgrade to. - -This command will upgrade existing database tables to the most recent version, -or to the version specified with the :option:`--revision` option. - -If there are no existing tables, then new tables are created, beginning -with the oldest known version, and successively upgraded using all of the -database migration files, until they are at the specified version. 
Note -that this behavior is different from the :ref:`create_schema` command -that creates the tables based on the most recent version. - -An example of upgrading to the most recent table versions:: - - watcher-db-manage --config-file=/etc/watcher/watcher.conf upgrade - -.. note:: - - This command is the default if no command is given to - :command:`watcher-db-manage`. - -.. warning:: - - The upgrade command is not compatible with SQLite databases since it uses - ALTER TABLE commands to upgrade the database tables. SQLite supports only - a limited subset of ALTER TABLE. - -version -------- - -.. program:: version - -.. option:: -h, --help - - Show help for version and exit. - -This command will output the current database version. - -purge ----- - -.. program:: purge - -.. option:: -h, --help - - Show help for purge and exit. - -.. option:: -d, --age-in-days - - The number of days (starting from today) before which we consider soft - deleted objects as expired and should hence be erased. By default, all - soft deleted objects are considered expired. This can be useful as removing - a significant number of objects may cause performance issues. - -.. option:: -n, --max-number - - The maximum number of database objects we expect to be deleted. If exceeded, - this will prevent any deletion. - -.. option:: -t, --audit-template - - Either the UUID or name of the soft deleted audit template to purge. This - will also include any related objects with it. - -.. option:: -e, --exclude-orphans - - This is a flag to indicate when we want to exclude orphan objects from - deletion. - -.. option:: --dry-run - - This is a flag to indicate when we want to perform a dry run. This will show - the objects that would be deleted instead of actually deleting them. - -This command will purge the current database by removing both its soft deleted -and orphan objects. 
diff --git a/doc/source/man/watcher-decision-engine.rst b/doc/source/man/watcher-decision-engine.rst deleted file mode 100644 index 3e07c3b..0000000 --- a/doc/source/man/watcher-decision-engine.rst +++ /dev/null @@ -1,39 +0,0 @@ -======================= -watcher-decision-engine -======================= - ---------------------------------------- -Service for the Watcher Decision Engine ---------------------------------------- - -:Author: openstack@lists.launchpad.net -:Date: -:Copyright: OpenStack Foundation -:Version: -:Manual section: 1 -:Manual group: cloud computing - -SYNOPSIS -======== - -watcher-decision-engine [options] - -DESCRIPTION -=========== - -:ref:`Watcher Decision Engine ` - -OPTIONS -======= - - **General options** - - .. include:: general-options.rst - -FILES -===== - - **/etc/watcher/watcher.conf** - Default configuration file for Watcher Decision Engine - -.. include:: footer.rst diff --git a/doc/source/strategies/basic-server-consolidation.rst b/doc/source/strategies/basic-server-consolidation.rst deleted file mode 100644 index ef8bea1..0000000 --- a/doc/source/strategies/basic-server-consolidation.rst +++ /dev/null @@ -1,99 +0,0 @@ -================================== -Basic Offline Server Consolidation -================================== - -Synopsis --------- - -**display name**: ``basic`` - -**goal**: ``server_consolidation`` - - .. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation - -Requirements ------------- - -Metrics -******* - -The *basic* strategy requires the following metrics: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``compute.node.cpu.percent`` ceilometer_ none -``cpu_util`` ceilometer_ none -============================ ============ ======= ======= - -.. 
_ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - * - ``change_nova_service_state`` - - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameter is: - -====================== ====== ============= =================================== -parameter type default Value description -====================== ====== ============= =================================== -``migration_attempts`` Number 0 Maximum number of combinations to - be tried by the strategy while - searching for potential candidates. - To remove the limit, set it to 0 -``period`` Number 7200 The time interval in seconds - for getting statistic aggregation - from metric data source -====================== ====== ============= =================================== - -Efficacy Indicator ------------------- - -.. watcher-func:: - :format: literal_block - - watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 server_consolidation --strategy basic - - $ openstack optimize audit create -a at1 -p migration_attempts=4 - -External Links --------------- -None. 
diff --git a/doc/source/strategies/index.rst b/doc/source/strategies/index.rst deleted file mode 100644 index 9af2667..0000000 --- a/doc/source/strategies/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Strategies -========== - -.. toctree:: - :glob: - :maxdepth: 1 - - ./* diff --git a/doc/source/strategies/outlet_temp_control.rst b/doc/source/strategies/outlet_temp_control.rst deleted file mode 100644 index 5a4294c..0000000 --- a/doc/source/strategies/outlet_temp_control.rst +++ /dev/null @@ -1,104 +0,0 @@ -================================= -Outlet Temperature Based Strategy -================================= - -Synopsis --------- - -**display name**: ``outlet_temperature`` - -**goal**: ``thermal_optimization`` - -Outlet (Exhaust Air) temperature is a new thermal telemetry which can be -used to measure the host's thermal/workload status. This strategy makes -decisions to migrate workloads to the hosts with good thermal condition -(lowest outlet temperature) when the outlet temperature of source hosts -reach a configurable threshold. - -Requirements ------------- - -This strategy has a dependency on the host having Intel's Power -Node Manager 3.0 or later enabled. - - -Metrics -******* - -The *outlet_temperature* strategy requires the following metrics: - -========================================= ============ ======= ======= -metric service name plugins comment -========================================= ============ ======= ======= -``hardware.ipmi.node.outlet_temperature`` ceilometer_ IPMI -========================================= ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#ipmi-based-meters - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - .. 
list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameter is: - -============== ====== ============= ==================================== -parameter type default Value description -============== ====== ============= ==================================== -``threshold`` Number 35.0 Temperature threshold for migration -``period`` Number 30 The time interval in seconds for - getting statistic aggregation from - metric data source -============== ====== ============= ==================================== - -Efficacy Indicator ------------------- - -None - -Algorithm ---------- - -For more information on the Outlet Temperature Based Strategy please refer to: -https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/outlet-temperature-based-strategy.html - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 thermal_optimization --strategy outlet_temperature - - $ openstack optimize audit create -a at1 -p threshold=31.0 - -External Links --------------- - -- `Intel Power Node Manager 3.0 `_ diff --git a/doc/source/strategies/strategy-template.rst b/doc/source/strategies/strategy-template.rst deleted file mode 100644 index 4c07edd..0000000 --- a/doc/source/strategies/strategy-template.rst +++ /dev/null @@ -1,115 +0,0 @@ -============= -Strategy name -============= - -Synopsis --------- - -**display name**: - -**goal**: - -Add here a complete description of your strategy - -Requirements ------------- - -Metrics -******* - -Write here the list of metrics required by your strategy algorithm (in the form - of a table). 
If these metrics require a specific Telemetry plugin or other - additional software, please explain here how to deploy them (and add link to - dedicated installation guide). - -Example: - -======================= ============ ======= ======= -metric service name plugins comment -======================= ============ ======= ======= -compute.node.* ceilometer_ none one point every 60s -vm.cpu.utilization_perc monasca_ none -power ceilometer_ kwapi_ one point every 60s -======================= ============ ======= ======= - - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute -.. _monasca: https://github.com/openstack/monasca-agent/blob/master/docs/Libvirt.md -.. _kwapi: https://kwapi.readthedocs.io/en/latest/index.html - - -Cluster data model -****************** - -Default Watcher's cluster data model. - -or - -If your strategy implementation requires a new cluster data model, please - describe it in this section, with a link to model plugin's installation guide. - -Actions -******* - -Default Watcher's actions. - -or - -If your strategy implementation requires new actions, add the list of Action - plugins here (in the form of a table) with a link to the plugin's installation - procedure. - -======== ================= -action description -======== ================= -action1_ This action1 ... -action2_ This action2 ... -======== ================= - -.. _action1 : https://github.com/myrepo/watcher/plugins/action1 -.. _action2 : https://github.com/myrepo/watcher/plugins/action2 - -Planner -******* - -Default Watcher's planner. - -or - -If your strategy also requires a new planner to schedule built actions in time, - please describe it in this section, with a link to planner plugin's - installation guide. - -Configuration -------------- - -If your strategy uses configurable parameters, explain here how to tune them. - - -Efficacy Indicator ------------------- - -Add here the Efficacy indicator computed by your strategy. 
- -Algorithm ---------- - -Add here either the description of your algorithm or -link to the existing description. - -How to use it ? ---------------- - -.. code-block:: shell - - $ Write the command line to create an audit with your strategy. - -External Links --------------- - -If you have written papers, blog articles .... about your strategy into Watcher, - or if your strategy is based from external publication(s), please add HTTP - links and references in this section. - -- `link1 `_ -- `link2 `_ diff --git a/doc/source/strategies/uniform_airflow.rst b/doc/source/strategies/uniform_airflow.rst deleted file mode 100644 index d31f631..0000000 --- a/doc/source/strategies/uniform_airflow.rst +++ /dev/null @@ -1,107 +0,0 @@ -================================== -Uniform Airflow Migration Strategy -================================== - -Synopsis --------- - -**display name**: ``uniform_airflow`` - -**goal**: ``airflow_optimization`` - -.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow - -Requirements ------------- - -This strategy has a dependency on the server having Intel's Power -Node Manager 3.0 or later enabled. - -Metrics -******* - -The *uniform_airflow* strategy requires the following metrics: - -================================== ============ ======= ======= -metric service name plugins comment -================================== ============ ======= ======= -``hardware.ipmi.node.airflow`` ceilometer_ IPMI -``hardware.ipmi.node.temperature`` ceilometer_ IPMI -``hardware.ipmi.node.power`` ceilometer_ IPMI -================================== ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#ipmi-based-meters - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. 
list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameters are: - -====================== ====== ============= =========================== -parameter type default Value description -====================== ====== ============= =========================== -``threshold_airflow`` Number 400.0 Airflow threshold for - migration Unit is 0.1CFM -``threshold_inlet_t`` Number 28.0 Inlet temperature threshold - for migration decision -``threshold_power`` Number 350.0 System power threshold for - migration decision -``period`` Number 300 Aggregate time period of - ceilometer -====================== ====== ============= =========================== - -Efficacy Indicator ------------------- - -None - -Algorithm ---------- - -For more information on the Uniform Airflow Migration Strategy please refer to: -https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/uniform-airflow-migration-strategy.html - -How to use it ? ---------------- - -.. 
code-block:: shell - - $ openstack optimize audittemplate create \ - at1 airflow_optimization --strategy uniform_airflow - - $ openstack optimize audit create -a at1 -p threshold_airflow=410 \ - -p threshold_inlet_t=29.0 -p threshold_power=355.0 -p period=310 - -External Links -------------- - -- `Intel Power Node Manager 3.0 `_ diff --git a/doc/source/strategies/vm_workload_consolidation.rst b/doc/source/strategies/vm_workload_consolidation.rst deleted file mode 100644 index 5d30f9d..0000000 --- a/doc/source/strategies/vm_workload_consolidation.rst +++ /dev/null @@ -1,114 +0,0 @@ -================================== -VM Workload Consolidation Strategy -================================== - -Synopsis -------- - -**display name**: ``vm_workload_consolidation`` - -**goal**: ``server_consolidation`` - - .. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation - -Requirements ------------ - -Metrics -******* - -The *vm_workload_consolidation* strategy requires the following metrics: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``memory`` ceilometer_ none -``disk.root.size`` ceilometer_ none -============================ ============ ======= ======= - -The following metrics are not required but increase the accuracy of -the strategy if available: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``memory.usage`` ceilometer_ none -``cpu_util`` ceilometer_ none -============================ ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. 
watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - * - ``change_nova_service_state`` - - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - - -Configuration -------------- - -Strategy parameter is: - -====================== ====== ============= =================================== -parameter type default Value description -====================== ====== ============= =================================== -``period`` Number 3600 The time interval in seconds - for getting statistic aggregation - from metric data source -====================== ====== ============= =================================== - - -Efficacy Indicator ------------------- - -.. watcher-func:: - :format: literal_block - - watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator - -Algorithm ---------- - -For more information on the VM Workload consolidation strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html - -How to use it ? ---------------- - -.. 
code-block:: shell - - $ openstack optimize audittemplate create \ - at1 server_consolidation --strategy vm_workload_consolidation - - $ openstack optimize audit create -a at1 - -External Links --------------- - -*Spec URL* -https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html diff --git a/doc/source/strategies/workload-stabilization.rst b/doc/source/strategies/workload-stabilization.rst deleted file mode 100644 index 567aa1d..0000000 --- a/doc/source/strategies/workload-stabilization.rst +++ /dev/null @@ -1,141 +0,0 @@ -============================================= -Watcher Overload standard deviation algorithm -============================================= - -Synopsis --------- - -**display name**: ``workload_stabilization`` - -**goal**: ``workload_balancing`` - - .. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization - -Requirements ------------- - -Metrics -******* - -The *workload_stabilization* strategy requires the following metrics: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``compute.node.cpu.percent`` ceilometer_ none -``hardware.memory.used`` ceilometer_ SNMP_ -``cpu_util`` ceilometer_ none -``memory.resident`` ceilometer_ none -============================ ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute -.. _SNMP: http://docs.openstack.org/admin-guide/telemetry-measurements.html - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. 
watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameters are: - -==================== ====== ===================== ============================= -parameter type default Value description -==================== ====== ===================== ============================= -``metrics`` array |metrics| Metrics used as rates of - cluster loads. -``thresholds`` object |thresholds| Dict where key is a metric - and value is a trigger value. - -``weights`` object |weights| These weights used to - calculate common standard - deviation. Name of weight - contains meter name and - _weight suffix. -``instance_metrics`` object |instance_metrics| Mapping to get hardware - statistics using instance - metrics. -``host_choice`` string retry Method of host's choice. - There are cycle, retry and - fullsearch methods. Cycle - will iterate hosts in cycle. - Retry will get some hosts - random (count defined in - retry_count option). - Fullsearch will return each - host from list. -``retry_count`` number 1 Count of random returned - hosts. -``periods`` object |periods| These periods are used to get - statistic aggregation for - instance and host metrics. - The period is simply a - repeating interval of time - into which the samples are - grouped for aggregation. - Watcher uses only the last - period of all recieved ones. -==================== ====== ===================== ============================= - -.. |metrics| replace:: ["cpu_util", "memory.resident"] -.. |thresholds| replace:: {"cpu_util": 0.2, "memory.resident": 0.2} -.. |weights| replace:: {"cpu_util_weight": 1.0, "memory.resident_weight": 1.0} -.. |instance_metrics| replace:: {"cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used"} -.. 
|periods| replace:: {"instance": 720, "node": 600} - -Efficacy Indicator ------------------- - -.. watcher-func:: - :format: literal_block - - watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator - -Algorithm ---------- - -You can find description of overload algorithm and role of standard deviation -here: https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/sd-strategy.html - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 workload_balancing --strategy workload_stabilization - - $ openstack optimize audit create -a at1 \ - -p thresholds='{"memory.resident": 0.05}' \ - -p metrics='["memory.resident"]' - -External Links --------------- - -- `Watcher Overload standard deviation algorithm spec `_ diff --git a/doc/source/strategies/workload_balance.rst b/doc/source/strategies/workload_balance.rst deleted file mode 100644 index ea09c6e..0000000 --- a/doc/source/strategies/workload_balance.rst +++ /dev/null @@ -1,98 +0,0 @@ -=================================== -Workload Balance Migration Strategy -=================================== - -Synopsis --------- - -**display name**: ``workload_balance`` - -**goal**: ``workload_balancing`` - -.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance - -Requirements ------------- - -None. - -Metrics -******* - -The *workload_balance* strategy requires the following metrics: - -======================= ============ ======= ======= -metric service name plugins comment -======================= ============ ======= ======= -``cpu_util`` ceilometer_ none -======================= ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute - - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. 
watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameters are: - -============== ====== ============= ==================================== -parameter type default Value description -============== ====== ============= ==================================== -``threshold`` Number 25.0 Workload threshold for migration -``period`` Number 300 Aggregate time period of ceilometer -============== ====== ============= ==================================== - -Efficacy Indicator ------------------- - -None - -Algorithm ---------- - -For more information on the Workload Balance Migration Strategy please refer -to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/workload-balance-migration-strategy.html - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 workload_balancing --strategy workload_balance - - $ openstack optimize audit create -a at1 -p threshold=26.0 \ - -p period=310 - -External Links --------------- - -None. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst deleted file mode 100644 index bb1db2c..0000000 --- a/doc/source/user/index.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. toctree:: - :maxdepth: 2 - - user-guide diff --git a/doc/source/user/user-guide.rst b/doc/source/user/user-guide.rst deleted file mode 100644 index e0f2b54..0000000 --- a/doc/source/user/user-guide.rst +++ /dev/null @@ -1,236 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _user-guide: - -================== -Watcher User Guide -================== - -See the -`architecture page `_ -for an architectural overview of the different components of Watcher and how -they fit together. - -In this guide we're going to take you through the fundamentals of using -Watcher. - -The following diagram shows the main interactions between the -:ref:`Administrator ` and the Watcher system: - -.. image:: ../images/sequence_overview_watcher_usage.png - :width: 100% - - -Getting started with Watcher ----------------------------- -This guide assumes you have a working installation of Watcher. If you get -"*watcher: command not found*" you may have to verify your installation. -Please refer to the `installation guide`_. -In order to use Watcher, you have to configure your credentials suitable for -watcher command-line tools. - -You can interact with Watcher either by using our dedicated `Watcher CLI`_ -named ``watcher``, or by using the `OpenStack CLI`_ ``openstack``. - -If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon -plugin installation guide`_. - -.. _`installation guide`: http://docs.openstack.org/developer/python-watcherclient -.. _`Watcher Horizon plugin installation guide`: http://docs.openstack.org/developer/watcher-dashboard/deploy/installation.html -.. _`OpenStack CLI`: http://docs.openstack.org/developer/python-openstackclient/man/openstack.html -.. _`Watcher CLI`: http://docs.openstack.org/developer/python-watcherclient/index.html - -Seeing what the Watcher CLI can do ? ------------------------------------- -We can see all of the commands available with Watcher CLI by running the -watcher binary without options. - -.. code:: bash - - $ watcher help - -or:: - - $ openstack help optimize - -How do I run an audit of my cluster ? -------------------------------------- - -First, you need to find the :ref:`goal ` you want to achieve: - -.. 
code:: bash - - $ watcher goal list - -or:: - - $ openstack optimize goal list - -.. note:: - - If you get "*You must provide a username via either --os-username or via - env[OS_USERNAME]*" you may have to verify your credentials. - -Then, you can create an :ref:`audit template `. -An :ref:`audit template ` defines an optimization -:ref:`goal ` to achieve (i.e. the settings of your audit). - -.. code:: bash - - $ watcher audittemplate create my_first_audit_template - -or:: - - $ openstack optimize audittemplate create my_first_audit_template - -Although optional, you may want to actually set a specific strategy for your -audit template. If so, you may can search of its UUID or name using the -following command: - -.. code:: bash - - $ watcher strategy list --goal - -or:: - - $ openstack optimize strategy list --goal - -You can use the following command to check strategy details including which -parameters of which format it supports: - -.. code:: bash - - $ watcher strategy show - -or:: - - $ openstack optimize strategy show - -The command to create your audit template would then be: - -.. code:: bash - - $ watcher audittemplate create my_first_audit_template \ - --strategy - -or:: - - $ openstack optimize audittemplate create my_first_audit_template \ - --strategy - -Then, you can create an audit. An audit is a request for optimizing your -cluster depending on the specified :ref:`goal `. - -You can launch an audit on your cluster by referencing the -:ref:`audit template ` (i.e. the settings of your -audit) that you want to use. - -- Get the :ref:`audit template ` UUID or name: - -.. code:: bash - - $ watcher audittemplate list - -or:: - - $ openstack optimize audittemplate list - -- Start an audit based on this :ref:`audit template - ` settings: - -.. 
code:: bash - - $ watcher audit create -a - -or:: - - $ openstack optimize audit create -a - -If your_audit_template was created by --strategy , and it -defines some parameters (command `watcher strategy show` to check parameters -format), your can append `-p` to input required parameters: - -.. code:: bash - - $ watcher audit create -a \ - -p =5.5 -p =hi - -or:: - - $ openstack optimize audit create -a \ - -p =5.5 -p =hi - -Input parameter could cause audit creation failure, when: - -- no predefined strategy for audit template -- no parameters spec in predefined strategy -- input parameters don't comply with spec - -Watcher service will compute an :ref:`Action Plan ` -composed of a list of potential optimization :ref:`actions ` -(instance migration, disabling of a compute node, ...) according to the -:ref:`goal ` to achieve. You can see all of the goals -available in section ``[watcher_strategies]`` of the Watcher service -configuration file. - -- Wait until the Watcher audit has produced a new :ref:`action plan - `, and get it: - -.. code:: bash - - $ watcher actionplan list --audit - -or:: - - $ openstack optimize actionplan list --audit - -- Have a look on the list of optimization :ref:`actions ` - contained in this new :ref:`action plan `: - -.. code:: bash - - $ watcher action list --action-plan - -or:: - - $ openstack optimize action list --action-plan - -Once you have learned how to create an :ref:`Action Plan -`, it's time to go further by applying it to your -cluster: - -- Execute the :ref:`action plan `: - -.. code:: bash - - $ watcher actionplan start - -or:: - - $ openstack optimize actionplan start - -You can follow the states of the :ref:`actions ` by -periodically calling: - -.. code:: bash - - $ watcher action list - -or:: - - $ openstack optimize action list - -You can also obtain more detailed information about a specific action: - -.. 
code:: bash - - $ watcher action show - -or:: - - $ openstack optimize action show - diff --git a/etc/apache2/watcher b/etc/apache2/watcher deleted file mode 100644 index bdf5562..0000000 --- a/etc/apache2/watcher +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using -# Watcher API through mod_wsgi -Listen 9322 - - - WSGIDaemonProcess watcher-api user=stack group=stack processes=2 threads=2 display-name=%{GROUP} - WSGIScriptAlias / /opt/stack/watcher/watcher/api/app.wsgi - WSGIProcessGroup watcher-api - - ErrorLog /var/log/httpd/watcher_error.log - LogLevel info - CustomLog /var/log/httpd/watcher_access.log combined - - - WSGIProcessGroup watcher-api - WSGIApplicationGroup %{GLOBAL} - AllowOverride All - Require all granted - - - diff --git a/etc/watcher/README-watcher.conf.txt b/etc/watcher/README-watcher.conf.txt deleted file mode 100644 index 59373b8..0000000 --- a/etc/watcher/README-watcher.conf.txt +++ /dev/null @@ -1,4 +0,0 @@ -To generate the sample watcher.conf file, run the following -command from the top level of the watcher directory: - -tox -e genconfig diff --git a/etc/watcher/policy.json b/etc/watcher/policy.json deleted file mode 100644 index 5f94931..0000000 --- a/etc/watcher/policy.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "admin_api": "role:admin or role:administrator", - "show_password": "!", - "default": "rule:admin_api", - - "action:detail": "rule:default", - 
"action:get": "rule:default", - "action:get_all": "rule:default", - - "action_plan:delete": "rule:default", - "action_plan:detail": "rule:default", - "action_plan:get": "rule:default", - "action_plan:get_all": "rule:default", - "action_plan:update": "rule:default", - - "audit:create": "rule:default", - "audit:delete": "rule:default", - "audit:detail": "rule:default", - "audit:get": "rule:default", - "audit:get_all": "rule:default", - "audit:update": "rule:default", - - "audit_template:create": "rule:default", - "audit_template:delete": "rule:default", - "audit_template:detail": "rule:default", - "audit_template:get": "rule:default", - "audit_template:get_all": "rule:default", - "audit_template:update": "rule:default", - - "goal:detail": "rule:default", - "goal:get": "rule:default", - "goal:get_all": "rule:default", - - "scoring_engine:detail": "rule:default", - "scoring_engine:get": "rule:default", - "scoring_engine:get_all": "rule:default", - - "strategy:detail": "rule:default", - "strategy:get": "rule:default", - "strategy:get_all": "rule:default", - - "service:detail": "rule:default", - "service:get": "rule:default", - "service:get_all": "rule:default" -} diff --git a/etc/watcher/watcher-config-generator.conf b/etc/watcher/watcher-config-generator.conf deleted file mode 100644 index 6e51dea..0000000 --- a/etc/watcher/watcher-config-generator.conf +++ /dev/null @@ -1,16 +0,0 @@ -[DEFAULT] -output_file = etc/watcher/watcher.conf.sample -wrap_width = 79 - -namespace = watcher -namespace = keystonemiddleware.auth_token -namespace = oslo.cache -namespace = oslo.concurrency -namespace = oslo.db -namespace = oslo.log -namespace = oslo.messaging -namespace = oslo.policy -namespace = oslo.reports -namespace = oslo.service.periodic_task -namespace = oslo.service.service -namespace = oslo.service.wsgi diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index 37c09ff..0000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,42 +0,0 @@ -Rally 
job -========= - -We provide, with Watcher, a Rally plugin you can use to benchmark the optimization service. - -To launch this task with configured Rally you just need to run: - -:: - - rally task start watcher/rally-jobs/watcher-watcher.yaml - -Structure ---------- - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy pasted to gates, so you - are able to use absolute paths in rally tasks. - Files will be located in ~/.rally/extra/* - -* watcher.yaml is a task that is run in gates against OpenStack - deployed by DevStack - - -Useful links ------------- - -* How to install: http://docs.openstack.org/developer/rally/install.html - -* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html - -* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html - -* About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/watcher-watcher.yaml b/rally-jobs/watcher-watcher.yaml deleted file mode 100644 index c01310b..0000000 --- a/rally-jobs/watcher-watcher.yaml +++ /dev/null @@ -1,63 +0,0 @@ ---- - Watcher.create_audit_and_delete: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "round_robin" - params: - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 - - Watcher.create_audit_template_and_delete: - - - args: - goal: - name: "dummy" - 
strategy: - name: "dummy" - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 - - Watcher.list_audit_templates: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "random" - params: - - goal: - name: "workload_balancing" - strategy: - name: "workload_stabilization" - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml b/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml deleted file mode 100644 index cf4b562..0000000 --- a/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adds feature to cancel an action-plan. diff --git a/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml b/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml deleted file mode 100644 index 1c73022..0000000 --- a/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add notifications related to Action plan object. diff --git a/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml b/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml deleted file mode 100644 index 0d2b749..0000000 --- a/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Added a standard way to both declare and fetch - configuration options so that whenever the - administrator generates the Watcher - configuration sample file, it contains the - configuration options of the plugins that are - currently available. 
diff --git a/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml b/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml deleted file mode 100644 index 0a3f172..0000000 --- a/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add action for compute node power on/off diff --git a/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml b/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml deleted file mode 100644 index 1255b71..0000000 --- a/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added a generic scoring engine module, which - will standardize interactions with scoring engines - through the common API. It is possible to use the - scoring engine by different Strategies, which - improve the code and data model re-use. diff --git a/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml b/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml deleted file mode 100644 index 9eb1cd2..0000000 --- a/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add notifications related to Audit object. diff --git a/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml b/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml deleted file mode 100644 index b96723c..0000000 --- a/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Watcher can continuously optimize the OpenStack cloud for a specific - strategy or goal by triggering an audit periodically which generates - an action plan and run it automatically. 
diff --git a/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml b/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml deleted file mode 100644 index b76db57..0000000 --- a/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Centralize all configuration options for Watcher. diff --git a/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml b/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml deleted file mode 100644 index b0295cf..0000000 --- a/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added cinder cluster data model diff --git a/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml b/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml deleted file mode 100644 index 5d9d014..0000000 --- a/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added an in-memory cache of the cluster model - built up and kept fresh via notifications from - services of interest in addition to periodic - syncing logic. diff --git a/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml b/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml deleted file mode 100644 index 53e101d..0000000 --- a/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a way to add a new action without having to - amend the source code of the default planner. 
diff --git a/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml b/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml deleted file mode 100644 index 898887b..0000000 --- a/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a way to create periodic audit to be able to - optimize continuously the cloud infrastructure. diff --git a/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml b/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml deleted file mode 100644 index 63d5ad6..0000000 --- a/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Watcher database can now be upgraded thanks to Alembic. diff --git a/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml b/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml deleted file mode 100644 index 5cea918..0000000 --- a/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Provides a generic way to define the scope of an audit. The set of audited - resources will be called "Audit scope" and will be defined in each audit - template (which contains the audit settings). diff --git a/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml b/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml deleted file mode 100644 index acaddcc..0000000 --- a/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a way to compare the efficacy of different - strategies for a give optimization goal. 
diff --git a/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml b/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml deleted file mode 100644 index 9732b15..0000000 --- a/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added a way to return the of available goals depending - on which strategies have been deployed on the node - where the decision engine is running. diff --git a/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml b/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml deleted file mode 100644 index d57a2dd..0000000 --- a/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - The graph model describes how VMs are associated to compute hosts. - This allows for seeing relationships upfront between the entities and hence - can be used to identify hot/cold spots in the data center and influence - a strategy decision. diff --git a/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml b/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml deleted file mode 100644 index a98dff8..0000000 --- a/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Watcher supports multiple metrics backend and relies on Ceilometer and - Monasca. diff --git a/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml b/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml deleted file mode 100644 index 8c4fa6c..0000000 --- a/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Allow decision engine to pass strategy parameters, - like optimization threshold, to selected strategy, - also strategy to provide parameters info to end user. 
diff --git a/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml b/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml deleted file mode 100644 index 0d3eb61..0000000 --- a/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Copy all audit templates parameters into - audit instead of having a reference to the - audit template. - diff --git a/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml b/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml deleted file mode 100644 index 0d3407f..0000000 --- a/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Watcher can now run specific actions in parallel improving the performances - dramatically when executing an action plan. diff --git a/releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml b/releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml deleted file mode 100644 index 9ef9a4c..0000000 --- a/releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Check the creation time of the action plan, - and set its state to SUPERSEDED if it has expired. diff --git a/releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml b/releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml deleted file mode 100644 index 0bdd480..0000000 --- a/releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added a strategy that monitors if there is a higher - load on some hosts compared to other hosts in the - cluster and re-balances the work across hosts to - minimize the standard deviation of the loads in - the cluster. 
diff --git a/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml b/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml deleted file mode 100644 index cf66a44..0000000 --- a/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added SUSPENDED audit state diff --git a/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml b/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml deleted file mode 100644 index 7cbc421..0000000 --- a/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added a new strategy based on the airflow - of servers. This strategy makes decisions - to migrate VMs to make the airflow uniform. diff --git a/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml b/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml deleted file mode 100644 index d469c00..0000000 --- a/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Provide a notification mechanism into Watcher that supports versioning. - Whenever a Watcher object is created, updated or deleted, a versioned - notification will, if it's relevant, be automatically sent to notify in order - to allow an event-driven style of architecture within Watcher. Moreover, it - will also give other services and/or 3rd party softwares (e.g. monitoring - solutions or rules engines) the ability to react to such events. diff --git a/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml b/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml deleted file mode 100644 index 1c5f813..0000000 --- a/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added policies to handle user rights - to access Watcher API. 
diff --git a/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml b/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml deleted file mode 100644 index 9710b97..0000000 --- a/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add a service supervisor to watch Watcher deamons. diff --git a/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml b/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml deleted file mode 100644 index c2a6df8..0000000 --- a/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - all Watcher objects have been refactored to support OVO - (oslo.versionedobjects) which was a prerequisite step in order to implement - versioned notifications. diff --git a/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml b/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml deleted file mode 100644 index e607426..0000000 --- a/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added a strategy based on the VM workloads of - hypervisors. This strategy makes decisions to - migrate workloads to make the total VM workloads - of each hypervisor balanced, when the total VM - workloads of hypervisor reaches threshold. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 8b36a78..0000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,258 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# watcher documentation build configuration file, created by -# sphinx-quickstart on Fri Jun 3 11:37:52 2016. -# -# This file is execfile()d with the current directory set to its containing dir -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os -from watcher import version as watcher_version - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ---------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['reno.sphinxext', - 'openstackdocstheme'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'watcher' -copyright = u'2016, Watcher developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = watcher_version.version_info.release_string() -# The full version, including alpha/beta/rc tags. -release = watcher_version.version_info.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." 
is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'watcherdoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]) -latex_documents = [ - ('index', 'watcher.tex', u'Watcher Documentation', - u'Watcher developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output ------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'watcher', u'Watcher Documentation', - [u'Watcher developers'], 1) -] - -# If true, show URL addresses after external links. 
-#man_show_urls = False - - -# -- Options for Texinfo output ----------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'watcher', u'Watcher Documentation', - u'Watcher developers', 'watcher', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 2478fff..0000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================================================= -Welcome to watcher's Release Notes documentation! -================================================= - -Contents: - -.. 
toctree:: - :maxdepth: 1 - - unreleased - ocata - newton - diff --git a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po deleted file mode 100644 index d38f86d..0000000 --- a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,33 +0,0 @@ -# Gérald LONLAS , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: watcher 1.0.1.dev51\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-03-21 11:57+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-10-22 06:44+0000\n" -"Last-Translator: Gérald LONLAS \n" -"Language-Team: French\n" -"Language: fr\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n > 1)\n" - -msgid "0.29.0" -msgstr "0.29.0" - -msgid "Contents:" -msgstr "Contenu :" - -msgid "Current Series Release Notes" -msgstr "Note de la release actuelle" - -msgid "New Features" -msgstr "Nouvelles fonctionnalités" - -msgid "Newton Series Release Notes" -msgstr "Note de release pour Newton" - -msgid "Welcome to watcher's Release Notes documentation!" -msgstr "Bienvenue dans la documentation de la note de Release de Watcher" diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 97036ed..0000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Newton Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f4..0000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. 
release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aab..0000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 7a18c2f..0000000 --- a/requirements.txt +++ /dev/null @@ -1,49 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -apscheduler # MIT License -enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD -jsonpatch>=1.1 # BSD -keystoneauth1>=3.0.1 # Apache-2.0 -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -keystonemiddleware>=4.12.0 # Apache-2.0 -lxml!=3.7.0,>=2.3 # BSD -croniter>=0.3.4 # MIT License -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.cache>=1.5.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 -oslo.reports>=0.6.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -oslo.versionedobjects>=1.17.0 # Apache-2.0 -PasteDeploy>=1.5.0 # MIT -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD -PrettyTable<0.8,>=0.7.1 # BSD -voluptuous>=0.8.9 # BSD License -gnocchiclient>=2.7.0 # Apache-2.0 -python-ceilometerclient>=2.5.0 # Apache-2.0 -python-cinderclient>=3.0.0 # Apache-2.0 -python-glanceclient>=2.7.0 # Apache-2.0 -python-keystoneclient>=3.8.0 # Apache-2.0 -python-monascaclient>=1.1.0 # Apache-2.0 
-python-neutronclient>=6.3.0 # Apache-2.0 -python-novaclient>=9.0.0 # Apache-2.0 -python-openstackclient!=3.10.0,>=3.3.0 # Apache-2.0 -python-ironicclient>=1.14.0 # Apache-2.0 -six>=1.9.0 # MIT -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT -stevedore>=1.20.0 # Apache-2.0 -taskflow>=2.7.0 # Apache-2.0 -WebOb>=1.7.1 # MIT -WSME>=0.8 # MIT -networkx>=1.10 # BSD - diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index bbd993f..0000000 --- a/setup.cfg +++ /dev/null @@ -1,129 +0,0 @@ -[metadata] -name = python-watcher -summary = OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://docs.openstack.org/watcher/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[files] -packages = - watcher - watcher_tempest_plugin -data_files = - etc/ = etc/* - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[entry_points] -oslo.config.opts = - watcher = watcher.conf.opts:list_opts - -console_scripts = - watcher-api = watcher.cmd.api:main - watcher-db-manage = watcher.cmd.dbmanage:main - watcher-decision-engine = watcher.cmd.decisionengine:main - watcher-applier = watcher.cmd.applier:main - watcher-sync = watcher.cmd.sync:main - -tempest.test_plugins = - watcher_tests = watcher_tempest_plugin.plugin:WatcherTempestPlugin - -watcher.database.migration_backend = - sqlalchemy = watcher.db.sqlalchemy.migration - -watcher_goals = - unclassified = watcher.decision_engine.goal.goals:Unclassified - dummy = 
watcher.decision_engine.goal.goals:Dummy - server_consolidation = watcher.decision_engine.goal.goals:ServerConsolidation - thermal_optimization = watcher.decision_engine.goal.goals:ThermalOptimization - workload_balancing = watcher.decision_engine.goal.goals:WorkloadBalancing - airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization - noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization - -watcher_scoring_engines = - dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer - -watcher_scoring_engine_containers = - dummy_scoring_container = watcher.decision_engine.scoring.dummy_scoring_container:DummyScoringContainer - -watcher_strategies = - dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy - dummy_with_scorer = watcher.decision_engine.strategy.strategies.dummy_with_scorer:DummyWithScorer - dummy_with_resize = watcher.decision_engine.strategy.strategies.dummy_with_resize:DummyWithResize - basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation - outlet_temperature = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl - vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation - workload_stabilization = watcher.decision_engine.strategy.strategies.workload_stabilization:WorkloadStabilization - workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance - uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow - noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor - -watcher_actions = - migrate = watcher.applier.actions.migration:Migrate - nop = watcher.applier.actions.nop:Nop - sleep = watcher.applier.actions.sleep:Sleep - change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState - resize = 
watcher.applier.actions.resize:Resize - change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState - -watcher_workflow_engines = - taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine - -watcher_planners = - weight = watcher.decision_engine.planner.weight:WeightPlanner - workload_stabilization = watcher.decision_engine.planner.workload_stabilization:WorkloadStabilizationPlanner - -watcher_cluster_data_model_collectors = - compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector - storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector - - -[pbr] -warnerrors = true -autodoc_index_modules = true -autodoc_exclude_modules = - watcher.db.sqlalchemy.alembic.env - watcher.db.sqlalchemy.alembic.versions.* - watcher.tests.* - watcher_tempest_plugin.* - watcher.doc - - -[build_sphinx] -source-dir = doc/source -build-dir = doc/build -fresh_env = 1 -all_files = 1 - -[upload_sphinx] -upload-dir = doc/build/html - - -[compile_catalog] -directory = watcher/locale -domain = watcher - -[update_catalog] -domain = watcher -output_dir = watcher/locale -input_file = watcher/locale/watcher.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext _LI _LW _LE _LC -mapping_file = babel.cfg -output_file = watcher/locale/watcher.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d844..0000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 84a7d89..0000000 --- a/test-requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -coverage!=4.4,>=4.0 # Apache-2.0 -doc8 # Apache-2.0 -freezegun>=0.3.6 # Apache-2.0 -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 -mock>=2.0 # BSD -oslotest>=1.10.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -python-subunit>=0.0.18 # Apache-2.0/BSD -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT - -# Doc requirements -openstackdocstheme>=1.11.0 # Apache-2.0 -sphinx>=1.6.2 # BSD -sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 - - -# releasenotes -reno!=2.3.1,>=1.8.0 # Apache-2.0 - -# bandit -bandit>=1.1.0 # Apache-2.0 diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 89148b2..0000000 --- a/tox.ini +++ /dev/null @@ -1,73 +0,0 @@ -[tox] -minversion = 1.8 -envlist = py35,py27,pep8 -skipsdist = True - -[testenv] -usedevelop = True -whitelist_externals = find - rm -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} -setenv = - VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/test-requirements.txt -commands = - rm -f .testrepository/times.dbm - find . 
-type f -name "*.py[c|o]" -delete - ostestr --concurrency=6 {posargs} -passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY - -[testenv:pep8] -commands = - doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst - flake8 - bandit -r watcher -x tests -n5 -ll -s B320 - -[testenv:venv] -setenv = PYTHONHASHSEED=0 -commands = {posargs} - -[testenv:cover] -commands = - python setup.py testr --coverage --testr-args='{posargs}' - coverage report - -[testenv:docs] -setenv = PYTHONHASHSEED=0 -commands = - doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst - python setup.py build_sphinx - -[testenv:debug] -commands = oslo_debug_helper -t watcher/tests {posargs} - -[testenv:genconfig] -sitepackages = False -commands = - oslo-config-generator --config-file etc/watcher/watcher-config-generator.conf - -[flake8] -show-source=True -ignore= H105,E123,E226,N320,H202 -builtins= _ -enable-extensions = H106,H203 -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes - -[testenv:wheel] -commands = python setup.py bdist_wheel - -[hacking] -import_exceptions = watcher._i18n -local-check-factory = watcher.hacking.checks.factory - -[doc8] -extension=.rst -# todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed -ignore-path=doc/source/image_src,doc/source/man,doc/source/api - -[testenv:releasenotes] -commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:bandit] -deps = -r{toxinidir}/test-requirements.txt -commands = bandit -r watcher -x tests -n5 -ll -s B320 diff --git a/watcher/__init__.py b/watcher/__init__.py deleted file mode 100644 index 403d61b..0000000 --- a/watcher/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - - -__version__ = pbr.version.VersionInfo('python-watcher').version_string() diff --git a/watcher/_i18n.py b/watcher/_i18n.py deleted file mode 100644 index b410850..0000000 --- a/watcher/_i18n.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import oslo_i18n -from oslo_i18n import _lazy - -# The domain is the name of the App which is used to generate the folder -# containing the translation files (i.e. 
the .pot file and the various locales) -DOMAIN = "watcher" - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - -# The contextual translation function using the name "_C" -_C = _translators.contextual_form - -# The plural translation function using the name "_P" -_P = _translators.plural_form - - -def lazy_translation_enabled(): - return _lazy.USE_LAZY - - -def get_available_languages(): - return oslo_i18n.get_available_languages(DOMAIN) diff --git a/watcher/api/__init__.py b/watcher/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/api/acl.py b/watcher/api/acl.py deleted file mode 100644 index 75b8019..0000000 --- a/watcher/api/acl.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Access Control Lists (ACL's) control access the API server.""" - -from watcher.api.middleware import auth_token -from watcher import conf - -CONF = conf.CONF - - -def install(app, conf, public_routes): - """Install ACL check on application. - - :param app: A WSGI application. - :param conf: Settings. Dict'ified and passed to keystonemiddleware - :param public_routes: The list of the routes which will be allowed to - access without authentication. 
- :return: The same WSGI application with ACL installed. - - """ - if not CONF.get('enable_authentication'): - return app - return auth_token.AuthTokenMiddleware(app, - conf=dict(conf), - public_api_routes=public_routes) diff --git a/watcher/api/app.py b/watcher/api/app.py deleted file mode 100644 index 7926eda..0000000 --- a/watcher/api/app.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- encoding: utf-8 -*- - -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# All Rights Reserved. -# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import pecan - -from watcher.api import acl -from watcher.api import config as api_config -from watcher.api import middleware -from watcher import conf - -CONF = conf.CONF - - -def get_pecan_config(): - # Set up the pecan configuration - return pecan.configuration.conf_from_dict(api_config.PECAN_CONFIG) - - -def setup_app(config=None): - if not config: - config = get_pecan_config() - - app_conf = dict(config.app) - - app = pecan.make_app( - app_conf.pop('root'), - logging=getattr(config, 'logging', {}), - debug=CONF.debug, - wrap_app=middleware.ParsableErrorMiddleware, - **app_conf - ) - - return acl.install(app, CONF, config.app.acl_public_routes) - - -class VersionSelectorApplication(object): - def __init__(self): - pc = get_pecan_config() - self.v1 = setup_app(config=pc) - - def __call__(self, environ, start_response): - return self.v1(environ, start_response) diff --git a/watcher/api/app.wsgi b/watcher/api/app.wsgi deleted file mode 100644 index c2b0609..0000000 --- a/watcher/api/app.wsgi +++ /dev/null @@ -1,40 +0,0 @@ -# -*- mode: python -*- -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Use this file for deploying the API service under Apache2 mod_wsgi. 
-""" - -import sys - -from oslo_config import cfg -import oslo_i18n as i18n -from oslo_log import log - -from watcher.api import app -from watcher.common import service - - -CONF = cfg.CONF - -i18n.install('watcher') - -service.prepare_service(sys.argv) - -LOG = log.getLogger(__name__) -LOG.debug("Configuration:") -CONF.log_opt_values(LOG, log.DEBUG) - -application = app.VersionSelectorApplication() - diff --git a/watcher/api/config.py b/watcher/api/config.py deleted file mode 100644 index 3952459..0000000 --- a/watcher/api/config.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from oslo_config import cfg -from watcher.api import hooks - -# Server Specific Configurations -# See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa -server = { - 'port': '9322', - 'host': '127.0.0.1' -} - -# Pecan Application Configurations -# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa -app = { - 'root': 'watcher.api.controllers.root.RootController', - 'modules': ['watcher.api'], - 'hooks': [ - hooks.ContextHook(), - hooks.NoExceptionTracebackHook(), - ], - 'static_root': '%(confdir)s/public', - 'enable_acl': True, - 'acl_public_routes': [ - '/', - ], -} - -# WSME Configurations -# See https://wsme.readthedocs.org/en/latest/integrate.html#configuration -wsme = { - 'debug': cfg.CONF.get("debug") if "debug" in cfg.CONF else False, -} - -PECAN_CONFIG = { - "server": server, - "app": app, - "wsme": wsme, -} diff --git a/watcher/api/controllers/__init__.py b/watcher/api/controllers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/api/controllers/base.py b/watcher/api/controllers/base.py deleted file mode 100644 index 54b5c3f..0000000 --- a/watcher/api/controllers/base.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import wsme -from wsme import types as wtypes - - -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" - - deleted_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is deleted""" - - def as_dict(self): - """Render this object as a dict of its fields.""" - return dict((k, getattr(self, k)) - for k in self.fields - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - def unset_fields_except(self, except_list=None): - """Unset fields so they don't appear in the message body. - - :param except_list: A list of fields that won't be touched. - - """ - if except_list is None: - except_list = [] - - for k in self.as_dict(): - if k not in except_list: - setattr(self, k, wsme.Unset) diff --git a/watcher/api/controllers/link.py b/watcher/api/controllers/link.py deleted file mode 100644 index 6c89fe1..0000000 --- a/watcher/api/controllers/link.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pecan -from wsme import types as wtypes - -from watcher.api.controllers import base - - -def build_url(resource, resource_args, bookmark=False, base_url=None): - if base_url is None: - base_url = pecan.request.host_url - - template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' - # FIXME(lucasagomes): I'm getting a 404 when doing a GET on - # a nested resource that the URL ends with a '/'. - # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs - template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' - return template % {'url': base_url, 'res': resource, 'args': resource_args} - - -class Link(base.APIBase): - """A link representation.""" - - href = wtypes.text - """The url of a link.""" - - rel = wtypes.text - """The name of a link.""" - - type = wtypes.text - """Indicates the type of document/link.""" - - @staticmethod - def make_link(rel_name, url, resource, resource_args, - bookmark=False, type=wtypes.Unset): - href = build_url(resource, resource_args, - bookmark=bookmark, base_url=url) - return Link(href=href, rel=rel_name, type=type) - - @classmethod - def sample(cls): - sample = cls(href="http://localhost:6385/chassis/" - "eaaca217-e7d8-47b4-bb41-3f99f20eed89", - rel="bookmark") - return sample diff --git a/watcher/api/controllers/root.py b/watcher/api/controllers/root.py deleted file mode 100644 index e42734c..0000000 --- a/watcher/api/controllers/root.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers import v1 - - -class Version(base.APIBase): - """An API version representation.""" - - id = wtypes.text - """The ID of the version, also acts as the release number""" - - links = [link.Link] - """A Link that point to a specific version of the API""" - - @staticmethod - def convert(id): - version = Version() - version.id = id - version.links = [link.Link.make_link('self', pecan.request.host_url, - id, '', bookmark=True)] - return version - - -class Root(base.APIBase): - - name = wtypes.text - """The name of the API""" - - description = wtypes.text - """Some information about this API""" - - versions = [Version] - """Links to all the versions available in this API""" - - default_version = Version - """A link to the default version of the API""" - - @staticmethod - def convert(): - root = Root() - root.name = "OpenStack Watcher API" - root.description = ("Watcher is an OpenStack project which aims to " - "improve physical resources usage through " - "better VM placement.") - root.versions = [Version.convert('v1')] - root.default_version = Version.convert('v1') - return root - - -class RootController(rest.RestController): - - _versions = ['v1'] - """All supported API versions""" - - _default_version = 'v1' - """The default API version""" - - v1 = v1.Controller() - - @wsme_pecan.wsexpose(Root) - def get(self): - # NOTE: The reason why convert() it's being called for every - # request is because we need to get the host url from - # the request object to make the links. - return Root.convert() - - @pecan.expose() - def _route(self, args): - """Overrides the default routing behavior. 
- - It redirects the request to the default version of the watcher API - if the version number is not specified in the url. - """ - - if args[0] and args[0] not in self._versions: - args = [self._default_version] + args - return super(RootController, self)._route(args) diff --git a/watcher/api/controllers/v1/__init__.py b/watcher/api/controllers/v1/__init__.py deleted file mode 100644 index 1627955..0000000 --- a/watcher/api/controllers/v1/__init__.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" -Version 1 of the Watcher API - -NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import link -from watcher.api.controllers.v1 import action -from watcher.api.controllers.v1 import action_plan -from watcher.api.controllers.v1 import audit -from watcher.api.controllers.v1 import audit_template -from watcher.api.controllers.v1 import goal -from watcher.api.controllers.v1 import scoring_engine -from watcher.api.controllers.v1 import service -from watcher.api.controllers.v1 import strategy - - -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" - - deleted_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is deleted""" - - def as_dict(self): - """Render this object as a dict of its fields.""" - return dict((k, getattr(self, k)) - for k in self.fields - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - def unset_fields_except(self, except_list=None): - """Unset fields so they don't appear in the message body. - - :param except_list: A list of fields that won't be touched. 
- - """ - if except_list is None: - except_list = [] - - for k in self.as_dict(): - if k not in except_list: - setattr(self, k, wsme.Unset) - - -class MediaType(APIBase): - """A media type representation.""" - - base = wtypes.text - type = wtypes.text - - def __init__(self, base, type): - self.base = base - self.type = type - - -class V1(APIBase): - """The representation of the version 1 of the API.""" - - id = wtypes.text - """The ID of the version, also acts as the release number""" - - media_types = [MediaType] - """An array of supcontainersed media types for this version""" - - audit_templates = [link.Link] - """Links to the audit templates resource""" - - audits = [link.Link] - """Links to the audits resource""" - - actions = [link.Link] - """Links to the actions resource""" - - action_plans = [link.Link] - """Links to the action plans resource""" - - scoring_engines = [link.Link] - """Links to the Scoring Engines resource""" - - services = [link.Link] - """Links to the services resource""" - - links = [link.Link] - """Links that point to a specific URL for this version and documentation""" - - @staticmethod - def convert(): - v1 = V1() - v1.id = "v1" - v1.links = [link.Link.make_link('self', pecan.request.host_url, - 'v1', '', bookmark=True), - link.Link.make_link('describedby', - 'http://docs.openstack.org', - 'developer/watcher/dev', - 'api-spec-v1.html', - bookmark=True, type='text/html') - ] - v1.media_types = [MediaType('application/json', - 'application/vnd.openstack.watcher.v1+json')] - v1.audit_templates = [link.Link.make_link('self', - pecan.request.host_url, - 'audit_templates', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'audit_templates', '', - bookmark=True) - ] - v1.audits = [link.Link.make_link('self', pecan.request.host_url, - 'audits', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'audits', '', - bookmark=True) - ] - v1.actions = [link.Link.make_link('self', pecan.request.host_url, - 'actions', ''), 
- link.Link.make_link('bookmark', - pecan.request.host_url, - 'actions', '', - bookmark=True) - ] - v1.action_plans = [link.Link.make_link( - 'self', pecan.request.host_url, 'action_plans', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'action_plans', '', - bookmark=True) - ] - - v1.scoring_engines = [link.Link.make_link( - 'self', pecan.request.host_url, 'scoring_engines', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'scoring_engines', '', - bookmark=True) - ] - - v1.services = [link.Link.make_link( - 'self', pecan.request.host_url, 'services', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'services', '', - bookmark=True) - ] - return v1 - - -class Controller(rest.RestController): - """Version 1 API controller root.""" - - audits = audit.AuditsController() - audit_templates = audit_template.AuditTemplatesController() - actions = action.ActionsController() - action_plans = action_plan.ActionPlansController() - goals = goal.GoalsController() - scoring_engines = scoring_engine.ScoringEngineController() - services = service.ServicesController() - strategies = strategy.StrategiesController() - - @wsme_pecan.wsexpose(V1) - def get(self): - # NOTE: The reason why convert() it's being called for every - # request is because we need to get the host url from - # the request object to make the links. - return V1.convert() - - -__all__ = ("Controller", ) diff --git a/watcher/api/controllers/v1/action.py b/watcher/api/controllers/v1/action.py deleted file mode 100644 index 3e96fdb..0000000 --- a/watcher/api/controllers/v1/action.py +++ /dev/null @@ -1,403 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Action ` is what enables Watcher to transform the -current state of a :ref:`Cluster ` after an -:ref:`Audit `. - -An :ref:`Action ` is an atomic task which changes the -current state of a target :ref:`Managed resource ` -of the OpenStack :ref:`Cluster ` such as: - -- Live migration of an instance from one compute node to another compute - node with Nova -- Changing the power level of a compute node (ACPI level, ...) -- Changing the current state of a compute node (enable or disable) with Nova - -In most cases, an :ref:`Action ` triggers some concrete -commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.). - -An :ref:`Action ` has a life-cycle and its current state may -be one of the following: - -- **PENDING** : the :ref:`Action ` has not been executed - yet by the :ref:`Watcher Applier ` -- **ONGOING** : the :ref:`Action ` is currently being - processed by the :ref:`Watcher Applier ` -- **SUCCEEDED** : the :ref:`Action ` has been executed - successfully -- **FAILED** : an error occurred while trying to execute the - :ref:`Action ` -- **DELETED** : the :ref:`Action ` is still stored in the - :ref:`Watcher database ` but is not returned - any more through the Watcher APIs. -- **CANCELLED** : the :ref:`Action ` was in **PENDING** or - **ONGOING** state and was cancelled by the - :ref:`Administrator ` - -:ref:`Some default implementations are provided `, but it is -possible to :ref:`develop new implementations ` which -are dynamically loaded by Watcher at launch time. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -class ActionPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class Action(base.APIBase): - """API representation of a action. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a action. - """ - _action_plan_uuid = None - - def _get_action_plan_uuid(self): - return self._action_plan_uuid - - def _set_action_plan_uuid(self, value): - if value == wtypes.Unset: - self._action_plan_uuid = wtypes.Unset - elif value and self._action_plan_uuid != value: - try: - action_plan = objects.ActionPlan.get( - pecan.request.context, value) - self._action_plan_uuid = action_plan.uuid - self.action_plan_id = action_plan.id - except exception.ActionPlanNotFound: - self._action_plan_uuid = None - - uuid = wtypes.wsattr(types.uuid, readonly=True) - """Unique UUID for this action""" - - action_plan_uuid = wsme.wsproperty(types.uuid, _get_action_plan_uuid, - _set_action_plan_uuid, - mandatory=True) - """The action plan this action belongs to """ - - state = wtypes.text - """This audit state""" - - action_type = wtypes.text - """Action type""" - - input_parameters = types.jsontype - """One or more key/value pairs """ - - parents = wtypes.wsattr(types.jsontype, readonly=True) - """UUIDs of parent actions""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated action links""" - - def __init__(self, 
**kwargs): - super(Action, self).__init__() - - self.fields = [] - fields = list(objects.Action.fields) - fields.append('action_plan_uuid') - for field in fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - self.fields.append('action_plan_id') - setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id', - wtypes.Unset)) - - @staticmethod - def _convert_with_links(action, url, expand=True): - if not expand: - action.unset_fields_except(['uuid', 'state', 'action_plan_uuid', - 'action_plan_id', 'action_type', - 'parents']) - - action.links = [link.Link.make_link('self', url, - 'actions', action.uuid), - link.Link.make_link('bookmark', url, - 'actions', action.uuid, - bookmark=True) - ] - return action - - @classmethod - def convert_with_links(cls, action, expand=True): - action = Action(**action.as_dict()) - return cls._convert_with_links(action, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - description='action description', - state='PENDING', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow(), - parents=[]) - sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ActionCollection(collection.Collection): - """API representation of a collection of actions.""" - - actions = [Action] - """A list containing actions objects""" - - def __init__(self, **kwargs): - self._type = 'actions' - - @staticmethod - def convert_with_links(actions, limit, url=None, expand=False, - **kwargs): - - collection = ActionCollection() - collection.actions = [Action.convert_with_links(p, expand) - for p in actions] - - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.actions = 
[Action.sample(expand=False)] - return sample - - -class ActionsController(rest.RestController): - """REST controller for Actions.""" - def __init__(self): - super(ActionsController, self).__init__() - - from_actions = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Actions.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_actions_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None, - action_plan_uuid=None, audit_uuid=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Action.get_by_uuid(pecan.request.context, - marker) - - filters = {} - if action_plan_uuid: - filters['action_plan_uuid'] = action_plan_uuid - - if audit_uuid: - filters['audit_uuid'] = audit_uuid - - sort_db_key = sort_key - - actions = objects.Action.list(pecan.request.context, - limit, - marker_obj, sort_key=sort_db_key, - sort_dir=sort_dir, - filters=filters) - - return ActionCollection.convert_with_links(actions, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, - wtypes.text, wtypes.text, types.uuid, - types.uuid) - def get_all(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', action_plan_uuid=None, - audit_uuid=None): - """Retrieve a list of actions. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param action_plan_uuid: Optional UUID of an action plan, - to get only actions for that action plan. - :param audit_uuid: Optional UUID of an audit, - to get only actions for that audit. 
- """ - context = pecan.request.context - policy.enforce(context, 'action:get_all', - action='action:get_all') - - if action_plan_uuid and audit_uuid: - raise exception.ActionFilterCombinationProhibited - - return self._get_actions_collection( - marker, limit, sort_key, sort_dir, - action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) - - @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, - wtypes.text, wtypes.text, types.uuid, - types.uuid) - def detail(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', action_plan_uuid=None, - audit_uuid=None): - """Retrieve a list of actions with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param action_plan_uuid: Optional UUID of an action plan, - to get only actions for that action plan. - :param audit_uuid: Optional UUID of an audit, - to get only actions for that audit. - """ - context = pecan.request.context - policy.enforce(context, 'action:detail', - action='action:detail') - - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "actions": - raise exception.HTTPNotFound - - if action_plan_uuid and audit_uuid: - raise exception.ActionFilterCombinationProhibited - - expand = True - resource_url = '/'.join(['actions', 'detail']) - return self._get_actions_collection( - marker, limit, sort_key, sort_dir, expand, resource_url, - action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) - - @wsme_pecan.wsexpose(Action, types.uuid) - def get_one(self, action_uuid): - """Retrieve information about the given action. - - :param action_uuid: UUID of a action. 
- """ - if self.from_actions: - raise exception.OperationNotPermitted - - context = pecan.request.context - action = api_utils.get_resource('Action', action_uuid) - policy.enforce(context, 'action:get', action, action='action:get') - - return Action.convert_with_links(action) - - @wsme_pecan.wsexpose(Action, body=Action, status_code=201) - def post(self, action): - """Create a new action. - - :param action: a action within the request body. - """ - # FIXME: blueprint edit-action-plan-flow - raise exception.OperationNotPermitted( - _("Cannot create an action directly")) - - if self.from_actions: - raise exception.OperationNotPermitted - - action_dict = action.as_dict() - context = pecan.request.context - new_action = objects.Action(context, **action_dict) - new_action.create() - - # Set the HTTP Location Header - pecan.response.location = link.build_url('actions', new_action.uuid) - return Action.convert_with_links(new_action) - - @wsme.validate(types.uuid, [ActionPatchType]) - @wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType]) - def patch(self, action_uuid, patch): - """Update an existing action. - - :param action_uuid: UUID of a action. - :param patch: a json PATCH document to apply to this action. 
- """ - # FIXME: blueprint edit-action-plan-flow - raise exception.OperationNotPermitted( - _("Cannot modify an action directly")) - - if self.from_actions: - raise exception.OperationNotPermitted - - action_to_update = objects.Action.get_by_uuid(pecan.request.context, - action_uuid) - try: - action_dict = action_to_update.as_dict() - action = Action(**api_utils.apply_jsonpatch(action_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.Action.fields: - try: - patch_val = getattr(action, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if action_to_update[field] != patch_val: - action_to_update[field] = patch_val - - action_to_update.save() - return Action.convert_with_links(action_to_update) - - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, action_uuid): - """Delete a action. - - :param action_uuid: UUID of a action. - """ - # FIXME: blueprint edit-action-plan-flow - raise exception.OperationNotPermitted( - _("Cannot delete an action directly")) - - action_to_delete = objects.Action.get_by_uuid( - pecan.request.context, - action_uuid) - action_to_delete.soft_delete() diff --git a/watcher/api/controllers/v1/action_plan.py b/watcher/api/controllers/v1/action_plan.py deleted file mode 100644 index 79dd5d8..0000000 --- a/watcher/api/controllers/v1/action_plan.py +++ /dev/null @@ -1,558 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Action Plan ` specifies a flow of -:ref:`Actions ` that should be executed in order to satisfy -a given :ref:`Goal `. It also contains an estimated -:ref:`global efficacy ` alongside a set of -:ref:`efficacy indicators `. - -An :ref:`Action Plan ` is generated by Watcher when an -:ref:`Audit ` is successful which implies that the -:ref:`Strategy ` -which was used has found a :ref:`Solution ` to achieve the -:ref:`Goal ` of this :ref:`Audit `. - -In the default implementation of Watcher, an action plan is composed of -a list of successive :ref:`Actions ` (i.e., a Workflow of -:ref:`Actions ` belonging to a unique branch). - -However, Watcher provides abstract interfaces for many of its components, -allowing other implementations to generate and handle more complex :ref:`Action -Plan(s) ` composed of two types of Action Item(s): - -- simple :ref:`Actions `: atomic tasks, which means it - can not be split into smaller tasks or commands from an OpenStack point of - view. -- composite Actions: which are composed of several simple - :ref:`Actions ` - ordered in sequential and/or parallel flows. - -An :ref:`Action Plan ` may be described using -standard workflow model description formats such as -`Business Process Model and Notation 2.0 (BPMN 2.0) -`_ or `Unified Modeling Language (UML) -`_. - -To see the life-cycle and description of -:ref:`Action Plan ` states, visit :ref:`the Action Plan -state machine `. 
-""" - -import datetime - -from oslo_log import log -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.applier import rpcapi -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils -from watcher import objects -from watcher.objects import action_plan as ap_objects - -LOG = log.getLogger(__name__) - - -class ActionPlanPatchType(types.JsonPatchType): - - @staticmethod - def _validate_state(patch): - serialized_patch = {'path': patch.path, 'op': patch.op} - if patch.value is not wsme.Unset: - serialized_patch['value'] = patch.value - # todo: use state machines to handle state transitions - state_value = patch.value - if state_value and not hasattr(ap_objects.State, state_value): - msg = _("Invalid state: %(state)s") - raise exception.PatchError( - patch=serialized_patch, reason=msg % dict(state=state_value)) - - @staticmethod - def validate(patch): - if patch.path == "/state": - ActionPlanPatchType._validate_state(patch) - return types.JsonPatchType.validate(patch) - - @staticmethod - def internal_attrs(): - return types.JsonPatchType.internal_attrs() - - @staticmethod - def mandatory_attrs(): - return ["audit_id", "state"] - - -class ActionPlan(base.APIBase): - """API representation of a action plan. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - action plan. 
- """ - - _audit_uuid = None - _strategy_uuid = None - _strategy_name = None - _efficacy_indicators = None - - def _get_audit_uuid(self): - return self._audit_uuid - - def _set_audit_uuid(self, value): - if value == wtypes.Unset: - self._audit_uuid = wtypes.Unset - elif value and self._audit_uuid != value: - try: - audit = objects.Audit.get(pecan.request.context, value) - self._audit_uuid = audit.uuid - self.audit_id = audit.id - except exception.AuditNotFound: - self._audit_uuid = None - - def _get_efficacy_indicators(self): - if self._efficacy_indicators is None: - self._set_efficacy_indicators(wtypes.Unset) - return self._efficacy_indicators - - def _set_efficacy_indicators(self, value): - efficacy_indicators = [] - if value == wtypes.Unset and not self._efficacy_indicators: - try: - _efficacy_indicators = objects.EfficacyIndicator.list( - pecan.request.context, - filters={"action_plan_uuid": self.uuid}) - - for indicator in _efficacy_indicators: - efficacy_indicator = efficacyindicator.EfficacyIndicator( - context=pecan.request.context, - name=indicator.name, - description=indicator.description, - unit=indicator.unit, - value=indicator.value, - ) - efficacy_indicators.append(efficacy_indicator.as_dict()) - self._efficacy_indicators = efficacy_indicators - except exception.EfficacyIndicatorNotFound as exc: - LOG.exception(exc) - elif value and self._efficacy_indicators != value: - self._efficacy_indicators = value - - def _get_strategy(self, value): - if value == wtypes.Unset: - return None - strategy = None - try: - if utils.is_uuid_like(value) or utils.is_int_like(value): - strategy = objects.Strategy.get( - pecan.request.context, value) - else: - strategy = objects.Strategy.get_by_name( - pecan.request.context, value) - except exception.StrategyNotFound: - pass - if strategy: - self.strategy_id = strategy.id - return strategy - - def _get_strategy_uuid(self): - return self._strategy_uuid - - def _set_strategy_uuid(self, value): - if value and 
self._strategy_uuid != value: - self._strategy_uuid = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_uuid = strategy.uuid - - def _get_strategy_name(self): - return self._strategy_name - - def _set_strategy_name(self, value): - if value and self._strategy_name != value: - self._strategy_name = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_name = strategy.name - - uuid = wtypes.wsattr(types.uuid, readonly=True) - """Unique UUID for this action plan""" - - audit_uuid = wsme.wsproperty(types.uuid, _get_audit_uuid, _set_audit_uuid, - mandatory=True) - """The UUID of the audit this port belongs to""" - - strategy_uuid = wsme.wsproperty( - wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) - """Strategy UUID the action plan refers to""" - - strategy_name = wsme.wsproperty( - wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) - """The name of the strategy this action plan refers to""" - - efficacy_indicators = wsme.wsproperty( - types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, - mandatory=True) - """The list of efficacy indicators associated to this action plan""" - - global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) - """The global efficacy of this action plan""" - - state = wtypes.text - """This action plan state""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated action links""" - - def __init__(self, **kwargs): - super(ActionPlan, self).__init__() - self.fields = [] - fields = list(objects.ActionPlan.fields) - for field in fields: - # Skip fields we do not expose. 
- if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - self.fields.append('audit_uuid') - self.fields.append('efficacy_indicators') - - setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) - fields.append('strategy_uuid') - setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) - fields.append('strategy_name') - setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) - - @staticmethod - def _convert_with_links(action_plan, url, expand=True): - if not expand: - action_plan.unset_fields_except( - ['uuid', 'state', 'efficacy_indicators', 'global_efficacy', - 'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name']) - - action_plan.links = [ - link.Link.make_link( - 'self', url, - 'action_plans', action_plan.uuid), - link.Link.make_link( - 'bookmark', url, - 'action_plans', action_plan.uuid, - bookmark=True)] - return action_plan - - @classmethod - def convert_with_links(cls, rpc_action_plan, expand=True): - action_plan = ActionPlan(**rpc_action_plan.as_dict()) - return cls._convert_with_links(action_plan, pecan.request.host_url, - expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', - state='ONGOING', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow()) - sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6' - sample._efficacy_indicators = [{'description': 'Test indicator', - 'name': 'test_indicator', - 'unit': '%'}] - sample._global_efficacy = {'description': 'Global efficacy', - 'name': 'test_global_efficacy', - 'unit': '%'} - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ActionPlanCollection(collection.Collection): - """API representation of a collection of action_plans.""" - - action_plans = [ActionPlan] - """A list containing action_plans objects""" - - def __init__(self, **kwargs): - 
self._type = 'action_plans' - - @staticmethod - def convert_with_links(rpc_action_plans, limit, url=None, expand=False, - **kwargs): - ap_collection = ActionPlanCollection() - ap_collection.action_plans = [ActionPlan.convert_with_links( - p, expand) for p in rpc_action_plans] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'audit_uuid': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - ap_collection.action_plans = sorted( - ap_collection.action_plans, - key=lambda action_plan: action_plan.audit_uuid, - reverse=reverse) - - ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) - return ap_collection - - @classmethod - def sample(cls): - sample = cls() - sample.action_plans = [ActionPlan.sample(expand=False)] - return sample - - -class ActionPlansController(rest.RestController): - """REST controller for Actions.""" - - def __init__(self): - super(ActionPlansController, self).__init__() - - from_actionsPlans = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource ActionPlan.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_action_plans_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None, audit_uuid=None, - strategy=None): - - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.ActionPlan.get_by_uuid( - pecan.request.context, marker) - - filters = {} - if audit_uuid: - filters['audit_uuid'] = audit_uuid - - if strategy: - if utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - filters['strategy_name'] = strategy - - if sort_key == 'audit_uuid': - sort_db_key = None - else: - sort_db_key = sort_key - - action_plans = objects.ActionPlan.list( - pecan.request.context, - limit, - marker_obj, sort_key=sort_db_key, - sort_dir=sort_dir, filters=filters) - - return 
ActionPlanCollection.convert_with_links( - action_plans, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, - wtypes.text, types.uuid, wtypes.text) - def get_all(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): - """Retrieve a list of action plans. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param audit_uuid: Optional UUID of an audit, to get only actions - for that audit. - :param strategy: strategy UUID or name to filter by - """ - context = pecan.request.context - policy.enforce(context, 'action_plan:get_all', - action='action_plan:get_all') - - return self._get_action_plans_collection( - marker, limit, sort_key, sort_dir, - audit_uuid=audit_uuid, strategy=strategy) - - @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, - wtypes.text, types.uuid, wtypes.text) - def detail(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): - """Retrieve a list of action_plans with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param audit_uuid: Optional UUID of an audit, to get only actions - for that audit. 
- :param strategy: strategy UUID or name to filter by - """ - context = pecan.request.context - policy.enforce(context, 'action_plan:detail', - action='action_plan:detail') - - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "action_plans": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['action_plans', 'detail']) - return self._get_action_plans_collection( - marker, limit, sort_key, sort_dir, expand, - resource_url, audit_uuid=audit_uuid, strategy=strategy) - - @wsme_pecan.wsexpose(ActionPlan, types.uuid) - def get_one(self, action_plan_uuid): - """Retrieve information about the given action plan. - - :param action_plan_uuid: UUID of a action plan. - """ - if self.from_actionsPlans: - raise exception.OperationNotPermitted - - context = pecan.request.context - action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) - policy.enforce( - context, 'action_plan:get', action_plan, action='action_plan:get') - - return ActionPlan.convert_with_links(action_plan) - - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, action_plan_uuid): - """Delete an action plan. - - :param action_plan_uuid: UUID of a action. - """ - context = pecan.request.context - action_plan = api_utils.get_resource( - 'ActionPlan', action_plan_uuid, eager=True) - policy.enforce(context, 'action_plan:delete', action_plan, - action='action_plan:delete') - - action_plan.soft_delete() - - @wsme.validate(types.uuid, [ActionPlanPatchType]) - @wsme_pecan.wsexpose(ActionPlan, types.uuid, - body=[ActionPlanPatchType]) - def patch(self, action_plan_uuid, patch): - """Update an existing action plan. - - :param action_plan_uuid: UUID of a action plan. - :param patch: a json PATCH document to apply to this action plan. 
- """ - if self.from_actionsPlans: - raise exception.OperationNotPermitted - - context = pecan.request.context - action_plan_to_update = api_utils.get_resource( - 'ActionPlan', action_plan_uuid, eager=True) - policy.enforce(context, 'action_plan:update', action_plan_to_update, - action='action_plan:update') - - try: - action_plan_dict = action_plan_to_update.as_dict() - action_plan = ActionPlan(**api_utils.apply_jsonpatch( - action_plan_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - launch_action_plan = False - cancel_action_plan = False - - # transitions that are allowed via PATCH - allowed_patch_transitions = [ - (ap_objects.State.RECOMMENDED, - ap_objects.State.PENDING), - (ap_objects.State.RECOMMENDED, - ap_objects.State.CANCELLED), - (ap_objects.State.ONGOING, - ap_objects.State.CANCELLING), - (ap_objects.State.PENDING, - ap_objects.State.CANCELLED), - ] - - # todo: improve this in blueprint watcher-api-validation - if hasattr(action_plan, 'state'): - transition = (action_plan_to_update.state, action_plan.state) - if transition not in allowed_patch_transitions: - error_message = _("State transition not allowed: " - "(%(initial_state)s -> %(new_state)s)") - raise exception.PatchError( - patch=patch, - reason=error_message % dict( - initial_state=action_plan_to_update.state, - new_state=action_plan.state)) - - if action_plan.state == ap_objects.State.PENDING: - launch_action_plan = True - if action_plan.state == ap_objects.State.CANCELLED: - cancel_action_plan = True - - # Update only the fields that have changed - for field in objects.ActionPlan.fields: - try: - patch_val = getattr(action_plan, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if action_plan_to_update[field] != patch_val: - action_plan_to_update[field] = patch_val - - if (field == 'state'and - patch_val == 
objects.action_plan.State.PENDING): - launch_action_plan = True - - action_plan_to_update.save() - - # NOTE: if action plan is cancelled from pending or recommended - # state update action state here only - if cancel_action_plan: - filters = {'action_plan_uuid': action_plan.uuid} - actions = objects.Action.list(pecan.request.context, - filters=filters, eager=True) - for a in actions: - a.state = objects.action.State.CANCELLED - a.save() - - if launch_action_plan: - applier_client = rpcapi.ApplierAPI() - applier_client.launch_action_plan(pecan.request.context, - action_plan.uuid) - - action_plan_to_update = objects.ActionPlan.get_by_uuid( - pecan.request.context, - action_plan_uuid) - return ActionPlan.convert_with_links(action_plan_to_update) diff --git a/watcher/api/controllers/v1/audit.py b/watcher/api/controllers/v1/audit.py deleted file mode 100644 index f654c63..0000000 --- a/watcher/api/controllers/v1/audit.py +++ /dev/null @@ -1,615 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -In the Watcher system, an :ref:`Audit ` is a request for -optimizing a :ref:`Cluster `. - -The optimization is done in order to satisfy one :ref:`Goal ` -on a given :ref:`Cluster `. - -For each :ref:`Audit `, the Watcher system generates an -:ref:`Action Plan `. - -To see the life-cycle and description of an :ref:`Audit ` -states, visit :ref:`the Audit State machine `. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils -from watcher.decision_engine import rpcapi -from watcher import objects - - -class AuditPostType(wtypes.Base): - - audit_template_uuid = wtypes.wsattr(types.uuid, mandatory=False) - - goal = wtypes.wsattr(wtypes.text, mandatory=False) - - strategy = wtypes.wsattr(wtypes.text, mandatory=False) - - audit_type = wtypes.wsattr(wtypes.text, mandatory=True) - - state = wsme.wsattr(wtypes.text, readonly=True, - default=objects.audit.State.PENDING) - - parameters = wtypes.wsattr({wtypes.text: types.jsontype}, mandatory=False, - default={}) - interval = wsme.wsattr(types.interval_or_cron, mandatory=False) - - scope = wtypes.wsattr(types.jsontype, readonly=True) - - auto_trigger = wtypes.wsattr(bool, mandatory=False) - - def as_audit(self, context): - audit_type_values = [val.value for val in objects.audit.AuditType] - if self.audit_type not in audit_type_values: - raise exception.AuditTypeNotFound(audit_type=self.audit_type) - - if (self.audit_type == objects.audit.AuditType.ONESHOT.value and - self.interval not in (wtypes.Unset, None)): - raise exception.AuditIntervalNotAllowed(audit_type=self.audit_type) - - if (self.audit_type == objects.audit.AuditType.CONTINUOUS.value and - self.interval in (wtypes.Unset, None)): - raise exception.AuditIntervalNotSpecified( - audit_type=self.audit_type) - - # If audit_template_uuid was provided, we will provide any - # variables not included in the request, but not override - # those variables that were included. 
- if self.audit_template_uuid: - try: - audit_template = objects.AuditTemplate.get( - context, self.audit_template_uuid) - except exception.AuditTemplateNotFound: - raise exception.Invalid( - message=_('The audit template UUID or name specified is ' - 'invalid')) - at2a = { - 'goal': 'goal_id', - 'strategy': 'strategy_id', - 'scope': 'scope', - } - to_string_fields = set(['goal', 'strategy']) - for k in at2a: - if not getattr(self, k): - try: - at_attr = getattr(audit_template, at2a[k]) - if at_attr and (k in to_string_fields): - at_attr = str(at_attr) - setattr(self, k, at_attr) - except AttributeError: - pass - return Audit( - audit_type=self.audit_type, - parameters=self.parameters, - goal_id=self.goal, - strategy_id=self.strategy, - interval=self.interval, - scope=self.scope, - auto_trigger=self.auto_trigger) - - -class AuditPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return ['/audit_template_uuid', '/type'] - - @staticmethod - def validate(patch): - - def is_new_state_none(p): - return p.path == '/state' and p.op == 'replace' and p.value is None - - serialized_patch = {'path': patch.path, - 'op': patch.op, - 'value': patch.value} - if (patch.path in AuditPatchType.mandatory_attrs() or - is_new_state_none(patch)): - msg = _("%(field)s can't be updated.") - raise exception.PatchError( - patch=serialized_patch, - reason=msg % dict(field=patch.path)) - return types.JsonPatchType.validate(patch) - - -class Audit(base.APIBase): - """API representation of a audit. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a audit. 
- """ - _goal_uuid = None - _goal_name = None - _strategy_uuid = None - _strategy_name = None - - def _get_goal(self, value): - if value == wtypes.Unset: - return None - goal = None - try: - if utils.is_uuid_like(value) or utils.is_int_like(value): - goal = objects.Goal.get( - pecan.request.context, value) - else: - goal = objects.Goal.get_by_name( - pecan.request.context, value) - except exception.GoalNotFound: - pass - if goal: - self.goal_id = goal.id - return goal - - def _get_goal_uuid(self): - return self._goal_uuid - - def _set_goal_uuid(self, value): - if value and self._goal_uuid != value: - self._goal_uuid = None - goal = self._get_goal(value) - if goal: - self._goal_uuid = goal.uuid - - def _get_goal_name(self): - return self._goal_name - - def _set_goal_name(self, value): - if value and self._goal_name != value: - self._goal_name = None - goal = self._get_goal(value) - if goal: - self._goal_name = goal.name - - def _get_strategy(self, value): - if value == wtypes.Unset: - return None - strategy = None - try: - if utils.is_uuid_like(value) or utils.is_int_like(value): - strategy = objects.Strategy.get( - pecan.request.context, value) - else: - strategy = objects.Strategy.get_by_name( - pecan.request.context, value) - except exception.StrategyNotFound: - pass - if strategy: - self.strategy_id = strategy.id - return strategy - - def _get_strategy_uuid(self): - return self._strategy_uuid - - def _set_strategy_uuid(self, value): - if value and self._strategy_uuid != value: - self._strategy_uuid = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_uuid = strategy.uuid - - def _get_strategy_name(self): - return self._strategy_name - - def _set_strategy_name(self, value): - if value and self._strategy_name != value: - self._strategy_name = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_name = strategy.name - - uuid = types.uuid - """Unique UUID for this audit""" - - audit_type = wtypes.text - """Type of this 
audit""" - - state = wtypes.text - """This audit state""" - - goal_uuid = wsme.wsproperty( - wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) - """Goal UUID the audit template refers to""" - - goal_name = wsme.wsproperty( - wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) - """The name of the goal this audit template refers to""" - - strategy_uuid = wsme.wsproperty( - wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) - """Strategy UUID the audit template refers to""" - - strategy_name = wsme.wsproperty( - wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) - """The name of the strategy this audit template refers to""" - - parameters = {wtypes.text: types.jsontype} - """The strategy parameters for this audit""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated audit links""" - - interval = wsme.wsattr(wtypes.text, mandatory=False) - """Launch audit periodically (in seconds)""" - - scope = wsme.wsattr(types.jsontype, mandatory=False) - """Audit Scope""" - - auto_trigger = wsme.wsattr(bool, mandatory=False, default=False) - """Autoexecute action plan once audit is succeeded""" - - next_run_time = wsme.wsattr(datetime.datetime, mandatory=False) - """The next time audit launch""" - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Audit.fields) - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - self.fields.append('goal_id') - self.fields.append('strategy_id') - fields.append('goal_uuid') - setattr(self, 'goal_uuid', kwargs.get('goal_id', - wtypes.Unset)) - fields.append('goal_name') - setattr(self, 'goal_name', kwargs.get('goal_id', - wtypes.Unset)) - fields.append('strategy_uuid') - setattr(self, 'strategy_uuid', kwargs.get('strategy_id', - wtypes.Unset)) - fields.append('strategy_name') - setattr(self, 'strategy_name', kwargs.get('strategy_id', - wtypes.Unset)) - - @staticmethod - def _convert_with_links(audit, url, expand=True): - if not expand: - audit.unset_fields_except(['uuid', 'audit_type', 'state', - 'goal_uuid', 'interval', 'scope', - 'strategy_uuid', 'goal_name', - 'strategy_name', 'auto_trigger', - 'next_run_time']) - - audit.links = [link.Link.make_link('self', url, - 'audits', audit.uuid), - link.Link.make_link('bookmark', url, - 'audits', audit.uuid, - bookmark=True) - ] - - return audit - - @classmethod - def convert_with_links(cls, rpc_audit, expand=True): - audit = Audit(**rpc_audit.as_dict()) - return cls._convert_with_links(audit, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - audit_type='ONESHOT', - state='PENDING', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow(), - interval='7200', - scope=[], - auto_trigger=False, - next_run_time=datetime.datetime.utcnow()) - - sample.goal_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' - sample.strategy_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ff' - - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class AuditCollection(collection.Collection): - """API representation of a collection of audits.""" - - audits = [Audit] - """A list containing audits objects""" - - def __init__(self, **kwargs): - super(AuditCollection, 
self).__init__() - self._type = 'audits' - - @staticmethod - def convert_with_links(rpc_audits, limit, url=None, expand=False, - **kwargs): - collection = AuditCollection() - collection.audits = [Audit.convert_with_links(p, expand) - for p in rpc_audits] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'goal_uuid': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - collection.audits = sorted( - collection.audits, - key=lambda audit: audit.goal_uuid, - reverse=reverse) - - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.audits = [Audit.sample(expand=False)] - return sample - - -class AuditsController(rest.RestController): - """REST controller for Audits.""" - def __init__(self): - super(AuditsController, self).__init__() - - from_audits = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Audits.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_audits_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None, goal=None, - strategy=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - marker_obj = None - if marker: - marker_obj = objects.Audit.get_by_uuid(pecan.request.context, - marker) - - filters = {} - if goal: - if utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - # TODO(michaelgugino): add method to get goal by name. - filters['goal_name'] = goal - - if strategy: - if utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - # TODO(michaelgugino): add method to get goal by name. 
- filters['strategy_name'] = strategy - - if sort_key == 'goal_uuid': - sort_db_key = 'goal_id' - elif sort_key == 'strategy_uuid': - sort_db_key = 'strategy_id' - else: - sort_db_key = sort_key - - audits = objects.Audit.list(pecan.request.context, - limit, - marker_obj, sort_key=sort_db_key, - sort_dir=sort_dir, filters=filters) - - return AuditCollection.convert_with_links(audits, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text, - wtypes.text, wtypes.text, wtypes.text, int) - def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', - goal=None, strategy=None): - """Retrieve a list of audits. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param goal: goal UUID or name to filter by - :param strategy: strategy UUID or name to filter by - """ - - context = pecan.request.context - policy.enforce(context, 'audit:get_all', - action='audit:get_all') - - return self._get_audits_collection(marker, limit, sort_key, - sort_dir, goal=goal, - strategy=strategy) - - @wsme_pecan.wsexpose(AuditCollection, wtypes.text, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, goal=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of audits with detail. - - :param goal: goal UUID or name to filter by - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit:detail', - action='audit:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "audits": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['audits', 'detail']) - return self._get_audits_collection(marker, limit, - sort_key, sort_dir, expand, - resource_url, - goal=goal) - - @wsme_pecan.wsexpose(Audit, types.uuid) - def get_one(self, audit_uuid): - """Retrieve information about the given audit. - - :param audit_uuid: UUID of a audit. - """ - if self.from_audits: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_audit = api_utils.get_resource('Audit', audit_uuid) - policy.enforce(context, 'audit:get', rpc_audit, action='audit:get') - - return Audit.convert_with_links(rpc_audit) - - @wsme_pecan.wsexpose(Audit, body=AuditPostType, status_code=201) - def post(self, audit_p): - """Create a new audit. - - :param audit_p: a audit within the request body. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit:create', - action='audit:create') - audit = audit_p.as_audit(context) - - if self.from_audits: - raise exception.OperationNotPermitted - - if not audit._goal_uuid: - raise exception.Invalid( - message=_('A valid goal_id or audit_template_id ' - 'must be provided')) - - strategy_uuid = audit.strategy_uuid - no_schema = True - if strategy_uuid is not None: - # validate parameter when predefined strategy in audit template - strategy = objects.Strategy.get(pecan.request.context, - strategy_uuid) - schema = strategy.parameters_spec - if schema: - # validate input parameter with default value feedback - no_schema = False - utils.StrictDefaultValidatingDraft4Validator(schema).validate( - audit.parameters) - - if no_schema and audit.parameters: - raise exception.Invalid(_('Specify parameters but no predefined ' - 'strategy for audit template, or no ' - 'parameter spec in predefined strategy')) - - audit_dict = audit.as_dict() - - new_audit = objects.Audit(context, **audit_dict) - new_audit.create() - - # Set the HTTP Location Header - pecan.response.location = link.build_url('audits', new_audit.uuid) - - # trigger decision-engine to run the audit - if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value: - dc_client = rpcapi.DecisionEngineAPI() - dc_client.trigger_audit(context, new_audit.uuid) - - return Audit.convert_with_links(new_audit) - - @wsme.validate(types.uuid, [AuditPatchType]) - @wsme_pecan.wsexpose(Audit, types.uuid, body=[AuditPatchType]) - def patch(self, audit_uuid, patch): - """Update an existing audit. - - :param audit_uuid: UUID of a audit. - :param patch: a json PATCH document to apply to this audit. 
- """ - if self.from_audits: - raise exception.OperationNotPermitted - - context = pecan.request.context - audit_to_update = api_utils.get_resource( - 'Audit', audit_uuid, eager=True) - policy.enforce(context, 'audit:update', audit_to_update, - action='audit:update') - - try: - audit_dict = audit_to_update.as_dict() - - initial_state = audit_dict['state'] - new_state = api_utils.get_patch_value(patch, 'state') - if not api_utils.check_audit_state_transition( - patch, initial_state): - error_message = _("State transition not allowed: " - "(%(initial_state)s -> %(new_state)s)") - raise exception.PatchError( - patch=patch, - reason=error_message % dict( - initial_state=initial_state, new_state=new_state)) - - audit = Audit(**api_utils.apply_jsonpatch(audit_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.Audit.fields: - try: - patch_val = getattr(audit, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if audit_to_update[field] != patch_val: - audit_to_update[field] = patch_val - - audit_to_update.save() - return Audit.convert_with_links(audit_to_update) - - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, audit_uuid): - """Delete a audit. - - :param audit_uuid: UUID of a audit. - """ - context = pecan.request.context - audit_to_delete = api_utils.get_resource( - 'Audit', audit_uuid, eager=True) - policy.enforce(context, 'audit:update', audit_to_delete, - action='audit:update') - - audit_to_delete.soft_delete() diff --git a/watcher/api/controllers/v1/audit_template.py b/watcher/api/controllers/v1/audit_template.py deleted file mode 100644 index b85e2f2..0000000 --- a/watcher/api/controllers/v1/audit_template.py +++ /dev/null @@ -1,657 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Audit ` may be launched several times with the same -settings (:ref:`Goal `, thresholds, ...). Therefore it makes -sense to save those settings in some sort of Audit preset object, which is -known as an :ref:`Audit Template `. - -An :ref:`Audit Template ` contains at least the -:ref:`Goal ` of the :ref:`Audit `. - -It may also contain some error handling settings indicating whether: - -- :ref:`Watcher Applier ` stops the - entire operation -- :ref:`Watcher Applier ` performs a rollback - -and how many retries should be attempted before failure occurs (also the latter -can be complex: for example the scenario in which there are many first-time -failures on ultimately successful :ref:`Actions `). - -Moreover, an :ref:`Audit Template ` may contain some -settings related to the level of automation for the -:ref:`Action Plan ` that will be generated by the -:ref:`Audit `. -A flag will indicate whether the :ref:`Action Plan ` -will be launched automatically or will need a manual confirmation from the -:ref:`Administrator `. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import context as context_utils -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils as common_utils -from watcher.decision_engine.scope import default -from watcher import objects - - -class AuditTemplatePostType(wtypes.Base): - _ctx = context_utils.make_context() - - name = wtypes.wsattr(wtypes.text, mandatory=True) - """Name of this audit template""" - - description = wtypes.wsattr(wtypes.text, mandatory=False) - """Short description of this audit template""" - - goal = wtypes.wsattr(wtypes.text, mandatory=True) - """Goal UUID or name of the audit template""" - - strategy = wtypes.wsattr(wtypes.text, mandatory=False) - """Strategy UUID or name of the audit template""" - - scope = wtypes.wsattr(types.jsontype, mandatory=False, default=[]) - """Audit Scope""" - - def as_audit_template(self): - return AuditTemplate( - name=self.name, - description=self.description, - goal_id=self.goal, # Dirty trick ... - goal=self.goal, - strategy_id=self.strategy, # Dirty trick ... 
- strategy_uuid=self.strategy, - scope=self.scope, - ) - - @staticmethod - def validate(audit_template): - available_goals = objects.Goal.list(AuditTemplatePostType._ctx) - available_goal_uuids_map = {g.uuid: g for g in available_goals} - available_goal_names_map = {g.name: g for g in available_goals} - if audit_template.goal in available_goal_uuids_map: - goal = available_goal_uuids_map[audit_template.goal] - elif audit_template.goal in available_goal_names_map: - goal = available_goal_names_map[audit_template.goal] - else: - raise exception.InvalidGoal(goal=audit_template.goal) - - common_utils.Draft4Validator( - default.DefaultScope.DEFAULT_SCHEMA).validate(audit_template.scope) - - include_host_aggregates = False - exclude_host_aggregates = False - for rule in audit_template.scope: - if 'host_aggregates' in rule: - include_host_aggregates = True - elif 'exclude' in rule: - for resource in rule['exclude']: - if 'host_aggregates' in resource: - exclude_host_aggregates = True - if include_host_aggregates and exclude_host_aggregates: - raise exception.Invalid( - message=_( - "host_aggregates can't be " - "included and excluded together")) - - if audit_template.strategy: - available_strategies = objects.Strategy.list( - AuditTemplatePostType._ctx) - available_strategies_map = { - s.uuid: s for s in available_strategies} - if audit_template.strategy not in available_strategies_map: - raise exception.InvalidStrategy( - strategy=audit_template.strategy) - - strategy = available_strategies_map[audit_template.strategy] - # Check that the strategy we indicate is actually related to the - # specified goal - if strategy.goal_id != goal.id: - choices = ["'%s' (%s)" % (s.uuid, s.name) - for s in available_strategies] - raise exception.InvalidStrategy( - message=_( - "'%(strategy)s' strategy does relate to the " - "'%(goal)s' goal. 
Possible choices: %(choices)s") - % dict(strategy=strategy.name, goal=goal.name, - choices=", ".join(choices))) - audit_template.strategy = strategy.uuid - - # We force the UUID so that we do not need to query the DB with the - # name afterwards - audit_template.goal = goal.uuid - - return audit_template - - -class AuditTemplatePatchType(types.JsonPatchType): - - _ctx = context_utils.make_context() - - @staticmethod - def mandatory_attrs(): - return [] - - @staticmethod - def validate(patch): - if patch.path == "/goal" and patch.op != "remove": - AuditTemplatePatchType._validate_goal(patch) - elif patch.path == "/goal" and patch.op == "remove": - raise exception.OperationNotPermitted( - _("Cannot remove 'goal' attribute " - "from an audit template")) - if patch.path == "/strategy": - AuditTemplatePatchType._validate_strategy(patch) - return types.JsonPatchType.validate(patch) - - @staticmethod - def _validate_goal(patch): - patch.path = "/goal_id" - goal = patch.value - - if goal: - available_goals = objects.Goal.list( - AuditTemplatePatchType._ctx) - available_goal_uuids_map = {g.uuid: g for g in available_goals} - available_goal_names_map = {g.name: g for g in available_goals} - if goal in available_goal_uuids_map: - patch.value = available_goal_uuids_map[goal].id - elif goal in available_goal_names_map: - patch.value = available_goal_names_map[goal].id - else: - raise exception.InvalidGoal(goal=goal) - - @staticmethod - def _validate_strategy(patch): - patch.path = "/strategy_id" - strategy = patch.value - if strategy: - available_strategies = objects.Strategy.list( - AuditTemplatePatchType._ctx) - available_strategy_uuids_map = { - s.uuid: s for s in available_strategies} - available_strategy_names_map = { - s.name: s for s in available_strategies} - if strategy in available_strategy_uuids_map: - patch.value = available_strategy_uuids_map[strategy].id - elif strategy in available_strategy_names_map: - patch.value = available_strategy_names_map[strategy].id - 
else: - raise exception.InvalidStrategy(strategy=strategy) - - -class AuditTemplate(base.APIBase): - """API representation of a audit template. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - audit template. - """ - - _goal_uuid = None - _goal_name = None - - _strategy_uuid = None - _strategy_name = None - - def _get_goal(self, value): - if value == wtypes.Unset: - return None - goal = None - try: - if (common_utils.is_uuid_like(value) or - common_utils.is_int_like(value)): - goal = objects.Goal.get( - pecan.request.context, value) - else: - goal = objects.Goal.get_by_name( - pecan.request.context, value) - except exception.GoalNotFound: - pass - if goal: - self.goal_id = goal.id - return goal - - def _get_strategy(self, value): - if value == wtypes.Unset: - return None - strategy = None - try: - if (common_utils.is_uuid_like(value) or - common_utils.is_int_like(value)): - strategy = objects.Strategy.get( - pecan.request.context, value) - else: - strategy = objects.Strategy.get_by_name( - pecan.request.context, value) - except exception.StrategyNotFound: - pass - if strategy: - self.strategy_id = strategy.id - return strategy - - def _get_goal_uuid(self): - return self._goal_uuid - - def _set_goal_uuid(self, value): - if value and self._goal_uuid != value: - self._goal_uuid = None - goal = self._get_goal(value) - if goal: - self._goal_uuid = goal.uuid - - def _get_strategy_uuid(self): - return self._strategy_uuid - - def _set_strategy_uuid(self, value): - if value and self._strategy_uuid != value: - self._strategy_uuid = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_uuid = strategy.uuid - - def _get_goal_name(self): - return self._goal_name - - def _set_goal_name(self, value): - if value and self._goal_name != value: - self._goal_name = None - goal = self._get_goal(value) - if goal: - self._goal_name = goal.name - - def 
_get_strategy_name(self): - return self._strategy_name - - def _set_strategy_name(self, value): - if value and self._strategy_name != value: - self._strategy_name = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_name = strategy.name - - uuid = wtypes.wsattr(types.uuid, readonly=True) - """Unique UUID for this audit template""" - - name = wtypes.text - """Name of this audit template""" - - description = wtypes.wsattr(wtypes.text, mandatory=False) - """Short description of this audit template""" - - goal_uuid = wsme.wsproperty( - wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) - """Goal UUID the audit template refers to""" - - goal_name = wsme.wsproperty( - wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) - """The name of the goal this audit template refers to""" - - strategy_uuid = wsme.wsproperty( - wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) - """Strategy UUID the audit template refers to""" - - strategy_name = wsme.wsproperty( - wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) - """The name of the strategy this audit template refers to""" - - audits = wsme.wsattr([link.Link], readonly=True) - """Links to the collection of audits contained in this audit template""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated audit template links""" - - scope = wsme.wsattr(types.jsontype, mandatory=False) - """Audit Scope""" - - def __init__(self, **kwargs): - super(AuditTemplate, self).__init__() - self.fields = [] - fields = list(objects.AuditTemplate.fields) - - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - self.fields.append('goal_id') - self.fields.append('strategy_id') - setattr(self, 'strategy_id', kwargs.get('strategy_id', wtypes.Unset)) - - # goal_uuid & strategy_uuid are not part of - # objects.AuditTemplate.fields because they're API-only attributes. - self.fields.append('goal_uuid') - self.fields.append('goal_name') - self.fields.append('strategy_uuid') - self.fields.append('strategy_name') - setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'strategy_uuid', - kwargs.get('strategy_id', wtypes.Unset)) - setattr(self, 'strategy_name', - kwargs.get('strategy_id', wtypes.Unset)) - - @staticmethod - def _convert_with_links(audit_template, url, expand=True): - if not expand: - audit_template.unset_fields_except( - ['uuid', 'name', 'goal_uuid', 'goal_name', - 'scope', 'strategy_uuid', 'strategy_name']) - - # The numeric ID should not be exposed to - # the user, it's internal only. 
- audit_template.goal_id = wtypes.Unset - audit_template.strategy_id = wtypes.Unset - - audit_template.links = [link.Link.make_link('self', url, - 'audit_templates', - audit_template.uuid), - link.Link.make_link('bookmark', url, - 'audit_templates', - audit_template.uuid, - bookmark=True)] - return audit_template - - @classmethod - def convert_with_links(cls, rpc_audit_template, expand=True): - audit_template = AuditTemplate(**rpc_audit_template.as_dict()) - return cls._convert_with_links(audit_template, pecan.request.host_url, - expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='My Audit Template', - description='Description of my audit template', - goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6', - strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow(), - scope=[],) - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class AuditTemplateCollection(collection.Collection): - """API representation of a collection of audit templates.""" - - audit_templates = [AuditTemplate] - """A list containing audit templates objects""" - - def __init__(self, **kwargs): - super(AuditTemplateCollection, self).__init__() - self._type = 'audit_templates' - - @staticmethod - def convert_with_links(rpc_audit_templates, limit, url=None, expand=False, - **kwargs): - at_collection = AuditTemplateCollection() - at_collection.audit_templates = [ - AuditTemplate.convert_with_links(p, expand) - for p in rpc_audit_templates] - at_collection.next = at_collection.get_next(limit, url=url, **kwargs) - return at_collection - - @classmethod - def sample(cls): - sample = cls() - sample.audit_templates = [AuditTemplate.sample(expand=False)] - return sample - - -class AuditTemplatesController(rest.RestController): - """REST controller for AuditTemplates.""" - def __init__(self): - 
super(AuditTemplatesController, self).__init__() - - from_audit_templates = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource AuditTemplates.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_audit_templates_collection(self, filters, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None): - api_utils.validate_search_filters( - filters, list(objects.audit_template.AuditTemplate.fields.keys()) + - ["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"]) - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.AuditTemplate.get_by_uuid( - pecan.request.context, - marker) - - audit_templates = objects.AuditTemplate.list( - pecan.request.context, - filters, - limit, - marker_obj, sort_key=sort_key, - sort_dir=sort_dir) - - return AuditTemplateCollection.convert_with_links(audit_templates, - limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, goal=None, strategy=None, marker=None, - limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of audit templates. - - :param goal: goal UUID or name to filter by - :param strategy: strategy UUID or name to filter by - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit_template:get_all', - action='audit_template:get_all') - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - if strategy: - if common_utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - filters['strategy_name'] = strategy - - return self._get_audit_templates_collection( - filters, marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, - types.uuid, int, wtypes.text, wtypes.text) - def detail(self, goal=None, strategy=None, marker=None, - limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of audit templates with detail. - - :param goal: goal UUID or name to filter by - :param strategy: strategy UUID or name to filter by - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit_template:detail', - action='audit_template:detail') - - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "audit_templates": - raise exception.HTTPNotFound - - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - if strategy: - if common_utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - filters['strategy_name'] = strategy - - expand = True - resource_url = '/'.join(['audit_templates', 'detail']) - return self._get_audit_templates_collection(filters, marker, limit, - sort_key, sort_dir, expand, - resource_url) - - @wsme_pecan.wsexpose(AuditTemplate, wtypes.text) - def get_one(self, audit_template): - """Retrieve information about the given audit template. - - :param audit audit_template: UUID or name of an audit template. - """ - if self.from_audit_templates: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_audit_template = api_utils.get_resource('AuditTemplate', - audit_template) - policy.enforce(context, 'audit_template:get', rpc_audit_template, - action='audit_template:get') - - return AuditTemplate.convert_with_links(rpc_audit_template) - - @wsme.validate(types.uuid, AuditTemplatePostType) - @wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType, - status_code=201) - def post(self, audit_template_postdata): - """Create a new audit template. - - :param audit_template_postdata: the audit template POST data - from the request body. 
- """ - if self.from_audit_templates: - raise exception.OperationNotPermitted - - context = pecan.request.context - policy.enforce(context, 'audit_template:create', - action='audit_template:create') - - context = pecan.request.context - audit_template = audit_template_postdata.as_audit_template() - audit_template_dict = audit_template.as_dict() - new_audit_template = objects.AuditTemplate(context, - **audit_template_dict) - new_audit_template.create() - - # Set the HTTP Location Header - pecan.response.location = link.build_url( - 'audit_templates', new_audit_template.uuid) - return AuditTemplate.convert_with_links(new_audit_template) - - @wsme.validate(types.uuid, [AuditTemplatePatchType]) - @wsme_pecan.wsexpose(AuditTemplate, wtypes.text, - body=[AuditTemplatePatchType]) - def patch(self, audit_template, patch): - """Update an existing audit template. - - :param audit template_uuid: UUID of a audit template. - :param patch: a json PATCH document to apply to this audit template. - """ - if self.from_audit_templates: - raise exception.OperationNotPermitted - - context = pecan.request.context - audit_template_to_update = api_utils.get_resource('AuditTemplate', - audit_template) - policy.enforce(context, 'audit_template:update', - audit_template_to_update, - action='audit_template:update') - - if common_utils.is_uuid_like(audit_template): - audit_template_to_update = objects.AuditTemplate.get_by_uuid( - pecan.request.context, - audit_template) - else: - audit_template_to_update = objects.AuditTemplate.get_by_name( - pecan.request.context, - audit_template) - - try: - audit_template_dict = audit_template_to_update.as_dict() - audit_template = AuditTemplate(**api_utils.apply_jsonpatch( - audit_template_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.AuditTemplate.fields: - try: - patch_val = getattr(audit_template, field) - except 
AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if audit_template_to_update[field] != patch_val: - audit_template_to_update[field] = patch_val - - audit_template_to_update.save() - return AuditTemplate.convert_with_links(audit_template_to_update) - - @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) - def delete(self, audit_template): - """Delete a audit template. - - :param audit template_uuid: UUID or name of an audit template. - """ - context = pecan.request.context - audit_template_to_delete = api_utils.get_resource('AuditTemplate', - audit_template) - policy.enforce(context, 'audit_template:update', - audit_template_to_delete, - action='audit_template:update') - - audit_template_to_delete.soft_delete() diff --git a/watcher/api/controllers/v1/collection.py b/watcher/api/controllers/v1/collection.py deleted file mode 100644 index 05e05df..0000000 --- a/watcher/api/controllers/v1/collection.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pecan -from wsme import types as wtypes - -from watcher.api.controllers import base -from watcher.api.controllers import link - - -class Collection(base.APIBase): - - next = wtypes.text - """A link to retrieve the next subset of the collection""" - - @property - def collection(self): - return getattr(self, self._type) - - def has_next(self, limit): - """Return whether collection has more items.""" - return len(self.collection) and len(self.collection) == limit - - def get_next(self, limit, url=None, marker_field="uuid", **kwargs): - """Return a link to the next subset of the collection.""" - if not self.has_next(limit): - return wtypes.Unset - - resource_url = url or self._type - q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) - next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { - 'args': q_args, 'limit': limit, - 'marker': getattr(self.collection[-1], marker_field)} - - return link.Link.make_link('next', pecan.request.host_url, - resource_url, next_args).href diff --git a/watcher/api/controllers/v1/efficacy_indicator.py b/watcher/api/controllers/v1/efficacy_indicator.py deleted file mode 100644 index b17ccf2..0000000 --- a/watcher/api/controllers/v1/efficacy_indicator.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -An efficacy indicator is a single value that gives an indication on how the -:ref:`solution ` produced by a given :ref:`strategy -` performed. These efficacy indicators are specific to a -given :ref:`goal ` and are usually used to compute the -:ref:`global efficacy ` of the resulting :ref:`action plan -`. - -In Watcher, these efficacy indicators are specified alongside the goal they -relate to. When a strategy (which always relates to a goal) is executed, it -produces a solution containing the efficacy indicators specified by the goal. -This solution, which has been translated by the :ref:`Watcher Planner -` into an action plan, will see its indicators and -global efficacy stored and would now be accessible through the :ref:`Watcher -API `. -""" - -import numbers - -from wsme import types as wtypes - -from watcher.api.controllers import base -from watcher import objects - - -class EfficacyIndicator(base.APIBase): - """API representation of a efficacy indicator. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - efficacy indicator. - """ - - name = wtypes.wsattr(wtypes.text, mandatory=True) - """Name of this efficacy indicator""" - - description = wtypes.wsattr(wtypes.text, mandatory=False) - """Description of this efficacy indicator""" - - unit = wtypes.wsattr(wtypes.text, mandatory=False) - """Unit of this efficacy indicator""" - - value = wtypes.wsattr(numbers.Number, mandatory=True) - """Value of this efficacy indicator""" - - def __init__(self, **kwargs): - super(EfficacyIndicator, self).__init__() - - self.fields = [] - fields = list(objects.EfficacyIndicator.fields) - for field in fields: - # Skip fields we do not expose. 
- if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) diff --git a/watcher/api/controllers/v1/goal.py b/watcher/api/controllers/v1/goal.py deleted file mode 100644 index a7dd28b..0000000 --- a/watcher/api/controllers/v1/goal.py +++ /dev/null @@ -1,240 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Goal ` is a human readable, observable and measurable -end result having one objective to be achieved. - -Here are some examples of :ref:`Goals `: - -- minimize the energy consumption -- minimize the number of compute nodes (consolidation) -- balance the workload among compute nodes -- minimize the license cost (some softwares have a licensing model which is - based on the number of sockets or cores where the software is deployed) -- find the most appropriate moment for a planned maintenance on a - given group of host (which may be an entire availability zone): - power supply replacement, cooling system replacement, hardware - modification, ... 
-""" - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -class Goal(base.APIBase): - """API representation of a goal. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a goal. - """ - - uuid = types.uuid - """Unique UUID for this goal""" - - name = wtypes.text - """Name of the goal""" - - display_name = wtypes.text - """Localized name of the goal""" - - efficacy_specification = wtypes.wsattr(types.jsontype, readonly=True) - """Efficacy specification for this goal""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated audit template links""" - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Goal.fields) - - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(goal, url, expand=True): - if not expand: - goal.unset_fields_except(['uuid', 'name', 'display_name', - 'efficacy_specification']) - - goal.links = [link.Link.make_link('self', url, - 'goals', goal.uuid), - link.Link.make_link('bookmark', url, - 'goals', goal.uuid, - bookmark=True)] - return goal - - @classmethod - def convert_with_links(cls, goal, expand=True): - goal = Goal(**goal.as_dict()) - return cls._convert_with_links(goal, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls( - uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='DUMMY', - display_name='Dummy strategy', - efficacy_specification=[ - {'description': 'Dummy indicator', 'name': 'dummy', - 'schema': 'Range(min=0, max=100, min_included=True, ' - 'max_included=True, msg=None)', - 'unit': '%'} - ]) - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class GoalCollection(collection.Collection): - """API representation of a collection of goals.""" - - goals = [Goal] - """A list containing goals objects""" - - def __init__(self, **kwargs): - super(GoalCollection, self).__init__() - self._type = 'goals' - - @staticmethod - def convert_with_links(goals, limit, url=None, expand=False, - **kwargs): - goal_collection = GoalCollection() - goal_collection.goals = [ - Goal.convert_with_links(g, expand) for g in goals] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'strategy': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - goal_collection.goals = sorted( - goal_collection.goals, - key=lambda goal: goal.uuid, - reverse=reverse) - - goal_collection.next = goal_collection.get_next( - limit, url=url, **kwargs) - return goal_collection - - @classmethod - def sample(cls): - sample = cls() - sample.goals = 
[Goal.sample(expand=False)] - return sample - - -class GoalsController(rest.RestController): - """REST controller for Goals.""" - def __init__(self): - super(GoalsController, self).__init__() - - from_goals = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Goals.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_goals_collection(self, marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - sort_db_key = (sort_key if sort_key in objects.Goal.fields.keys() - else None) - - marker_obj = None - if marker: - marker_obj = objects.Goal.get_by_uuid( - pecan.request.context, marker) - - goals = objects.Goal.list(pecan.request.context, limit, marker_obj, - sort_key=sort_db_key, sort_dir=sort_dir) - - return GoalCollection.convert_with_links(goals, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(GoalCollection, wtypes.text, - int, wtypes.text, wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of goals. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'goal:get_all', - action='goal:get_all') - return self._get_goals_collection(marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(GoalCollection, wtypes.text, int, - wtypes.text, wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of goals with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. 
- :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'goal:detail', - action='goal:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "goals": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['goals', 'detail']) - return self._get_goals_collection(marker, limit, sort_key, sort_dir, - expand, resource_url) - - @wsme_pecan.wsexpose(Goal, wtypes.text) - def get_one(self, goal): - """Retrieve information about the given goal. - - :param goal: UUID or name of the goal. - """ - if self.from_goals: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_goal = api_utils.get_resource('Goal', goal) - policy.enforce(context, 'goal:get', rpc_goal, action='goal:get') - - return Goal.convert_with_links(rpc_goal) diff --git a/watcher/api/controllers/v1/scoring_engine.py b/watcher/api/controllers/v1/scoring_engine.py deleted file mode 100644 index 0e13d38..0000000 --- a/watcher/api/controllers/v1/scoring_engine.py +++ /dev/null @@ -1,248 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2016 Intel -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -A :ref:`Scoring Engine ` is an executable that has -a well-defined input, a well-defined output, and performs a purely mathematical -task. That is, the calculation does not depend on the environment in which it -is running - it would produce the same result anywhere. - -Because there might be multiple algorithms used to build a particular data -model (and therefore a scoring engine), the usage of scoring engine might -vary. A metainfo field is supposed to contain any information which might -be needed by the user of a given scoring engine. -""" - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -class ScoringEngine(base.APIBase): - """API representation of a scoring engine. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a scoring - engine. 
- """ - - uuid = types.uuid - """Unique UUID of the scoring engine""" - - name = wtypes.text - """The name of the scoring engine""" - - description = wtypes.text - """A human readable description of the Scoring Engine""" - - metainfo = wtypes.text - """A metadata associated with the scoring engine""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated action links""" - - def __init__(self, **kwargs): - super(ScoringEngine, self).__init__() - - self.fields = [] - self.fields.append('uuid') - self.fields.append('name') - self.fields.append('description') - self.fields.append('metainfo') - setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) - setattr(self, 'name', kwargs.get('name', wtypes.Unset)) - setattr(self, 'description', kwargs.get('description', wtypes.Unset)) - setattr(self, 'metainfo', kwargs.get('metainfo', wtypes.Unset)) - - @staticmethod - def _convert_with_links(se, url, expand=True): - if not expand: - se.unset_fields_except( - ['uuid', 'name', 'description', 'metainfo']) - - se.links = [link.Link.make_link('self', url, - 'scoring_engines', se.uuid), - link.Link.make_link('bookmark', url, - 'scoring_engines', se.uuid, - bookmark=True)] - return se - - @classmethod - def convert_with_links(cls, scoring_engine, expand=True): - scoring_engine = ScoringEngine(**scoring_engine.as_dict()) - return cls._convert_with_links( - scoring_engine, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='81bbd3c7-3b08-4d12-a268-99354dbf7b71', - name='sample-se-123', - description='Sample Scoring Engine 123 just for testing') - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ScoringEngineCollection(collection.Collection): - """API representation of a collection of scoring engines.""" - - scoring_engines = [ScoringEngine] - """A list containing scoring engine objects""" - - def __init__(self, **kwargs): - super(ScoringEngineCollection, 
self).__init__() - self._type = 'scoring_engines' - - @staticmethod - def convert_with_links(scoring_engines, limit, url=None, expand=False, - **kwargs): - - collection = ScoringEngineCollection() - collection.scoring_engines = [ScoringEngine.convert_with_links( - se, expand) for se in scoring_engines] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'name': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - collection.goals = sorted( - collection.scoring_engines, - key=lambda se: se.name, - reverse=reverse) - - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.scoring_engines = [ScoringEngine.sample(expand=False)] - return sample - - -class ScoringEngineController(rest.RestController): - """REST controller for Scoring Engines.""" - def __init__(self): - super(ScoringEngineController, self).__init__() - - from_scoring_engines = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Scoring Engines.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_scoring_engines_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None): - - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.ScoringEngine.get_by_uuid( - pecan.request.context, marker) - - filters = {} - - sort_db_key = sort_key - - scoring_engines = objects.ScoringEngine.list( - context=pecan.request.context, - limit=limit, - marker=marker_obj, - sort_key=sort_db_key, - sort_dir=sort_dir, - filters=filters) - - return ScoringEngineCollection.convert_with_links( - scoring_engines, - limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, - int, wtypes.text, wtypes.text) - def get_all(self, 
marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of Scoring Engines. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: name. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'scoring_engine:get_all', - action='scoring_engine:get_all') - - return self._get_scoring_engines_collection( - marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, - int, wtypes.text, wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of Scoring Engines with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: name. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'scoring_engine:detail', - action='scoring_engine:detail') - - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "scoring_engines": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['scoring_engines', 'detail']) - return self._get_scoring_engines_collection( - marker, limit, sort_key, sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(ScoringEngine, wtypes.text) - def get_one(self, scoring_engine): - """Retrieve information about the given Scoring Engine. - - :param scoring_engine_name: The name of the Scoring Engine. 
- """ - context = pecan.request.context - policy.enforce(context, 'scoring_engine:get', - action='scoring_engine:get') - - if self.from_scoring_engines: - raise exception.OperationNotPermitted - - rpc_scoring_engine = api_utils.get_resource( - 'ScoringEngine', scoring_engine) - - return ScoringEngine.convert_with_links(rpc_scoring_engine) diff --git a/watcher/api/controllers/v1/service.py b/watcher/api/controllers/v1/service.py deleted file mode 100644 index 63ea179..0000000 --- a/watcher/api/controllers/v1/service.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Service mechanism provides ability to monitor Watcher services state. -""" - -import datetime -import six - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import context -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -class Service(base.APIBase): - """API representation of a service. 
- - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a service. - """ - - _status = None - _context = context.RequestContext(is_admin=True) - - def _get_status(self): - return self._status - - def _set_status(self, id): - service = objects.Service.get(pecan.request.context, id) - last_heartbeat = (service.last_seen_up or service.updated_at - or service.created_at) - if isinstance(last_heartbeat, six.string_types): - # NOTE(russellb) If this service came in over rpc via - # conductor, then the timestamp will be a string and needs to be - # converted back to a datetime. - last_heartbeat = timeutils.parse_strtime(last_heartbeat) - else: - # Objects have proper UTC timezones, but the timeutils comparison - # below does not (and will fail) - last_heartbeat = last_heartbeat.replace(tzinfo=None) - elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) - is_up = abs(elapsed) <= CONF.service_down_time - if not is_up: - LOG.warning('Seems service %(name)s on host %(host)s is down. ' - 'Last heartbeat was %(lhb)s.' 
- 'Elapsed time is %(el)s', - {'name': service.name, - 'host': service.host, - 'lhb': str(last_heartbeat), 'el': str(elapsed)}) - self._status = objects.service.ServiceStatus.FAILED - else: - self._status = objects.service.ServiceStatus.ACTIVE - - id = wsme.wsattr(int, readonly=True) - """ID for this service.""" - - name = wtypes.text - """Name of the service.""" - - host = wtypes.text - """Host where service is placed on.""" - - last_seen_up = wsme.wsattr(datetime.datetime, readonly=True) - """Time when Watcher service sent latest heartbeat.""" - - status = wsme.wsproperty(wtypes.text, _get_status, _set_status, - mandatory=True) - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link.""" - - def __init__(self, **kwargs): - super(Service, self).__init__() - - fields = list(objects.Service.fields.keys()) + ['status'] - self.fields = [] - for field in fields: - self.fields.append(field) - setattr(self, field, kwargs.get( - field if field != 'status' else 'id', wtypes.Unset)) - - @staticmethod - def _convert_with_links(service, url, expand=True): - if not expand: - service.unset_fields_except( - ['id', 'name', 'host', 'status']) - - service.links = [ - link.Link.make_link('self', url, 'services', str(service.id)), - link.Link.make_link('bookmark', url, 'services', str(service.id), - bookmark=True)] - return service - - @classmethod - def convert_with_links(cls, service, expand=True): - service = Service(**service.as_dict()) - return cls._convert_with_links( - service, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(id=1, - name='watcher-applier', - host='Controller', - last_seen_up=datetime.datetime(2016, 1, 1)) - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ServiceCollection(collection.Collection): - """API representation of a collection of services.""" - - services = [Service] - """A list containing services objects""" - - def __init__(self, 
**kwargs): - super(ServiceCollection, self).__init__() - self._type = 'services' - - @staticmethod - def convert_with_links(services, limit, url=None, expand=False, - **kwargs): - service_collection = ServiceCollection() - service_collection.services = [ - Service.convert_with_links(g, expand) for g in services] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'service': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - service_collection.services = sorted( - service_collection.services, - key=lambda service: service.id, - reverse=reverse) - - service_collection.next = service_collection.get_next( - limit, url=url, marker_field='id', **kwargs) - return service_collection - - @classmethod - def sample(cls): - sample = cls() - sample.services = [Service.sample(expand=False)] - return sample - - -class ServicesController(rest.RestController): - """REST controller for Services.""" - def __init__(self): - super(ServicesController, self).__init__() - - from_services = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Services.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_services_collection(self, marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - sort_db_key = (sort_key if sort_key in objects.Service.fields.keys() - else None) - - marker_obj = None - if marker: - marker_obj = objects.Service.get( - pecan.request.context, marker) - - services = objects.Service.list( - pecan.request.context, limit, marker_obj, - sort_key=sort_db_key, sort_dir=sort_dir) - - return ServiceCollection.convert_with_links( - services, limit, url=resource_url, expand=expand, - sort_key=sort_key, sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', 
sort_dir='asc'): - """Retrieve a list of services. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'service:get_all', - action='service:get_all') - - return self._get_services_collection(marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of services with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'service:detail', - action='service:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "services": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['services', 'detail']) - - return self._get_services_collection( - marker, limit, sort_key, sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(Service, wtypes.text) - def get_one(self, service): - """Retrieve information about the given service. - - :param service: ID or name of the service. 
- """ - if self.from_services: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_service = api_utils.get_resource('Service', service) - policy.enforce(context, 'service:get', rpc_service, - action='service:get') - - return Service.convert_with_links(rpc_service) diff --git a/watcher/api/controllers/v1/strategy.py b/watcher/api/controllers/v1/strategy.py deleted file mode 100644 index 2c74da1..0000000 --- a/watcher/api/controllers/v1/strategy.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Strategy ` is an algorithm implementation which is -able to find a :ref:`Solution ` for a given -:ref:`Goal `. - -There may be several potential strategies which are able to achieve the same -:ref:`Goal `. This is why it is possible to configure which -specific :ref:`Strategy ` should be used for each goal. - -Some strategies may provide better optimization results but may take more time -to find an optimal :ref:`Solution `. 
-""" - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils as common_utils -from watcher import objects - - -class Strategy(base.APIBase): - """API representation of a strategy. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a strategy. - """ - _goal_uuid = None - _goal_name = None - - def _get_goal(self, value): - if value == wtypes.Unset: - return None - goal = None - try: - if (common_utils.is_uuid_like(value) or - common_utils.is_int_like(value)): - goal = objects.Goal.get(pecan.request.context, value) - else: - goal = objects.Goal.get_by_name(pecan.request.context, value) - except exception.GoalNotFound: - pass - if goal: - self.goal_id = goal.id - return goal - - def _get_goal_uuid(self): - return self._goal_uuid - - def _set_goal_uuid(self, value): - if value and self._goal_uuid != value: - self._goal_uuid = None - goal = self._get_goal(value) - if goal: - self._goal_uuid = goal.uuid - - def _get_goal_name(self): - return self._goal_name - - def _set_goal_name(self, value): - if value and self._goal_name != value: - self._goal_name = None - goal = self._get_goal(value) - if goal: - self._goal_name = goal.name - - uuid = types.uuid - """Unique UUID for this strategy""" - - name = wtypes.text - """Name of the strategy""" - - display_name = wtypes.text - """Localized name of the strategy""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated goal links""" - - goal_uuid = wsme.wsproperty(wtypes.text, 
_get_goal_uuid, _set_goal_uuid, - mandatory=True) - """The UUID of the goal this audit refers to""" - - goal_name = wsme.wsproperty(wtypes.text, _get_goal_name, _set_goal_name, - mandatory=False) - """The name of the goal this audit refers to""" - - parameters_spec = {wtypes.text: types.jsontype} - """Parameters spec dict""" - - def __init__(self, **kwargs): - super(Strategy, self).__init__() - - self.fields = [] - self.fields.append('uuid') - self.fields.append('name') - self.fields.append('display_name') - self.fields.append('goal_uuid') - self.fields.append('goal_name') - self.fields.append('parameters_spec') - setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) - setattr(self, 'name', kwargs.get('name', wtypes.Unset)) - setattr(self, 'display_name', kwargs.get('display_name', wtypes.Unset)) - setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'parameters_spec', kwargs.get('parameters_spec', - wtypes.Unset)) - - @staticmethod - def _convert_with_links(strategy, url, expand=True): - if not expand: - strategy.unset_fields_except( - ['uuid', 'name', 'display_name', 'goal_uuid', 'goal_name']) - - strategy.links = [ - link.Link.make_link('self', url, 'strategies', strategy.uuid), - link.Link.make_link('bookmark', url, 'strategies', strategy.uuid, - bookmark=True)] - return strategy - - @classmethod - def convert_with_links(cls, strategy, expand=True): - strategy = Strategy(**strategy.as_dict()) - return cls._convert_with_links( - strategy, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='DUMMY', - display_name='Dummy strategy') - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class StrategyCollection(collection.Collection): - """API representation of a collection of strategies.""" - - strategies = [Strategy] - """A list containing strategies 
objects""" - - def __init__(self, **kwargs): - super(StrategyCollection, self).__init__() - self._type = 'strategies' - - @staticmethod - def convert_with_links(strategies, limit, url=None, expand=False, - **kwargs): - strategy_collection = StrategyCollection() - strategy_collection.strategies = [ - Strategy.convert_with_links(g, expand) for g in strategies] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'strategy': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - strategy_collection.strategies = sorted( - strategy_collection.strategies, - key=lambda strategy: strategy.uuid, - reverse=reverse) - - strategy_collection.next = strategy_collection.get_next( - limit, url=url, **kwargs) - return strategy_collection - - @classmethod - def sample(cls): - sample = cls() - sample.strategies = [Strategy.sample(expand=False)] - return sample - - -class StrategiesController(rest.RestController): - """REST controller for Strategies.""" - def __init__(self): - super(StrategiesController, self).__init__() - - from_strategies = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Strategies.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_strategies_collection(self, filters, marker, limit, sort_key, - sort_dir, expand=False, resource_url=None): - api_utils.validate_search_filters( - filters, list(objects.strategy.Strategy.fields.keys()) + - ["goal_uuid", "goal_name"]) - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - sort_db_key = (sort_key if sort_key in objects.Strategy.fields.keys() - else None) - - marker_obj = None - if marker: - marker_obj = objects.Strategy.get_by_uuid( - pecan.request.context, marker) - - strategies = objects.Strategy.list( - pecan.request.context, limit, marker_obj, filters=filters, - sort_key=sort_db_key, sort_dir=sort_dir) - - return StrategyCollection.convert_with_links( - 
strategies, limit, url=resource_url, expand=expand, - sort_key=sort_key, sort_dir=sort_dir) - - @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, - int, wtypes.text, wtypes.text) - def get_all(self, goal=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of strategies. - - :param goal: goal UUID or name to filter by. - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'strategy:get_all', - action='strategy:get_all') - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - return self._get_strategies_collection( - filters, marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, int, - wtypes.text, wtypes.text) - def detail(self, goal=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of strategies with detail. - - :param goal: goal UUID or name to filter by. - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'strategy:detail', - action='strategy:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "strategies": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['strategies', 'detail']) - - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - return self._get_strategies_collection( - filters, marker, limit, sort_key, sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(Strategy, wtypes.text) - def get_one(self, strategy): - """Retrieve information about the given strategy. - - :param strategy: UUID or name of the strategy. - """ - if self.from_strategies: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_strategy = api_utils.get_resource('Strategy', strategy) - policy.enforce(context, 'strategy:get', rpc_strategy, - action='strategy:get') - - return Strategy.convert_with_links(rpc_strategy) diff --git a/watcher/api/controllers/v1/types.py b/watcher/api/controllers/v1/types.py deleted file mode 100644 index 77d41b6..0000000 --- a/watcher/api/controllers/v1/types.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -from oslo_utils import strutils -import six -import wsme -from wsme import types as wtypes - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils - - -class UuidOrNameType(wtypes.UserType): - """A simple UUID or logical name type.""" - - basetype = wtypes.text - name = 'uuid_or_name' - - @staticmethod - def validate(value): - if not (utils.is_uuid_like(value) or utils.is_hostname_safe(value)): - raise exception.InvalidUuidOrName(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UuidOrNameType.validate(value) - - -class IntervalOrCron(wtypes.UserType): - """A simple int value or cron syntax type""" - - basetype = wtypes.text - name = 'interval_or_cron' - - @staticmethod - def validate(value): - if not (utils.is_int_like(value) or utils.is_cron_like(value)): - raise exception.InvalidIntervalOrCron(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return IntervalOrCron.validate(value) - - -interval_or_cron = IntervalOrCron() - - -class NameType(wtypes.UserType): - """A simple logical name type.""" - - basetype = wtypes.text - name = 'name' - - @staticmethod - def validate(value): - if not utils.is_hostname_safe(value): - raise exception.InvalidName(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return NameType.validate(value) - - -class UuidType(wtypes.UserType): - """A simple UUID type.""" - - basetype = wtypes.text - name = 'uuid' - - @staticmethod - def validate(value): - if not utils.is_uuid_like(value): - raise exception.InvalidUUID(uuid=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UuidType.validate(value) - - -class BooleanType(wtypes.UserType): - """A simple boolean type.""" - - basetype = wtypes.text - name = 'boolean' - - 
@staticmethod - def validate(value): - try: - return strutils.bool_from_string(value, strict=True) - except ValueError as e: - # raise Invalid to return 400 (BadRequest) in the API - raise exception.Invalid(e) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return BooleanType.validate(value) - - -class JsonType(wtypes.UserType): - """A simple JSON type.""" - - basetype = wtypes.text - name = 'json' - - def __str__(self): - # These are the json serializable native types - return ' | '.join(map(str, (wtypes.text, six.integer_types, float, - BooleanType, list, dict, None))) - - @staticmethod - def validate(value): - try: - jsonutils.dumps(value, default=None) - except TypeError: - raise exception.Invalid(_('%s is not JSON serializable') % value) - else: - return value - - @staticmethod - def frombasetype(value): - return JsonType.validate(value) - - -uuid = UuidType() -boolean = BooleanType() -jsontype = JsonType() - - -class MultiType(wtypes.UserType): - """A complex type that represents one or more types. - - Used for validating that a value is an instance of one of the types. - - :param types: Variable-length list of types. - - """ - def __init__(self, *types): - self.types = types - - def __str__(self): - return ' | '.join(map(str, self.types)) - - def validate(self, value): - for t in self.types: - if t is wsme.types.text and isinstance(value, wsme.types.bytes): - value = value.decode() - if isinstance(value, t): - return value - else: - raise ValueError( - _("Wrong type. 
Expected '%(type)s', got '%(value)s'"), - type=self.types, value=type(value) - ) - - -class JsonPatchType(wtypes.Base): - """A complex type that represents a single json-patch operation.""" - - path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'), - mandatory=True) - op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), - mandatory=True) - value = wsme.wsattr(jsontype, default=wtypes.Unset) - - @staticmethod - def internal_attrs(): - """Returns a list of internal attributes. - - Internal attributes can't be added, replaced or removed. This - method may be overwritten by derived class. - - """ - return ['/created_at', '/id', '/links', '/updated_at', - '/deleted_at', '/uuid'] - - @staticmethod - def mandatory_attrs(): - """Returns a list of mandatory attributes. - - Mandatory attributes can't be removed from the document. This - method should be overwritten by derived class. - - """ - return [] - - @staticmethod - def validate(patch): - _path = '/{0}'.format(patch.path.split('/')[1]) - if _path in patch.internal_attrs(): - msg = _("'%s' is an internal attribute and can not be updated") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.path in patch.mandatory_attrs() and patch.op == 'remove': - msg = _("'%s' is a mandatory attribute and can not be removed") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.op != 'remove': - if patch.value is wsme.Unset: - msg = _("'add' and 'replace' operations needs value") - raise wsme.exc.ClientSideError(msg) - - ret = {'path': patch.path, 'op': patch.op} - if patch.value is not wsme.Unset: - ret['value'] = patch.value - return ret diff --git a/watcher/api/controllers/v1/utils.py b/watcher/api/controllers/v1/utils.py deleted file mode 100644 index 2ad5b49..0000000 --- a/watcher/api/controllers/v1/utils.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jsonpatch -from oslo_config import cfg -from oslo_utils import reflection -from oslo_utils import uuidutils -import pecan -import wsme - -from watcher._i18n import _ -from watcher.common import utils -from watcher import objects - -CONF = cfg.CONF - - -JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, - jsonpatch.JsonPointerException, - KeyError) - - -def validate_limit(limit): - if limit is None: - return CONF.api.max_limit - - if limit <= 0: - # Case where we don't a valid limit value - raise wsme.exc.ClientSideError(_("Limit must be positive")) - - if limit and not CONF.api.max_limit: - # Case where we don't have an upper limit - return limit - - return min(CONF.api.max_limit, limit) - - -def validate_sort_dir(sort_dir): - if sort_dir not in ['asc', 'desc']: - raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. " - "Acceptable values are " - "'asc' or 'desc'") % sort_dir) - - -def validate_search_filters(filters, allowed_fields): - # Very lightweight validation for now - # todo: improve this (e.g. 
https://www.parse.com/docs/rest/guide/#queries) - for filter_name in filters.keys(): - if filter_name not in allowed_fields: - raise wsme.exc.ClientSideError( - _("Invalid filter: %s") % filter_name) - - -def apply_jsonpatch(doc, patch): - for p in patch: - if p['op'] == 'add' and p['path'].count('/') == 1: - if p['path'].lstrip('/') not in doc: - msg = _('Adding a new attribute (%s) to the root of ' - ' the resource is not allowed') - raise wsme.exc.ClientSideError(msg % p['path']) - return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch)) - - -def get_patch_value(patch, key): - for p in patch: - if p['op'] == 'replace' and p['path'] == '/%s' % key: - return p['value'] - - -def check_audit_state_transition(patch, initial): - is_transition_valid = True - state_value = get_patch_value(patch, "state") - if state_value is not None: - is_transition_valid = objects.audit.AuditStateTransitionManager( - ).check_transition(initial, state_value) - return is_transition_valid - - -def as_filters_dict(**filters): - filters_dict = {} - for filter_name, filter_value in filters.items(): - if filter_value: - filters_dict[filter_name] = filter_value - - return filters_dict - - -def get_resource(resource, resource_id, eager=False): - """Get the resource from the uuid, id or logical name. - - :param resource: the resource type. - :param resource_id: the UUID, ID or logical name of the resource. - - :returns: The resource. 
- """ - resource = getattr(objects, resource) - - _get = None - if utils.is_int_like(resource_id): - resource_id = int(resource_id) - _get = resource.get - elif uuidutils.is_uuid_like(resource_id): - _get = resource.get_by_uuid - else: - _get = resource.get_by_name - - method_signature = reflection.get_signature(_get) - if 'eager' in method_signature.parameters: - return _get(pecan.request.context, resource_id, eager=eager) - - return _get(pecan.request.context, resource_id) diff --git a/watcher/api/hooks.py b/watcher/api/hooks.py deleted file mode 100644 index 8147e39..0000000 --- a/watcher/api/hooks.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -from oslo_utils import importutils -from pecan import hooks -from six.moves import http_client - -from watcher.common import context - - -class ContextHook(hooks.PecanHook): - """Configures a request context and attaches it to the request. - - The following HTTP request headers are used: - - X-User: - Used for context.user. - - X-User-Id: - Used for context.user_id. - - X-Project-Name: - Used for context.project. - - X-Project-Id: - Used for context.project_id. - - X-Auth-Token: - Used for context.auth_token. 
- - """ - - def before(self, state): - headers = state.request.headers - user = headers.get('X-User') - user_id = headers.get('X-User-Id') - project = headers.get('X-Project-Name') - project_id = headers.get('X-Project-Id') - domain_id = headers.get('X-User-Domain-Id') - domain_name = headers.get('X-User-Domain-Name') - auth_token = headers.get('X-Storage-Token') - auth_token = headers.get('X-Auth-Token', auth_token) - show_deleted = headers.get('X-Show-Deleted') - auth_token_info = state.request.environ.get('keystone.token_info') - roles = (headers.get('X-Roles', None) and - headers.get('X-Roles').split(',')) - - auth_url = headers.get('X-Auth-Url') - if auth_url is None: - importutils.import_module('keystonemiddleware.auth_token') - auth_url = cfg.CONF.keystone_authtoken.auth_uri - - state.request.context = context.make_context( - auth_token=auth_token, - auth_url=auth_url, - auth_token_info=auth_token_info, - user=user, - user_id=user_id, - project=project, - project_id=project_id, - domain_id=domain_id, - domain_name=domain_name, - show_deleted=show_deleted, - roles=roles) - - -class NoExceptionTracebackHook(hooks.PecanHook): - """Workaround rpc.common: deserialize_remote_exception. - - deserialize_remote_exception builds rpc exception traceback into error - message which is then sent to the client. Such behavior is a security - concern so this hook is aimed to cut-off traceback from the error message. - """ - # NOTE(max_lobur): 'after' hook used instead of 'on_error' because - # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator - # catches and handles all the errors, so 'on_error' dedicated for unhandled - # exceptions never fired. - def after(self, state): - # Omit empty body. Some errors may not have body at this level yet. - if not state.response.body: - return - - # Do nothing if there is no error. - # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not - # an error. 
- if (http_client.OK <= state.response.status_int < - http_client.BAD_REQUEST): - return - - json_body = state.response.json - # Do not remove traceback when traceback config is set - if cfg.CONF.debug: - return - - faultstring = json_body.get('faultstring') - traceback_marker = 'Traceback (most recent call last):' - if faultstring and traceback_marker in faultstring: - # Cut-off traceback. - faultstring = faultstring.split(traceback_marker, 1)[0] - # Remove trailing newlines and spaces if any. - json_body['faultstring'] = faultstring.rstrip() - # Replace the whole json. Cannot change original one because it's - # generated on the fly. - state.response.json = json_body diff --git a/watcher/api/middleware/__init__.py b/watcher/api/middleware/__init__.py deleted file mode 100644 index 6141cb9..0000000 --- a/watcher/api/middleware/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from watcher.api.middleware import auth_token -from watcher.api.middleware import parsable_error - - -ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware -AuthTokenMiddleware = auth_token.AuthTokenMiddleware - -__all__ = (ParsableErrorMiddleware, - AuthTokenMiddleware) diff --git a/watcher/api/middleware/auth_token.py b/watcher/api/middleware/auth_token.py deleted file mode 100644 index 585d495..0000000 --- a/watcher/api/middleware/auth_token.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -from oslo_log import log - -from keystonemiddleware import auth_token - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils - -LOG = log.getLogger(__name__) - - -class AuthTokenMiddleware(auth_token.AuthProtocol): - """A wrapper on Keystone auth_token middleware. - - Does not perform verification of authentication tokens - for public routes in the API. 
- - """ - def __init__(self, app, conf, public_api_routes=()): - route_pattern_tpl = '%s(\.json|\.xml)?$' - - try: - self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) - for route_tpl in public_api_routes] - except re.error as e: - LOG.exception(e) - raise exception.ConfigInvalid( - error_msg=_('Cannot compile public API routes')) - - super(AuthTokenMiddleware, self).__init__(app, conf) - - def __call__(self, env, start_response): - path = utils.safe_rstrip(env.get('PATH_INFO'), '/') - - # The information whether the API call is being performed against the - # public API is required for some other components. Saving it to the - # WSGI environment is reasonable thereby. - env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path), - self.public_api_routes)) - - if env['is_public_api']: - return self._app(env, start_response) - - return super(AuthTokenMiddleware, self).__call__(env, start_response) diff --git a/watcher/api/middleware/parsable_error.py b/watcher/api/middleware/parsable_error.py deleted file mode 100644 index 9d905ab..0000000 --- a/watcher/api/middleware/parsable_error.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Middleware to replace the plain text message body of an error -response with one formatted so the client can parse it. 
- -Based on pecan.middleware.errordocument -""" - -from xml import etree as et - -from oslo_log import log -from oslo_serialization import jsonutils -import six -import webob - -from watcher._i18n import _ - -LOG = log.getLogger(__name__) - - -class ParsableErrorMiddleware(object): - """Replace error body with something the client can parse.""" - - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - # Request for this state, modified by replace_start_response() - # and used when an error is being reported. - state = {} - - def replacement_start_response(status, headers, exc_info=None): - """Overrides the default response to make errors parsable.""" - try: - status_code = int(status.split(' ')[0]) - state['status_code'] = status_code - except (ValueError, TypeError): # pragma: nocover - raise Exception(_( - 'ErrorDocumentMiddleware received an invalid ' - 'status %s') % status) - else: - if (state['status_code'] // 100) not in (2, 3): - # Remove some headers so we can replace them later - # when we have the full error message and can - # compute the length. - headers = [(h, v) - for (h, v) in headers - if h not in ('Content-Length', 'Content-Type')] - # Save the headers in case we need to modify them. 
- state['headers'] = headers - return start_response(status, headers, exc_info) - - app_iter = self.app(environ, replacement_start_response) - if (state['status_code'] // 100) not in (2, 3): - req = webob.Request(environ) - if ( - req.accept.best_match( - ['application/json', - 'application/xml']) == 'application/xml' - ): - try: - # simple check xml is valid - body = [ - et.ElementTree.tostring( - et.ElementTree.Element( - 'error_message', text='\n'.join(app_iter)))] - except et.ElementTree.ParseError as err: - LOG.error('Error parsing HTTP response: %s', err) - body = ['%s' - '' % state['status_code']] - state['headers'].append(('Content-Type', 'application/xml')) - else: - if six.PY3: - app_iter = [i.decode('utf-8') for i in app_iter] - body = [jsonutils.dumps( - {'error_message': '\n'.join(app_iter)})] - if six.PY3: - body = [item.encode('utf-8') for item in body] - state['headers'].append(('Content-Type', 'application/json')) - state['headers'].append(('Content-Length', str(len(body[0])))) - else: - body = app_iter - return body diff --git a/watcher/api/scheduling.py b/watcher/api/scheduling.py deleted file mode 100644 index 4a2b053..0000000 --- a/watcher/api/scheduling.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import datetime -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -import six - -from watcher.common import context as watcher_context -from watcher.common import scheduling -from watcher import notifications - -from watcher import objects - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -class APISchedulingService(scheduling.BackgroundSchedulerService): - - def __init__(self, gconfig=None, **options): - self.services_status = {} - gconfig = None or {} - super(APISchedulingService, self).__init__(gconfig, **options) - - def get_services_status(self, context): - services = objects.service.Service.list(context) - for service in services: - result = self.get_service_status(context, service.id) - if service.id not in self.services_status.keys(): - self.services_status[service.id] = result - continue - if self.services_status[service.id] != result: - self.services_status[service.id] = result - notifications.service.send_service_update(context, service, - state=result) - - def get_service_status(self, context, service_id): - service = objects.Service.get(context, service_id) - last_heartbeat = (service.last_seen_up or service.updated_at - or service.created_at) - if isinstance(last_heartbeat, six.string_types): - # NOTE(russellb) If this service came in over rpc via - # conductor, then the timestamp will be a string and needs to be - # converted back to a datetime. - last_heartbeat = timeutils.parse_strtime(last_heartbeat) - else: - # Objects have proper UTC timezones, but the timeutils comparison - # below does not (and will fail) - last_heartbeat = last_heartbeat.replace(tzinfo=None) - elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) - is_up = abs(elapsed) <= CONF.service_down_time - if not is_up: - LOG.warning('Seems service %(name)s on host %(host)s is down. ' - 'Last heartbeat was %(lhb)s. 
Elapsed time is %(el)s', - {'name': service.name, - 'host': service.host, - 'lhb': str(last_heartbeat), 'el': str(elapsed)}) - return objects.service.ServiceStatus.FAILED - - return objects.service.ServiceStatus.ACTIVE - - def start(self): - """Start service.""" - context = watcher_context.make_context(is_admin=True) - self.add_job(self.get_services_status, name='service_status', - trigger='interval', jobstore='default', args=[context], - next_run_time=datetime.datetime.now(), seconds=60) - super(APISchedulingService, self).start() - - def stop(self): - """Stop service.""" - self.shutdown() - - def wait(self): - """Wait for service to complete.""" - - def reset(self): - """Reset service. - - Called in case service running in daemon mode receives SIGHUP. - """ diff --git a/watcher/applier/__init__.py b/watcher/applier/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/action_plan/__init__.py b/watcher/applier/action_plan/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/action_plan/base.py b/watcher/applier/action_plan/base.py deleted file mode 100644 index dbd40a6..0000000 --- a/watcher/applier/action_plan/base.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseActionPlanHandler(object): - @abc.abstractmethod - def execute(self): - raise NotImplementedError() diff --git a/watcher/applier/action_plan/default.py b/watcher/applier/action_plan/default.py deleted file mode 100644 index a63221e..0000000 --- a/watcher/applier/action_plan/default.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from oslo_log import log - -from watcher.applier.action_plan import base -from watcher.applier import default -from watcher.common import exception -from watcher import notifications -from watcher import objects -from watcher.objects import fields - -LOG = log.getLogger(__name__) - - -class DefaultActionPlanHandler(base.BaseActionPlanHandler): - - def __init__(self, context, service, action_plan_uuid): - super(DefaultActionPlanHandler, self).__init__() - self.ctx = context - self.service = service - self.action_plan_uuid = action_plan_uuid - - def execute(self): - try: - action_plan = objects.ActionPlan.get_by_uuid( - self.ctx, self.action_plan_uuid, eager=True) - if action_plan.state == objects.action_plan.State.CANCELLED: - self._update_action_from_pending_to_cancelled() - return - action_plan.state = objects.action_plan.State.ONGOING - action_plan.save() - notifications.action_plan.send_action_notification( - self.ctx, action_plan, - action=fields.NotificationAction.EXECUTION, - phase=fields.NotificationPhase.START) - - applier = default.DefaultApplier(self.ctx, self.service) - applier.execute(self.action_plan_uuid) - - action_plan.state = objects.action_plan.State.SUCCEEDED - notifications.action_plan.send_action_notification( - self.ctx, action_plan, - action=fields.NotificationAction.EXECUTION, - phase=fields.NotificationPhase.END) - - except exception.ActionPlanCancelled as e: - LOG.exception(e) - action_plan.state = objects.action_plan.State.CANCELLED - self._update_action_from_pending_to_cancelled() - - except Exception as e: - LOG.exception(e) - action_plan.state = objects.action_plan.State.FAILED - notifications.action_plan.send_action_notification( - self.ctx, action_plan, - action=fields.NotificationAction.EXECUTION, - priority=fields.NotificationPriority.ERROR, - phase=fields.NotificationPhase.ERROR) - finally: - action_plan.save() - - def _update_action_from_pending_to_cancelled(self): - filters = {'action_plan_uuid': self.action_plan_uuid, - 
'state': objects.action.State.PENDING} - actions = objects.Action.list(self.ctx, filters=filters, eager=True) - if actions: - for a in actions: - a.state = objects.action.State.CANCELLED - a.save() diff --git a/watcher/applier/actions/__init__.py b/watcher/applier/actions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/actions/base.py b/watcher/applier/actions/base.py deleted file mode 100644 index ec1cb5d..0000000 --- a/watcher/applier/actions/base.py +++ /dev/null @@ -1,152 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import jsonschema -import six - -from watcher.common import clients -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class BaseAction(loadable.Loadable): - # NOTE(jed): by convention we decided - # that the attribute "resource_id" is the unique id of - # the resource to which the Action applies to allow us to use it in the - # watcher dashboard and will be nested in input_parameters - RESOURCE_ID = 'resource_id' - - # Add action class name to the list, if implementing abort. 
- ABORT_TRUE = ['Sleep', 'Nop'] - - def __init__(self, config, osc=None): - """Constructor - - :param config: A mapping containing the configuration of this action - :type config: dict - :param osc: an OpenStackClients instance, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - super(BaseAction, self).__init__(config) - self._input_parameters = {} - self._osc = osc - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @property - def input_parameters(self): - return self._input_parameters - - @input_parameters.setter - def input_parameters(self, p): - self._input_parameters = p - - @property - def resource_id(self): - return self.input_parameters[self.RESOURCE_ID] - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def execute(self): - """Executes the main logic of the action - - This method can be used to perform an action on a given set of input - parameters to accomplish some type of operation. This operation may - return a boolean value as a result of its execution. If False, this - will be considered as an error and will then trigger the reverting of - the actions. - - :returns: A flag indicating whether or not the action succeeded - :rtype: bool - """ - raise NotImplementedError() - - @abc.abstractmethod - def revert(self): - """Revert this action - - This method should rollback the resource to its initial state in the - event of a faulty execution. This happens when the action raised an - exception during its :py:meth:`~.BaseAction.execute`. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def pre_condition(self): - """Hook: called before the execution of an action - - This method can be used to perform some initializations or to make - some more advanced validation on its input parameters. So if you wish - to block its execution based on this factor, `raise` the related - exception. - """ - raise NotImplementedError() - - @abc.abstractmethod - def post_condition(self): - """Hook: called after the execution of an action - - This function is called regardless of whether an action succeeded or - not. So you can use it to perform cleanup operations. - """ - raise NotImplementedError() - - @abc.abstractproperty - def schema(self): - """Defines a Schema that the input parameters shall comply to - - :returns: A schema declaring the input parameters this action should be - provided along with their respective constraints - :rtype: :py:class:`voluptuous.Schema` instance - """ - raise NotImplementedError() - - def validate_parameters(self): - try: - jsonschema.validate(self.input_parameters, self.schema) - return True - except jsonschema.ValidationError as e: - raise e - - @abc.abstractmethod - def get_description(self): - """Description of the action""" - raise NotImplementedError() - - def check_abort(self): - if self.__class__.__name__ is 'Migrate': - if self.migration_type == self.LIVE_MIGRATION: - return True - else: - return False - else: - return bool(self.__class__.__name__ in self.ABORT_TRUE) diff --git a/watcher/applier/actions/change_node_power_state.py b/watcher/applier/actions/change_node_power_state.py deleted file mode 100644 index 1a085e9..0000000 --- a/watcher/applier/actions/change_node_power_state.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 ZTE -# -# Authors: Li Canwei -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import enum - -from watcher._i18n import _ -from watcher.applier.actions import base -from watcher.common import exception - - -class NodeState(enum.Enum): - POWERON = 'on' - POWEROFF = 'off' - - -class ChangeNodePowerState(base.BaseAction): - """Compute node power on/off - - By using this action, you will be able to on/off the power of a - compute node. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, - 'state': str, - }) - - The `resource_id` references a ironic node id (list of available - ironic node is returned by this command: ``ironic node-list``). - The `state` value should either be `on` or `off`. 
- """ - - STATE = 'state' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'resource_id': { - 'type': 'string', - "minlength": 1 - }, - 'state': { - 'type': 'string', - 'enum': [NodeState.POWERON.value, - NodeState.POWEROFF.value] - } - }, - 'required': ['resource_id', 'state'], - 'additionalProperties': False, - } - - @property - def node_uuid(self): - return self.resource_id - - @property - def state(self): - return self.input_parameters.get(self.STATE) - - def execute(self): - target_state = self.state - return self._node_manage_power(target_state) - - def revert(self): - if self.state == NodeState.POWERON.value: - target_state = NodeState.POWEROFF.value - elif self.state == NodeState.POWEROFF.value: - target_state = NodeState.POWERON.value - return self._node_manage_power(target_state) - - def _node_manage_power(self, state): - if state is None: - raise exception.IllegalArgumentException( - message=_("The target state is not defined")) - - result = False - ironic_client = self.osc.ironic() - nova_client = self.osc.nova() - if state == NodeState.POWEROFF.value: - node_info = ironic_client.node.get(self.node_uuid).to_dict() - compute_node_id = node_info['extra']['compute_node_id'] - compute_node = nova_client.hypervisors.get(compute_node_id) - compute_node = compute_node.to_dict() - if (compute_node['running_vms'] == 0): - result = ironic_client.node.set_power_state( - self.node_uuid, state) - else: - result = ironic_client.node.set_power_state(self.node_uuid, state) - return result - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return ("Compute node power on/off through ironic.") diff --git a/watcher/applier/actions/change_nova_service_state.py b/watcher/applier/actions/change_nova_service_state.py deleted file mode 100644 index a2d9792..0000000 --- a/watcher/applier/actions/change_nova_service_state.py +++ /dev/null @@ -1,115 +0,0 @@ 
-# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from watcher._i18n import _ -from watcher.applier.actions import base -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.model import element - - -class ChangeNovaServiceState(base.BaseAction): - """Disables or enables the nova-compute service, deployed on a host - - By using this action, you will be able to update the state of a - nova-compute service. A disabled nova-compute service can not be selected - by the nova scheduler for future deployment of server. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, - 'state': str, - }) - - The `resource_id` references a nova-compute service name (list of available - nova-compute services is returned by this command: ``nova service-list - --binary nova-compute``). - The `state` value should either be `ONLINE` or `OFFLINE`. 
- """ - - STATE = 'state' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'resource_id': { - 'type': 'string', - "minlength": 1 - }, - 'state': { - 'type': 'string', - 'enum': [element.ServiceState.ONLINE.value, - element.ServiceState.OFFLINE.value, - element.ServiceState.ENABLED.value, - element.ServiceState.DISABLED.value] - } - }, - 'required': ['resource_id', 'state'], - 'additionalProperties': False, - } - - @property - def host(self): - return self.resource_id - - @property - def state(self): - return self.input_parameters.get(self.STATE) - - def execute(self): - target_state = None - if self.state == element.ServiceState.DISABLED.value: - target_state = False - elif self.state == element.ServiceState.ENABLED.value: - target_state = True - return self._nova_manage_service(target_state) - - def revert(self): - target_state = None - if self.state == element.ServiceState.DISABLED.value: - target_state = True - elif self.state == element.ServiceState.ENABLED.value: - target_state = False - return self._nova_manage_service(target_state) - - def _nova_manage_service(self, state): - if state is None: - raise exception.IllegalArgumentException( - message=_("The target state is not defined")) - - nova = nova_helper.NovaHelper(osc=self.osc) - if state is True: - return nova.enable_service_nova_compute(self.host) - else: - return nova.disable_service_nova_compute(self.host) - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return ("Disables or enables the nova-compute service." 
- "A disabled nova-compute service can not be selected " - "by the nova for future deployment of new server.") diff --git a/watcher/applier/actions/factory.py b/watcher/applier/actions/factory.py deleted file mode 100644 index 037e0db..0000000 --- a/watcher/applier/actions/factory.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from __future__ import unicode_literals - -from oslo_log import log - -from watcher.applier.loading import default - -LOG = log.getLogger(__name__) - - -class ActionFactory(object): - def __init__(self): - self.action_loader = default.DefaultActionLoader() - - def make_action(self, object_action, osc=None): - LOG.debug("Creating instance of %s", object_action.action_type) - loaded_action = self.action_loader.load(name=object_action.action_type, - osc=osc) - loaded_action.input_parameters = object_action.input_parameters - LOG.debug("Checking the input parameters") - # NOTE(jed) if we change the schema of an action and we try to reload - # an older version of the Action, the validation can fail. - # We need to add the versioning of an Action or a migration tool. - # We can also create an new Action which extends the previous one. 
- loaded_action.validate_parameters() - return loaded_action diff --git a/watcher/applier/actions/migration.py b/watcher/applier/actions/migration.py deleted file mode 100644 index 9763c3e..0000000 --- a/watcher/applier/actions/migration.py +++ /dev/null @@ -1,211 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from oslo_log import log -from watcher._i18n import _ -from watcher.applier.actions import base -from watcher.common import exception -from watcher.common import nova_helper - -LOG = log.getLogger(__name__) - - -class Migrate(base.BaseAction): - """Migrates a server to a destination nova-compute host - - This action will allow you to migrate a server to another compute - destination host. - Migration type 'live' can only be used for migrating active VMs. - Migration type 'cold' can be used for migrating non-active VMs - as well active VMs, which will be shut down while migrating. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, # should be a UUID - 'migration_type': str, # choices -> "live", "cold" - 'destination_node': str, - 'source_node': str, - }) - - The `resource_id` is the UUID of the server to migrate. 
- The `source_node` and `destination_node` parameters are respectively the - source and the destination compute hostname (list of available compute - hosts is returned by this command: ``nova service-list --binary - nova-compute``). - """ - - # input parameters constants - MIGRATION_TYPE = 'migration_type' - LIVE_MIGRATION = 'live' - COLD_MIGRATION = 'cold' - DESTINATION_NODE = 'destination_node' - SOURCE_NODE = 'source_node' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'destination_node': { - "anyof": [ - {'type': 'string', "minLength": 1}, - {'type': 'None'} - ] - }, - 'migration_type': { - 'type': 'string', - "enum": ["live", "cold"] - }, - 'resource_id': { - 'type': 'string', - "minlength": 1, - "pattern": ("^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-" - "([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-" - "([a-fA-F0-9]){12}$") - }, - 'source_node': { - 'type': 'string', - "minLength": 1 - } - }, - 'required': ['migration_type', 'resource_id', 'source_node'], - 'additionalProperties': False, - } - - @property - def instance_uuid(self): - return self.resource_id - - @property - def migration_type(self): - return self.input_parameters.get(self.MIGRATION_TYPE) - - @property - def destination_node(self): - return self.input_parameters.get(self.DESTINATION_NODE) - - @property - def source_node(self): - return self.input_parameters.get(self.SOURCE_NODE) - - def _live_migrate_instance(self, nova, destination): - result = None - try: - result = nova.live_migrate_instance(instance_id=self.instance_uuid, - dest_hostname=destination) - except nova_helper.nvexceptions.ClientException as e: - if e.code == 400: - LOG.debug("Live migration of instance %s failed. " - "Trying to live migrate using block migration." 
- % self.instance_uuid) - result = nova.live_migrate_instance( - instance_id=self.instance_uuid, - dest_hostname=destination, - block_migration=True) - else: - LOG.debug("Nova client exception occurred while live " - "migrating instance %s.Exception: %s" % - (self.instance_uuid, e)) - except Exception: - LOG.critical("Unexpected error occurred. Migration failed for " - "instance %s. Leaving instance on previous " - "host.", self.instance_uuid) - - return result - - def _cold_migrate_instance(self, nova, destination): - result = None - try: - result = nova.watcher_non_live_migrate_instance( - instance_id=self.instance_uuid, - dest_hostname=destination) - except Exception as exc: - LOG.exception(exc) - LOG.critical("Unexpected error occurred. Migration failed for " - "instance %s. Leaving instance on previous " - "host.", self.instance_uuid) - return result - - def _abort_cold_migrate(self, nova): - # TODO(adisky): currently watcher uses its own version of cold migrate - # implement cold migrate using nova dependent on the blueprint - # https://blueprints.launchpad.net/nova/+spec/cold-migration-with-target - # Abort operation for cold migrate is dependent on blueprint - # https://blueprints.launchpad.net/nova/+spec/abort-cold-migration - LOG.warning("Abort operation for cold migration is not implemented") - - def _abort_live_migrate(self, nova, source, destination): - return nova.abort_live_migrate(instance_id=self.instance_uuid, - source=source, destination=destination) - - def migrate(self, destination=None): - nova = nova_helper.NovaHelper(osc=self.osc) - if destination is None: - LOG.debug("Migrating instance %s, destination node will be " - "determined by nova-scheduler", self.instance_uuid) - else: - LOG.debug("Migrate instance %s to %s", self.instance_uuid, - destination) - instance = nova.find_instance(self.instance_uuid) - if instance: - if self.migration_type == self.LIVE_MIGRATION: - return self._live_migrate_instance(nova, destination) - elif 
self.migration_type == self.COLD_MIGRATION: - return self._cold_migrate_instance(nova, destination) - else: - raise exception.Invalid( - message=(_("Migration of type '%(migration_type)s' is not " - "supported.") % - {'migration_type': self.migration_type})) - else: - raise exception.InstanceNotFound(name=self.instance_uuid) - - def execute(self): - return self.migrate(destination=self.destination_node) - - def revert(self): - return self.migrate(destination=self.source_node) - - def abort(self): - nova = nova_helper.NovaHelper(osc=self.osc) - instance = nova.find_instance(self.instance_uuid) - if instance: - if self.migration_type == self.COLD_MIGRATION: - return self._abort_cold_migrate(nova) - elif self.migration_type == self.LIVE_MIGRATION: - return self._abort_live_migrate( - nova, source=self.source_node, - destination=self.destination_node) - else: - raise exception.InstanceNotFound(name=self.instance_uuid) - - def pre_condition(self): - # TODO(jed): check if the instance exists / check if the instance is on - # the source_node - pass - - def post_condition(self): - # TODO(jed): check extra parameters (network response, etc.) - pass - - def get_description(self): - """Description of the action""" - return "Moving a VM instance from source_node to destination_node" diff --git a/watcher/applier/actions/nop.py b/watcher/applier/actions/nop.py deleted file mode 100644 index 3bd9220..0000000 --- a/watcher/applier/actions/nop.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log - -from watcher.applier.actions import base - -LOG = log.getLogger(__name__) - - -class Nop(base.BaseAction): - """logs a message - - The action schema is:: - - schema = Schema({ - 'message': str, - }) - - The `message` is the actual message that will be logged. - """ - - MESSAGE = 'message' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'message': { - 'type': ['string', 'null'] - } - }, - 'required': ['message'], - 'additionalProperties': False, - } - - @property - def message(self): - return self.input_parameters.get(self.MESSAGE) - - def execute(self): - LOG.debug("Executing action NOP message: %s ", self.message) - return True - - def revert(self): - LOG.debug("Revert action NOP") - return True - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return "Logging a NOP message" - - def abort(self): - LOG.debug("Abort action NOP") - return True diff --git a/watcher/applier/actions/resize.py b/watcher/applier/actions/resize.py deleted file mode 100644 index 561e545..0000000 --- a/watcher/applier/actions/resize.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log - -from watcher.applier.actions import base -from watcher.common import nova_helper - -LOG = log.getLogger(__name__) - - -class Resize(base.BaseAction): - """Resizes a server with specified flavor. - - This action will allow you to resize a server to another flavor. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, # should be a UUID - 'flavor': str, # should be either ID or Name of Flavor - }) - - The `resource_id` is the UUID of the server to resize. - The `flavor` is the ID or Name of Flavor (Nova accepts either ID or Name - of Flavor to resize() function). 
- """ - - # input parameters constants - FLAVOR = 'flavor' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'resource_id': { - 'type': 'string', - 'minlength': 1, - 'pattern': ('^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-' - '([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-' - '([a-fA-F0-9]){12}$') - }, - 'flavor': { - 'type': 'string', - 'minlength': 1, - }, - }, - 'required': ['resource_id', 'flavor'], - 'additionalProperties': False, - } - - @property - def instance_uuid(self): - return self.resource_id - - @property - def flavor(self): - return self.input_parameters.get(self.FLAVOR) - - def resize(self): - nova = nova_helper.NovaHelper(osc=self.osc) - LOG.debug("Resize instance %s to %s flavor", self.instance_uuid, - self.flavor) - instance = nova.find_instance(self.instance_uuid) - result = None - if instance: - try: - result = nova.resize_instance( - instance_id=self.instance_uuid, flavor=self.flavor) - except Exception as exc: - LOG.exception(exc) - LOG.critical( - "Unexpected error occurred. Resizing failed for " - "instance %s.", self.instance_uuid) - return result - - def execute(self): - return self.resize() - - def revert(self): - return self.migrate(destination=self.source_node) - - def pre_condition(self): - # TODO(jed): check if the instance exists / check if the instance is on - # the source_node - pass - - def post_condition(self): - # TODO(jed): check extra parameters (network response, etc.) - pass - - def get_description(self): - """Description of the action""" - return "Resize a server with specified flavor." diff --git a/watcher/applier/actions/sleep.py b/watcher/applier/actions/sleep.py deleted file mode 100644 index 5865c22..0000000 --- a/watcher/applier/actions/sleep.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import time - -from oslo_log import log -from watcher.applier.actions import base - -LOG = log.getLogger(__name__) - - -class Sleep(base.BaseAction): - """Makes the executor of the action plan wait for a given duration - - The action schema is:: - - schema = Schema({ - 'duration': float, - }) - - The `duration` is expressed in seconds. - """ - - DURATION = 'duration' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'duration': { - 'type': 'number', - 'minimum': 0 - }, - }, - 'required': ['duration'], - 'additionalProperties': False, - } - - @property - def duration(self): - return int(self.input_parameters.get(self.DURATION)) - - def execute(self): - LOG.debug("Starting action sleep with duration: %s ", self.duration) - time.sleep(self.duration) - return True - - def revert(self): - LOG.debug("Revert action sleep") - return True - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return "Wait for a given interval in seconds." - - def abort(self): - LOG.debug("Abort action sleep") - return True diff --git a/watcher/applier/base.py b/watcher/applier/base.py deleted file mode 100644 index daa4097..0000000 --- a/watcher/applier/base.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -This component is in charge of executing the -:ref:`Action Plan ` built by the -:ref:`Watcher Decision Engine `. - -See: :doc:`../architecture` for more details on this component. -""" - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseApplier(object): - @abc.abstractmethod - def execute(self, action_plan_uuid): - raise NotImplementedError() diff --git a/watcher/applier/default.py b/watcher/applier/default.py deleted file mode 100755 index aac85d6..0000000 --- a/watcher/applier/default.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from oslo_config import cfg -from oslo_log import log - -from watcher.applier import base -from watcher.applier.loading import default -from watcher import objects - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -class DefaultApplier(base.BaseApplier): - def __init__(self, context, applier_manager): - super(DefaultApplier, self).__init__() - self._applier_manager = applier_manager - self._loader = default.DefaultWorkFlowEngineLoader() - self._engine = None - self._context = context - - @property - def context(self): - return self._context - - @property - def applier_manager(self): - return self._applier_manager - - @property - def engine(self): - if self._engine is None: - selected_workflow_engine = CONF.watcher_applier.workflow_engine - LOG.debug("Loading workflow engine %s ", selected_workflow_engine) - self._engine = self._loader.load( - name=selected_workflow_engine, - context=self.context, - applier_manager=self.applier_manager) - return self._engine - - def execute(self, action_plan_uuid): - LOG.debug("Executing action plan %s ", action_plan_uuid) - - filters = {'action_plan_uuid': action_plan_uuid} - actions = objects.Action.list(self.context, filters=filters, - eager=True) - return self.engine.execute(actions) diff --git a/watcher/applier/loading/__init__.py b/watcher/applier/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/loading/default.py b/watcher/applier/loading/default.py deleted file mode 100644 index c4d58d9..0000000 --- a/watcher/applier/loading/default.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - - -from watcher.common.loader import default - - -class DefaultWorkFlowEngineLoader(default.DefaultLoader): - def __init__(self): - super(DefaultWorkFlowEngineLoader, self).__init__( - namespace='watcher_workflow_engines') - - -class DefaultActionLoader(default.DefaultLoader): - def __init__(self): - super(DefaultActionLoader, self).__init__( - namespace='watcher_actions') diff --git a/watcher/applier/manager.py b/watcher/applier/manager.py deleted file mode 100644 index 03a2356..0000000 --- a/watcher/applier/manager.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from watcher.applier.messaging import trigger -from watcher.common import service_manager - -from watcher import conf - -CONF = conf.CONF - - -class ApplierManager(service_manager.ServiceManager): - - @property - def service_name(self): - return 'watcher-applier' - - @property - def api_version(self): - return '1.0' - - @property - def publisher_id(self): - return CONF.watcher_applier.publisher_id - - @property - def conductor_topic(self): - return CONF.watcher_applier.conductor_topic - - @property - def notification_topics(self): - return [] - - @property - def conductor_endpoints(self): - return [trigger.TriggerActionPlan] - - @property - def notification_endpoints(self): - return [] diff --git a/watcher/applier/messaging/__init__.py b/watcher/applier/messaging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/messaging/trigger.py b/watcher/applier/messaging/trigger.py deleted file mode 100644 index 1c4b3a7..0000000 --- a/watcher/applier/messaging/trigger.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from concurrent import futures - -from oslo_config import cfg -from oslo_log import log - -from watcher.applier.action_plan import default - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -class TriggerActionPlan(object): - def __init__(self, applier_manager): - self.applier_manager = applier_manager - workers = CONF.watcher_applier.workers - self.executor = futures.ThreadPoolExecutor(max_workers=workers) - - def do_launch_action_plan(self, context, action_plan_uuid): - try: - cmd = default.DefaultActionPlanHandler(context, - self.applier_manager, - action_plan_uuid) - cmd.execute() - except Exception as e: - LOG.exception(e) - - def launch_action_plan(self, context, action_plan_uuid): - LOG.debug("Trigger ActionPlan %s", action_plan_uuid) - # submit - self.executor.submit(self.do_launch_action_plan, context, - action_plan_uuid) - return action_plan_uuid diff --git a/watcher/applier/rpcapi.py b/watcher/applier/rpcapi.py deleted file mode 100644 index 6788dc6..0000000 --- a/watcher/applier/rpcapi.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.common import exception -from watcher.common import service -from watcher.common import service_manager -from watcher.common import utils - -from watcher import conf - -CONF = conf.CONF - - -class ApplierAPI(service.Service): - - def __init__(self): - super(ApplierAPI, self).__init__(ApplierAPIManager) - - def launch_action_plan(self, context, action_plan_uuid=None): - if not utils.is_uuid_like(action_plan_uuid): - raise exception.InvalidUuidOrName(name=action_plan_uuid) - - self.conductor_client.cast( - context, 'launch_action_plan', action_plan_uuid=action_plan_uuid) - - -class ApplierAPIManager(service_manager.ServiceManager): - - @property - def service_name(self): - return None - - @property - def api_version(self): - return '1.0' - - @property - def publisher_id(self): - return CONF.watcher_applier.publisher_id - - @property - def conductor_topic(self): - return CONF.watcher_applier.conductor_topic - - @property - def notification_topics(self): - return [] - - @property - def conductor_endpoints(self): - return [] - - @property - def notification_endpoints(self): - return [] diff --git a/watcher/applier/workflow_engine/__init__.py b/watcher/applier/workflow_engine/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/workflow_engine/base.py b/watcher/applier/workflow_engine/base.py deleted file mode 100644 index 3e0c60f..0000000 --- a/watcher/applier/workflow_engine/base.py +++ /dev/null @@ -1,260 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -import abc -import six -import time - -import eventlet - -from oslo_log import log -from taskflow import task as flow_task - -from watcher.applier.actions import factory -from watcher.common import clients -from watcher.common import exception -from watcher.common.loader import loadable -from watcher import notifications -from watcher import objects -from watcher.objects import fields - - -LOG = log.getLogger(__name__) - -CANCEL_STATE = [objects.action_plan.State.CANCELLING, - objects.action_plan.State.CANCELLED] - - -@six.add_metaclass(abc.ABCMeta) -class BaseWorkFlowEngine(loadable.Loadable): - - def __init__(self, config, context=None, applier_manager=None): - """Constructor - - :param config: A mapping containing the configuration of this - workflow engine - :type config: dict - :param osc: an OpenStackClients object, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - super(BaseWorkFlowEngine, self).__init__(config) - self._context = context - self._applier_manager = applier_manager - self._action_factory = factory.ActionFactory() - self._osc = None - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @property - def context(self): - return self._context - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @property - def applier_manager(self): - return self._applier_manager - - @property - def action_factory(self): - return self._action_factory - - def notify(self, action, state): - db_action = objects.Action.get_by_uuid(self.context, action.uuid, - eager=True) - db_action.state = state - db_action.save() - - @abc.abstractmethod - def 
execute(self, actions): - raise NotImplementedError() - - -class BaseTaskFlowActionContainer(flow_task.Task): - - def __init__(self, name, db_action, engine, **kwargs): - super(BaseTaskFlowActionContainer, self).__init__(name=name) - self._db_action = db_action - self._engine = engine - self.loaded_action = None - - @property - def engine(self): - return self._engine - - @property - def action(self): - if self.loaded_action is None: - action = self.engine.action_factory.make_action( - self._db_action, - osc=self._engine.osc) - self.loaded_action = action - return self.loaded_action - - @abc.abstractmethod - def do_pre_execute(self): - raise NotImplementedError() - - @abc.abstractmethod - def do_execute(self, *args, **kwargs): - raise NotImplementedError() - - @abc.abstractmethod - def do_post_execute(self): - raise NotImplementedError() - - @abc.abstractmethod - def do_revert(self): - raise NotImplementedError() - - @abc.abstractmethod - def do_abort(self, *args, **kwargs): - raise NotImplementedError() - - # NOTE(alexchadin): taskflow does 3 method calls (pre_execute, execute, - # post_execute) independently. We want to support notifications in base - # class, so child's methods should be named with `do_` prefix and wrapped. - def pre_execute(self): - try: - # NOTE(adisky): check the state of action plan before starting - # next action, if action plan is cancelled raise the exceptions - # so that taskflow does not schedule further actions. 
- action_plan = objects.ActionPlan.get_by_id( - self.engine.context, self._db_action.action_plan_id) - if action_plan.state in CANCEL_STATE: - raise exception.ActionPlanCancelled(uuid=action_plan.uuid) - self.do_pre_execute() - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.START) - except exception.ActionPlanCancelled as e: - LOG.exception(e) - raise - except Exception as e: - LOG.exception(e) - self.engine.notify(self._db_action, objects.action.State.FAILED) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.ERROR, - priority=fields.NotificationPriority.ERROR) - - def execute(self, *args, **kwargs): - def _do_execute_action(*args, **kwargs): - try: - self.do_execute(*args, **kwargs) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.END) - except Exception as e: - LOG.exception(e) - LOG.error('The workflow engine has failed' - 'to execute the action: %s', self.name) - self.engine.notify(self._db_action, - objects.action.State.FAILED) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.ERROR, - priority=fields.NotificationPriority.ERROR) - raise - # NOTE: spawn a new thread for action execution, so that if action plan - # is cancelled workflow engine will not wait to finish action execution - et = eventlet.spawn(_do_execute_action, *args, **kwargs) - # NOTE: check for the state of action plan periodically,so that if - # action is finished or action plan is cancelled we can exit from here. 
- while True: - action_object = objects.Action.get_by_uuid( - self.engine.context, self._db_action.uuid, eager=True) - action_plan_object = objects.ActionPlan.get_by_id( - self.engine.context, action_object.action_plan_id) - if (action_object.state in [objects.action.State.SUCCEEDED, - objects.action.State.FAILED] or - action_plan_object.state in CANCEL_STATE): - break - time.sleep(1) - try: - # NOTE: kill the action execution thread, if action plan is - # cancelled for all other cases wait for the result from action - # execution thread. - # Not all actions support abort operations, kill only those action - # which support abort operations - abort = self.action.check_abort() - if (action_plan_object.state in CANCEL_STATE and abort): - et.kill() - et.wait() - - # NOTE: catch the greenlet exit exception due to thread kill, - # taskflow will call revert for the action, - # we will redirect it to abort. - except eventlet.greenlet.GreenletExit: - raise exception.ActionPlanCancelled(uuid=action_plan_object.uuid) - - except Exception as e: - LOG.exception(e) - raise - - def post_execute(self): - try: - self.do_post_execute() - except Exception as e: - LOG.exception(e) - self.engine.notify(self._db_action, objects.action.State.FAILED) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.ERROR, - priority=fields.NotificationPriority.ERROR) - - def revert(self, *args, **kwargs): - action_plan = objects.ActionPlan.get_by_id( - self.engine.context, self._db_action.action_plan_id, eager=True) - # NOTE: check if revert cause by cancel action plan or - # some other exception occured during action plan execution - # if due to some other exception keep the flow intact. 
- if action_plan.state not in CANCEL_STATE: - self.do_revert() - return - - action_object = objects.Action.get_by_uuid( - self.engine.context, self._db_action.uuid, eager=True) - if action_object.state == objects.action.State.ONGOING: - action_object.state = objects.action.State.CANCELLING - action_object.save() - self.abort() - elif action_object.state == objects.action.State.PENDING: - action_object.state = objects.action.State.CANCELLED - action_object.save() - else: - pass - - def abort(self, *args, **kwargs): - self.do_abort(*args, **kwargs) diff --git a/watcher/applier/workflow_engine/default.py b/watcher/applier/workflow_engine/default.py deleted file mode 100644 index 4080de7..0000000 --- a/watcher/applier/workflow_engine/default.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log -from taskflow import engines -from taskflow import exceptions as tf_exception -from taskflow.patterns import graph_flow as gf -from taskflow import task as flow_task - -from watcher.applier.workflow_engine import base -from watcher.common import exception -from watcher import objects - -LOG = log.getLogger(__name__) - - -class DefaultWorkFlowEngine(base.BaseWorkFlowEngine): - """Taskflow as a workflow engine for Watcher - - Full documentation on taskflow at - http://docs.openstack.org/developer/taskflow/ - """ - - def decider(self, history): - # FIXME(jed) not possible with the current Watcher Planner - # - # decider – A callback function that will be expected to - # decide at runtime whether v should be allowed to execute - # (or whether the execution of v should be ignored, - # and therefore not executed). It is expected to take as single - # keyword argument history which will be the execution results of - # all u decideable links that have v as a target. It is expected - # to return a single boolean - # (True to allow v execution or False to not). - return True - - @classmethod - def get_config_opts(cls): - return [ - cfg.IntOpt( - 'max_workers', - default=processutils.get_worker_count(), - min=1, - required=True, - help='Number of workers for taskflow engine ' - 'to execute actions.') - ] - - def execute(self, actions): - try: - # NOTE(jed) We want to have a strong separation of concern - # between the Watcher planner and the Watcher Applier in order - # to us the possibility to support several workflow engine. - # We want to provide the 'taskflow' engine by - # default although we still want to leave the possibility for - # the users to change it. - # The current implementation uses graph with linked actions. 
- # todo(jed) add olso conf for retry and name - flow = gf.Flow("watcher_flow") - actions_uuid = {} - for a in actions: - task = TaskFlowActionContainer(a, self) - flow.add(task) - actions_uuid[a.uuid] = task - - for a in actions: - for parent_id in a.parents: - flow.link(actions_uuid[parent_id], actions_uuid[a.uuid], - decider=self.decider) - - e = engines.load( - flow, engine='parallel', - max_workers=self.config.max_workers) - e.run() - - return flow - - except exception.ActionPlanCancelled as e: - raise - - except tf_exception.WrappedFailure as e: - if e.check("watcher.common.exception.ActionPlanCancelled"): - raise exception.ActionPlanCancelled - else: - raise exception.WorkflowExecutionException(error=e) - - except Exception as e: - raise exception.WorkflowExecutionException(error=e) - - -class TaskFlowActionContainer(base.BaseTaskFlowActionContainer): - def __init__(self, db_action, engine): - name = "action_type:{0} uuid:{1}".format(db_action.action_type, - db_action.uuid) - super(TaskFlowActionContainer, self).__init__(name, db_action, engine) - - def do_pre_execute(self): - self.engine.notify(self._db_action, objects.action.State.ONGOING) - LOG.debug("Pre-condition action: %s", self.name) - self.action.pre_condition() - - def do_execute(self, *args, **kwargs): - LOG.debug("Running action: %s", self.name) - - # NOTE: For result is False, set action state fail - result = self.action.execute() - if result is False: - self.engine.notify(self._db_action, - objects.action.State.FAILED) - else: - self.engine.notify(self._db_action, - objects.action.State.SUCCEEDED) - - def do_post_execute(self): - LOG.debug("Post-condition action: %s", self.name) - self.action.post_condition() - - def do_revert(self, *args, **kwargs): - LOG.warning("Revert action: %s", self.name) - try: - # TODO(jed): do we need to update the states in case of failure? - self.action.revert() - except Exception as e: - LOG.exception(e) - LOG.critical("Oops! 
We need a disaster recover plan.") - - def do_abort(self, *args, **kwargs): - LOG.warning("Aborting action: %s", self.name) - try: - result = self.action.abort() - if result: - # Aborted the action. - self.engine.notify(self._db_action, - objects.action.State.CANCELLED) - else: - self.engine.notify(self._db_action, - objects.action.State.SUCCEEDED) - except Exception as e: - self.engine.notify(self._db_action, objects.action.State.FAILED) - LOG.exception(e) - - -class TaskFlowNop(flow_task.Task): - """This class is used in case of the workflow have only one Action. - - We need at least two atoms to create a link. - """ - def execute(self): - pass diff --git a/watcher/cmd/__init__.py b/watcher/cmd/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/cmd/api.py b/watcher/cmd/api.py deleted file mode 100644 index 58c27e2..0000000 --- a/watcher/cmd/api.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Starter script for the Watcher API service.""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging - -from watcher.api import scheduling -from watcher.common import service -from watcher import conf - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - service.prepare_service(sys.argv, CONF) - - host, port = cfg.CONF.api.host, cfg.CONF.api.port - protocol = "http" if not CONF.api.enable_ssl_api else "https" - # Build and start the WSGI app - server = service.WSGIService('watcher-api', CONF.api.enable_ssl_api) - - if host == '127.0.0.1': - LOG.info('serving on 127.0.0.1:%(port)s, ' - 'view at %(protocol)s://127.0.0.1:%(port)s' % - dict(protocol=protocol, port=port)) - else: - LOG.info('serving on %(protocol)s://%(host)s:%(port)s' % - dict(protocol=protocol, host=host, port=port)) - - api_schedule = scheduling.APISchedulingService() - api_schedule.start() - - launcher = service.launch(CONF, server, workers=server.workers) - launcher.wait() diff --git a/watcher/cmd/applier.py b/watcher/cmd/applier.py deleted file mode 100644 index 364a9ba..0000000 --- a/watcher/cmd/applier.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Starter script for the Applier service.""" - -import os -import sys - -from oslo_log import log as logging - -from watcher.applier import manager -from watcher.common import service as watcher_service -from watcher import conf - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - watcher_service.prepare_service(sys.argv, CONF) - - LOG.info('Starting Watcher Applier service in PID %s', os.getpid()) - - applier_service = watcher_service.Service(manager.ApplierManager) - - # Only 1 process - launcher = watcher_service.launch(CONF, applier_service) - launcher.wait() diff --git a/watcher/cmd/dbmanage.py b/watcher/cmd/dbmanage.py deleted file mode 100644 index 883efaa..0000000 --- a/watcher/cmd/dbmanage.py +++ /dev/null @@ -1,157 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Run storage database migration. 
-""" - -import sys - -from oslo_config import cfg - -from watcher.common import service -from watcher import conf -from watcher.db import migration -from watcher.db import purge - -CONF = conf.CONF - - -class DBCommand(object): - - @staticmethod - def upgrade(): - migration.upgrade(CONF.command.revision) - - @staticmethod - def downgrade(): - migration.downgrade(CONF.command.revision) - - @staticmethod - def revision(): - migration.revision(CONF.command.message, CONF.command.autogenerate) - - @staticmethod - def stamp(): - migration.stamp(CONF.command.revision) - - @staticmethod - def version(): - print(migration.version()) - - @staticmethod - def create_schema(): - migration.create_schema() - - @staticmethod - def purge(): - purge.purge(CONF.command.age_in_days, CONF.command.max_number, - CONF.command.goal, CONF.command.exclude_orphans, - CONF.command.dry_run) - - -def add_command_parsers(subparsers): - parser = subparsers.add_parser( - 'upgrade', - help="Upgrade the database schema to the latest version. " - "Optionally, use --revision to specify an alembic revision " - "string to upgrade to.") - parser.set_defaults(func=DBCommand.upgrade) - parser.add_argument('--revision', nargs='?') - - parser = subparsers.add_parser( - 'downgrade', - help="Downgrade the database schema to the oldest revision. " - "While optional, one should generally use --revision to " - "specify the alembic revision string to downgrade to.") - parser.set_defaults(func=DBCommand.downgrade) - parser.add_argument('--revision', nargs='?') - - parser = subparsers.add_parser('stamp') - parser.add_argument('revision', nargs='?') - parser.set_defaults(func=DBCommand.stamp) - - parser = subparsers.add_parser( - 'revision', - help="Create a new alembic revision. 
" - "Use --message to set the message string.") - parser.add_argument('-m', '--message') - parser.add_argument('--autogenerate', action='store_true') - parser.set_defaults(func=DBCommand.revision) - - parser = subparsers.add_parser( - 'version', - help="Print the current version information and exit.") - parser.set_defaults(func=DBCommand.version) - - parser = subparsers.add_parser( - 'create_schema', - help="Create the database schema.") - parser.set_defaults(func=DBCommand.create_schema) - - parser = subparsers.add_parser( - 'purge', - help="Purge the database.") - parser.add_argument('-d', '--age-in-days', - help="Number of days since deletion (from today) " - "to exclude from the purge. If None, everything " - "will be purged.", - type=int, default=None, nargs='?') - parser.add_argument('-n', '--max-number', - help="Max number of objects expected to be deleted. " - "Prevents the deletion if exceeded. No limit if " - "set to None.", - type=int, default=None, nargs='?') - parser.add_argument('-t', '--goal', - help="UUID or name of the goal to purge.", - type=str, default=None, nargs='?') - parser.add_argument('-e', '--exclude-orphans', action='store_true', - help="Flag to indicate whether or not you want to " - "exclude orphans from deletion (default: False).", - default=False) - parser.add_argument('--dry-run', action='store_true', - help="Flag to indicate whether or not you want to " - "perform a dry run (no deletion).", - default=False) - - parser.set_defaults(func=DBCommand.purge) - - -command_opt = cfg.SubCommandOpt('command', - title='Command', - help='Available commands', - handler=add_command_parsers) - - -def register_sub_command_opts(): - cfg.CONF.register_cli_opt(command_opt) - - -def main(): - register_sub_command_opts() - # this is hack to work with previous usage of watcher-dbsync - # pls change it to watcher-dbsync upgrade - valid_commands = set([ - 'upgrade', 'downgrade', 'revision', - 'version', 'stamp', 'create_schema', - 'purge', - ]) - if not 
set(sys.argv).intersection(valid_commands): - sys.argv.append('upgrade') - - service.prepare_service(sys.argv, CONF) - CONF.command.func() diff --git a/watcher/cmd/decisionengine.py b/watcher/cmd/decisionengine.py deleted file mode 100644 index ac172ee..0000000 --- a/watcher/cmd/decisionengine.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for the Decision Engine manager service.""" - -import os -import sys - -from oslo_log import log as logging - -from watcher.common import service as watcher_service -from watcher import conf -from watcher.decision_engine import gmr -from watcher.decision_engine import manager -from watcher.decision_engine import scheduling -from watcher.decision_engine import sync - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - watcher_service.prepare_service(sys.argv, CONF) - gmr.register_gmr_plugins() - - LOG.info('Starting Watcher Decision Engine service in PID %s', - os.getpid()) - - syncer = sync.Syncer() - syncer.sync() - - de_service = watcher_service.Service(manager.DecisionEngineManager) - bg_scheduler_service = scheduling.DecisionEngineSchedulingService() - - # Only 1 process - launcher = watcher_service.launch(CONF, de_service) - launcher.launch_service(bg_scheduler_service) - - launcher.wait() diff --git a/watcher/cmd/sync.py 
b/watcher/cmd/sync.py deleted file mode 100644 index c0cbf38..0000000 --- a/watcher/cmd/sync.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Script for the sync tool.""" - -import sys - -from oslo_log import log as logging - -from watcher.common import service as service -from watcher import conf -from watcher.decision_engine import sync - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - LOG.info('Watcher sync started.') - - service.prepare_service(sys.argv, CONF) - syncer = sync.Syncer() - syncer.sync() - - LOG.info('Watcher sync finished.') diff --git a/watcher/common/__init__.py b/watcher/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/common/cinder_helper.py b/watcher/common/cinder_helper.py deleted file mode 100644 index 72058f8..0000000 --- a/watcher/common/cinder_helper.py +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from oslo_log import log - -from watcher.common import clients -from watcher.common import exception - -LOG = log.getLogger(__name__) - - -class CinderHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.cinder = self.osc.cinder() - - def get_storage_node_list(self): - return list(self.cinder.services.list(binary='cinder-volume')) - - def get_storage_node_by_name(self, name): - """Get storage node by name(host@backendname)""" - try: - storages = list(filter(lambda storage: - storage.host == name, - self.get_storage_node_list())) - if len(storages) != 1: - raise exception.StorageNodeNotFound(name=name) - return storages[0] - except Exception as exc: - LOG.exception(exc) - raise exception.StorageNodeNotFound(name=name) - - def get_storage_pool_list(self): - return self.cinder.pools.list(detailed=True) - - def get_storage_pool_by_name(self, name): - """Get pool by name(host@backend#poolname)""" - try: - pools = list(filter(lambda pool: - pool.name == name, - self.get_storage_pool_list())) - if len(pools) != 1: - raise exception.PoolNotFound(name=name) - return pools[0] - except Exception as exc: - LOG.exception(exc) - raise exception.PoolNotFound(name=name) - - def get_volume_list(self): - return self.cinder.volumes.list(search_opts={'all_tenants': True}) - - def get_volume_type_list(self): - return self.cinder.volume_types.list() - - def get_volume_type_by_backendname(self, backendname): - volume_type_list = self.get_volume_type_list() - - volume_type = list(filter( - lambda volume_type: - volume_type.extra_specs.get( - 'volume_backend_name') == backendname, volume_type_list)) - if volume_type: - return volume_type[0].name - else: - return "" diff --git a/watcher/common/clients.py b/watcher/common/clients.py deleted file mode 100755 index 
a9f0bc7..0000000 --- a/watcher/common/clients.py +++ /dev/null @@ -1,204 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometerclient import client as ceclient -from cinderclient import client as ciclient -from glanceclient import client as glclient -from gnocchiclient import client as gnclient -from ironicclient import client as irclient -from keystoneauth1 import loading as ka_loading -from keystoneclient import client as keyclient -from monascaclient import client as monclient -from neutronclient.neutron import client as netclient -from novaclient import client as nvclient - -from watcher.common import exception - -from watcher import conf - -CONF = conf.CONF - -_CLIENTS_AUTH_GROUP = 'watcher_clients_auth' - - -class OpenStackClients(object): - """Convenience class to create and cache client instances.""" - - def __init__(self): - self.reset_clients() - - def reset_clients(self): - self._session = None - self._keystone = None - self._nova = None - self._glance = None - self._gnocchi = None - self._cinder = None - self._ceilometer = None - self._monasca = None - self._neutron = None - self._ironic = None - - def _get_keystone_session(self): - auth = ka_loading.load_auth_from_conf_options(CONF, - _CLIENTS_AUTH_GROUP) - sess = ka_loading.load_session_from_conf_options(CONF, - _CLIENTS_AUTH_GROUP, - auth=auth) - return sess - - @property - def auth_url(self): - return self.keystone().auth_url - - @property - def session(self): - if not self._session: - 
self._session = self._get_keystone_session() - return self._session - - def _get_client_option(self, client, option): - return getattr(getattr(CONF, '%s_client' % client), option) - - @exception.wrap_keystone_exception - def keystone(self): - if not self._keystone: - self._keystone = keyclient.Client(session=self.session) - - return self._keystone - - @exception.wrap_keystone_exception - def nova(self): - if self._nova: - return self._nova - - novaclient_version = self._get_client_option('nova', 'api_version') - nova_endpoint_type = self._get_client_option('nova', 'endpoint_type') - self._nova = nvclient.Client(novaclient_version, - endpoint_type=nova_endpoint_type, - session=self.session) - return self._nova - - @exception.wrap_keystone_exception - def glance(self): - if self._glance: - return self._glance - - glanceclient_version = self._get_client_option('glance', 'api_version') - glance_endpoint_type = self._get_client_option('glance', - 'endpoint_type') - self._glance = glclient.Client(glanceclient_version, - interface=glance_endpoint_type, - session=self.session) - return self._glance - - @exception.wrap_keystone_exception - def gnocchi(self): - if self._gnocchi: - return self._gnocchi - - gnocchiclient_version = self._get_client_option('gnocchi', - 'api_version') - gnocchiclient_interface = self._get_client_option('gnocchi', - 'endpoint_type') - self._gnocchi = gnclient.Client(gnocchiclient_version, - interface=gnocchiclient_interface, - session=self.session) - return self._gnocchi - - @exception.wrap_keystone_exception - def cinder(self): - if self._cinder: - return self._cinder - - cinderclient_version = self._get_client_option('cinder', 'api_version') - cinder_endpoint_type = self._get_client_option('cinder', - 'endpoint_type') - self._cinder = ciclient.Client(cinderclient_version, - endpoint_type=cinder_endpoint_type, - session=self.session) - return self._cinder - - @exception.wrap_keystone_exception - def ceilometer(self): - if self._ceilometer: - 
return self._ceilometer - - ceilometerclient_version = self._get_client_option('ceilometer', - 'api_version') - ceilometer_endpoint_type = self._get_client_option('ceilometer', - 'endpoint_type') - self._ceilometer = ceclient.get_client( - ceilometerclient_version, - endpoint_type=ceilometer_endpoint_type, - session=self.session) - return self._ceilometer - - @exception.wrap_keystone_exception - def monasca(self): - if self._monasca: - return self._monasca - - monascaclient_version = self._get_client_option( - 'monasca', 'api_version') - monascaclient_interface = self._get_client_option( - 'monasca', 'interface') - token = self.session.get_token() - watcher_clients_auth_config = CONF.get(_CLIENTS_AUTH_GROUP) - service_type = 'monitoring' - monasca_kwargs = { - 'auth_url': watcher_clients_auth_config.auth_url, - 'cert_file': watcher_clients_auth_config.certfile, - 'insecure': watcher_clients_auth_config.insecure, - 'key_file': watcher_clients_auth_config.keyfile, - 'keystone_timeout': watcher_clients_auth_config.timeout, - 'os_cacert': watcher_clients_auth_config.cafile, - 'service_type': service_type, - 'token': token, - 'username': watcher_clients_auth_config.username, - 'password': watcher_clients_auth_config.password, - } - endpoint = self.session.get_endpoint(service_type=service_type, - interface=monascaclient_interface) - - self._monasca = monclient.Client( - monascaclient_version, endpoint, **monasca_kwargs) - - return self._monasca - - @exception.wrap_keystone_exception - def neutron(self): - if self._neutron: - return self._neutron - - neutronclient_version = self._get_client_option('neutron', - 'api_version') - neutron_endpoint_type = self._get_client_option('neutron', - 'endpoint_type') - - self._neutron = netclient.Client(neutronclient_version, - endpoint_type=neutron_endpoint_type, - session=self.session) - self._neutron.format = 'json' - return self._neutron - - @exception.wrap_keystone_exception - def ironic(self): - if self._ironic: - return 
self._ironic - - ironicclient_version = self._get_client_option('ironic', 'api_version') - endpoint_type = self._get_client_option('ironic', 'endpoint_type') - self._ironic = irclient.get_client(ironicclient_version, - ironic_url=endpoint_type, - session=self.session) - return self._ironic diff --git a/watcher/common/config.py b/watcher/common/config.py deleted file mode 100644 index 5ca04e3..0000000 --- a/watcher/common/config.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from watcher.common import rpc -from watcher import version - - -def parse_args(argv, default_config_files=None): - default_config_files = (default_config_files or - cfg.find_config_files(project='watcher')) - rpc.set_defaults(control_exchange='watcher') - cfg.CONF(argv[1:], - project='python-watcher', - version=version.version_info.release_string(), - default_config_files=default_config_files) - rpc.init(cfg.CONF) diff --git a/watcher/common/context.py b/watcher/common/context.py deleted file mode 100644 index 3da7b22..0000000 --- a/watcher/common/context.py +++ /dev/null @@ -1,120 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_context import context -from oslo_log import log as logging -from oslo_utils import timeutils -import six - -from watcher.common import utils - -LOG = logging.getLogger(__name__) - - -class RequestContext(context.RequestContext): - """Extends security contexts from the OpenStack common library.""" - - def __init__(self, user_id=None, project_id=None, is_admin=None, - roles=None, timestamp=None, request_id=None, auth_token=None, - auth_url=None, overwrite=True, user_name=None, - project_name=None, domain_name=None, domain_id=None, - auth_token_info=None, **kwargs): - """Stores several additional request parameters: - - :param domain_id: The ID of the domain. - :param domain_name: The name of the domain. - :param is_public_api: Specifies whether the request should be processed - without authentication. 
- - """ - user = kwargs.pop('user', None) - tenant = kwargs.pop('tenant', None) - super(RequestContext, self).__init__( - auth_token=auth_token, - user=user_id or user, - tenant=project_id or tenant, - domain=kwargs.pop('domain', None) or domain_name or domain_id, - user_domain=kwargs.pop('user_domain', None), - project_domain=kwargs.pop('project_domain', None), - is_admin=is_admin, - read_only=kwargs.pop('read_only', False), - show_deleted=kwargs.pop('show_deleted', False), - request_id=request_id, - resource_uuid=kwargs.pop('resource_uuid', None), - is_admin_project=kwargs.pop('is_admin_project', True), - overwrite=overwrite, - roles=roles) - - self.remote_address = kwargs.pop('remote_address', None) - self.instance_lock_checked = kwargs.pop('instance_lock_checked', None) - self.read_deleted = kwargs.pop('read_deleted', None) - self.service_catalog = kwargs.pop('service_catalog', None) - self.quota_class = kwargs.pop('quota_class', None) - - # oslo_context's RequestContext.to_dict() generates this field, we can - # safely ignore this as we don't use it. - kwargs.pop('user_identity', None) - kwargs.pop('global_request_id', None) - if kwargs: - LOG.warning('Arguments dropped when creating context: %s', - str(kwargs)) - - # FIXME(dims): user_id and project_id duplicate information that is - # already present in the oslo_context's RequestContext. We need to - # get rid of them. 
- self.auth_url = auth_url - self.domain_name = domain_name - self.domain_id = domain_id - self.auth_token_info = auth_token_info - self.user_id = user_id or user - self.project_id = project_id - if not timestamp: - timestamp = timeutils.utcnow() - if isinstance(timestamp, six.string_types): - timestamp = timeutils.parse_isotime(timestamp) - self.timestamp = timestamp - self.user_name = user_name - self.project_name = project_name - self.is_admin = is_admin - # if self.is_admin is None: - # self.is_admin = policy.check_is_admin(self) - - def to_dict(self): - values = super(RequestContext, self).to_dict() - # FIXME(dims): defensive hasattr() checks need to be - # removed once we figure out why we are seeing stack - # traces - values.update({ - 'user_id': getattr(self, 'user_id', None), - 'user_name': getattr(self, 'user_name', None), - 'project_id': getattr(self, 'project_id', None), - 'project_name': getattr(self, 'project_name', None), - 'domain_id': getattr(self, 'domain_id', None), - 'domain_name': getattr(self, 'domain_name', None), - 'auth_token_info': getattr(self, 'auth_token_info', None), - 'is_admin': getattr(self, 'is_admin', None), - 'timestamp': utils.strtime(self.timestamp) if hasattr( - self, 'timestamp') else None, - 'request_id': getattr(self, 'request_id', None), - }) - return values - - @classmethod - def from_dict(cls, values): - return cls(**values) - - def __str__(self): - return "" % self.to_dict() - - -def make_context(*args, **kwargs): - return RequestContext(*args, **kwargs) diff --git a/watcher/common/exception.py b/watcher/common/exception.py deleted file mode 100644 index 22f1bd3..0000000 --- a/watcher/common/exception.py +++ /dev/null @@ -1,477 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Watcher base exception handling. - -Includes decorator for re-raising Watcher-type exceptions. - -SHOULD include dedicated exception logging. - -""" - -import functools -import sys - -from keystoneclient import exceptions as keystone_exceptions -from oslo_log import log as logging -import six - -from watcher._i18n import _ - -from watcher import conf - -LOG = logging.getLogger(__name__) - -CONF = conf.CONF - - -def wrap_keystone_exception(func): - """Wrap keystone exceptions and throw Watcher specific exceptions.""" - @functools.wraps(func) - def wrapped(*args, **kw): - try: - return func(*args, **kw) - except keystone_exceptions.AuthorizationFailure: - raise AuthorizationFailure( - client=func.__name__, reason=sys.exc_info()[1]) - except keystone_exceptions.ClientException: - raise AuthorizationFailure( - client=func.__name__, - reason=(_('Unexpected keystone client error occurred: %s') - % sys.exc_info()[1])) - return wrapped - - -class WatcherException(Exception): - """Base Watcher Exception - - To correctly use this class, inherit from it and define - a 'msg_fmt' property. That msg_fmt will get printf'd - with the keyword arguments provided to the constructor. 
- - """ - msg_fmt = _("An unknown exception occurred") - code = 500 - headers = {} - safe = False - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - if not message: - try: - message = self.msg_fmt % kwargs - except Exception: - # kwargs doesn't match a variable in msg_fmt - # log the issue and the kwargs - LOG.exception('Exception in string format operation') - for name, value in kwargs.items(): - LOG.error("%(name)s: %(value)s", - {'name': name, 'value': value}) - - if CONF.fatal_exception_format_errors: - raise - else: - # at least get the core msg_fmt out if something happened - message = self.msg_fmt - - super(WatcherException, self).__init__(message) - - def __str__(self): - """Encode to utf-8 then wsme api can consume it as well""" - if not six.PY3: - return six.text_type(self.args[0]).encode('utf-8') - else: - return self.args[0] - - def __unicode__(self): - return six.text_type(self.args[0]) - - def format_message(self): - if self.__class__.__name__.endswith('_Remote'): - return self.args[0] - else: - return six.text_type(self) - - -class UnsupportedError(WatcherException): - msg_fmt = _("Not supported") - - -class NotAuthorized(WatcherException): - msg_fmt = _("Not authorized") - code = 403 - - -class PolicyNotAuthorized(NotAuthorized): - msg_fmt = _("Policy doesn't allow %(action)s to be performed.") - - -class OperationNotPermitted(NotAuthorized): - msg_fmt = _("Operation not permitted") - - -class Invalid(WatcherException, ValueError): - msg_fmt = _("Unacceptable parameters") - code = 400 - - -class ObjectNotFound(WatcherException): - msg_fmt = _("The %(name)s %(id)s could not be found") - - -class Conflict(WatcherException): - msg_fmt = _('Conflict') - code = 409 - - -class ResourceNotFound(ObjectNotFound): - msg_fmt = _("The %(name)s resource %(id)s could not be found") - code = 404 - - -class InvalidParameter(Invalid): - 
msg_fmt = _("%(parameter)s has to be of type %(parameter_type)s") - - -class InvalidIdentity(Invalid): - msg_fmt = _("Expected a uuid or int but received %(identity)s") - - -class InvalidOperator(Invalid): - msg_fmt = _("Filter operator is not valid: %(operator)s not " - "in %(valid_operators)s") - - -class InvalidGoal(Invalid): - msg_fmt = _("Goal %(goal)s is invalid") - - -class InvalidStrategy(Invalid): - msg_fmt = _("Strategy %(strategy)s is invalid") - - -class InvalidAudit(Invalid): - msg_fmt = _("Audit %(audit)s is invalid") - - -class EagerlyLoadedAuditRequired(InvalidAudit): - msg_fmt = _("Audit %(audit)s was not eagerly loaded") - - -class InvalidActionPlan(Invalid): - msg_fmt = _("Action plan %(action_plan)s is invalid") - - -class EagerlyLoadedActionPlanRequired(InvalidActionPlan): - msg_fmt = _("Action plan %(action_plan)s was not eagerly loaded") - - -class EagerlyLoadedActionRequired(InvalidActionPlan): - msg_fmt = _("Action %(action)s was not eagerly loaded") - - -class InvalidUUID(Invalid): - msg_fmt = _("Expected a uuid but received %(uuid)s") - - -class InvalidName(Invalid): - msg_fmt = _("Expected a logical name but received %(name)s") - - -class InvalidUuidOrName(Invalid): - msg_fmt = _("Expected a logical name or uuid but received %(name)s") - - -class InvalidIntervalOrCron(Invalid): - msg_fmt = _("Expected an interval or cron syntax but received %(name)s") - - -class GoalNotFound(ResourceNotFound): - msg_fmt = _("Goal %(goal)s could not be found") - - -class GoalAlreadyExists(Conflict): - msg_fmt = _("A goal with UUID %(uuid)s already exists") - - -class StrategyNotFound(ResourceNotFound): - msg_fmt = _("Strategy %(strategy)s could not be found") - - -class StrategyAlreadyExists(Conflict): - msg_fmt = _("A strategy with UUID %(uuid)s already exists") - - -class AuditTemplateNotFound(ResourceNotFound): - msg_fmt = _("AuditTemplate %(audit_template)s could not be found") - - -class AuditTemplateAlreadyExists(Conflict): - msg_fmt = _("An 
audit_template with UUID or name %(audit_template)s " - "already exists") - - -class AuditTemplateReferenced(Invalid): - msg_fmt = _("AuditTemplate %(audit_template)s is referenced by one or " - "multiple audits") - - -class AuditTypeNotFound(Invalid): - msg_fmt = _("Audit type %(audit_type)s could not be found") - - -class AuditParameterNotAllowed(Invalid): - msg_fmt = _("Audit parameter %(parameter)s are not allowed") - - -class AuditNotFound(ResourceNotFound): - msg_fmt = _("Audit %(audit)s could not be found") - - -class AuditAlreadyExists(Conflict): - msg_fmt = _("An audit with UUID %(uuid)s already exists") - - -class AuditIntervalNotSpecified(Invalid): - msg_fmt = _("Interval of audit must be specified for %(audit_type)s.") - - -class AuditIntervalNotAllowed(Invalid): - msg_fmt = _("Interval of audit must not be set for %(audit_type)s.") - - -class AuditReferenced(Invalid): - msg_fmt = _("Audit %(audit)s is referenced by one or multiple action " - "plans") - - -class ActionPlanNotFound(ResourceNotFound): - msg_fmt = _("ActionPlan %(action_plan)s could not be found") - - -class ActionPlanAlreadyExists(Conflict): - msg_fmt = _("An action plan with UUID %(uuid)s already exists") - - -class ActionPlanReferenced(Invalid): - msg_fmt = _("Action Plan %(action_plan)s is referenced by one or " - "multiple actions") - - -class ActionPlanCancelled(WatcherException): - msg_fmt = _("Action Plan with UUID %(uuid)s is cancelled by user") - - -class ActionPlanIsOngoing(Conflict): - msg_fmt = _("Action Plan %(action_plan)s is currently running.") - - -class ActionNotFound(ResourceNotFound): - msg_fmt = _("Action %(action)s could not be found") - - -class ActionAlreadyExists(Conflict): - msg_fmt = _("An action with UUID %(uuid)s already exists") - - -class ActionReferenced(Invalid): - msg_fmt = _("Action plan %(action_plan)s is referenced by one or " - "multiple goals") - - -class ActionFilterCombinationProhibited(Invalid): - msg_fmt = _("Filtering actions on both audit and 
action-plan is " - "prohibited") - - -class UnsupportedActionType(UnsupportedError): - msg_fmt = _("Provided %(action_type) is not supported yet") - - -class EfficacyIndicatorNotFound(ResourceNotFound): - msg_fmt = _("Efficacy indicator %(efficacy_indicator)s could not be found") - - -class EfficacyIndicatorAlreadyExists(Conflict): - msg_fmt = _("An action with UUID %(uuid)s already exists") - - -class ScoringEngineAlreadyExists(Conflict): - msg_fmt = _("A scoring engine with UUID %(uuid)s already exists") - - -class ScoringEngineNotFound(ResourceNotFound): - msg_fmt = _("ScoringEngine %(scoring_engine)s could not be found") - - -class HTTPNotFound(ResourceNotFound): - pass - - -class PatchError(Invalid): - msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s") - - -# decision engine - -class WorkflowExecutionException(WatcherException): - msg_fmt = _('Workflow execution error: %(error)s') - - -class IllegalArgumentException(WatcherException): - msg_fmt = _('Illegal argument') - - -class NoSuchMetric(WatcherException): - msg_fmt = _('No such metric') - - -class NoDataFound(WatcherException): - msg_fmt = _('No rows were returned') - - -class AuthorizationFailure(WatcherException): - msg_fmt = _('%(client)s connection failed. 
Reason: %(reason)s') - - -class KeystoneFailure(WatcherException): - msg_fmt = _("Keystone API endpoint is missing") - - -class ClusterEmpty(WatcherException): - msg_fmt = _("The list of compute node(s) in the cluster is empty") - - -class MetricCollectorNotDefined(WatcherException): - msg_fmt = _("The metrics resource collector is not defined") - - -class ClusterStateStale(WatcherException): - msg_fmt = _("The cluster state is stale") - - -class ClusterDataModelCollectionError(WatcherException): - msg_fmt = _("The cluster data model '%(cdm)s' could not be built") - - -class ClusterStateNotDefined(WatcherException): - msg_fmt = _("The cluster state is not defined") - - -class CapacityNotDefined(WatcherException): - msg_fmt = _("The capacity %(capacity)s is not defined for '%(resource)s'") - - -class NoAvailableStrategyForGoal(WatcherException): - msg_fmt = _("No strategy could be found to achieve the '%(goal)s' goal.") - - -class InvalidIndicatorValue(WatcherException): - msg_fmt = _("The indicator '%(name)s' with value '%(value)s' " - "and spec type '%(spec_type)s' is invalid.") - - -class GlobalEfficacyComputationError(WatcherException): - msg_fmt = _("Could not compute the global efficacy for the '%(goal)s' " - "goal using the '%(strategy)s' strategy.") - - -class NoMetricValuesForInstance(WatcherException): - msg_fmt = _("No values returned by %(resource_id)s for %(metric_name)s.") - - -class UnsupportedDataSource(UnsupportedError): - msg_fmt = _("Datasource %(datasource)s is not supported " - "by strategy %(strategy)s") - - -class NoSuchMetricForHost(WatcherException): - msg_fmt = _("No %(metric)s metric for %(host)s found.") - - -class ServiceAlreadyExists(Conflict): - msg_fmt = _("A service with name %(name)s is already working on %(host)s.") - - -class ServiceNotFound(ResourceNotFound): - msg_fmt = _("The service %(service)s cannot be found.") - - -class WildcardCharacterIsUsed(WatcherException): - msg_fmt = _("You shouldn't use any other IDs of 
%(resource)s if you use " - "wildcard character.") - - -class CronFormatIsInvalid(WatcherException): - msg_fmt = _("Provided cron is invalid: %(message)s") - - -# Model - -class ComputeResourceNotFound(WatcherException): - msg_fmt = _("The compute resource '%(name)s' could not be found") - - -class InstanceNotFound(ComputeResourceNotFound): - msg_fmt = _("The instance '%(name)s' could not be found") - - -class ComputeNodeNotFound(ComputeResourceNotFound): - msg_fmt = _("The compute node %(name)s could not be found") - - -class StorageResourceNotFound(WatcherException): - msg_fmt = _("The storage resource '%(name)s' could not be found") - - -class StorageNodeNotFound(StorageResourceNotFound): - msg_fmt = _("The storage node %(name)s could not be found") - - -class PoolNotFound(StorageResourceNotFound): - msg_fmt = _("The pool %(name)s could not be found") - - -class VolumeNotFound(StorageResourceNotFound): - msg_fmt = _("The volume '%(name)s' could not be found") - - -class LoadingError(WatcherException): - msg_fmt = _("Error loading plugin '%(name)s'") - - -class ReservedWord(WatcherException): - msg_fmt = _("The identifier '%(name)s' is a reserved word") - - -class NotSoftDeletedStateError(WatcherException): - msg_fmt = _("The %(name)s resource %(id)s is not soft deleted") - - -class NegativeLimitError(WatcherException): - msg_fmt = _("Limit should be positive") - - -class NotificationPayloadError(WatcherException): - _msg_fmt = _("Payload not populated when trying to send notification " - "\"%(class_name)s\"") diff --git a/watcher/common/loader/__init__.py b/watcher/common/loader/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/common/loader/base.py b/watcher/common/loader/base.py deleted file mode 100644 index 322cb43..0000000 --- a/watcher/common/loader/base.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not 
use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseLoader(object): - - @abc.abstractmethod - def list_available(self): - raise NotImplementedError() - - @abc.abstractmethod - def load(self, name): - raise NotImplementedError() diff --git a/watcher/common/loader/default.py b/watcher/common/loader/default.py deleted file mode 100644 index 3ef63bc..0000000 --- a/watcher/common/loader/default.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from oslo_config import cfg -from oslo_log import log -from stevedore import driver as drivermanager -from stevedore import extension as extensionmanager - -from watcher.common import exception -from watcher.common.loader import base -from watcher.common import utils - -LOG = log.getLogger(__name__) - - -class DefaultLoader(base.BaseLoader): - - def __init__(self, namespace, conf=cfg.CONF): - """Entry point loader for Watcher using Stevedore - - :param namespace: namespace of the entry point(s) to load or list - :type namespace: str - :param conf: ConfigOpts instance, defaults to cfg.CONF - """ - super(DefaultLoader, self).__init__() - self.namespace = namespace - self.conf = conf - - def load(self, name, **kwargs): - try: - LOG.debug("Loading in namespace %s => %s ", self.namespace, name) - driver_manager = drivermanager.DriverManager( - namespace=self.namespace, - name=name, - invoke_on_load=False, - ) - - driver_cls = driver_manager.driver - config = self._load_plugin_config(name, driver_cls) - - driver = driver_cls(config, **kwargs) - except Exception as exc: - LOG.exception(exc) - raise exception.LoadingError(name=name) - - return driver - - def _reload_config(self): - self.conf(default_config_files=self.conf.default_config_files) - - def get_entry_name(self, name): - return ".".join([self.namespace, name]) - - def _load_plugin_config(self, name, driver_cls): - """Load the config of the plugin""" - config = utils.Struct() - config_opts = driver_cls.get_config_opts() - if not config_opts: - return config - - group_name = self.get_entry_name(name) - self.conf.register_opts(config_opts, group=group_name) - - # Finalise the opt import by re-checking the configuration - # against the provided config files - self._reload_config() - - config_group = self.conf.get(group_name) - if not config_group: - raise exception.LoadingError(name=name) - - config.update({ - name: value for name, value in config_group.items() - }) - - 
return config - - def list_available(self): - extension_manager = extensionmanager.ExtensionManager( - namespace=self.namespace) - return {ext.name: ext.plugin for ext in extension_manager.extensions} diff --git a/watcher/common/loader/loadable.py b/watcher/common/loader/loadable.py deleted file mode 100644 index c234274..0000000 --- a/watcher/common/loader/loadable.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - -from watcher.common import service - - -@six.add_metaclass(abc.ABCMeta) -class Loadable(object): - """Generic interface for dynamically loading a driver/entry point. - - This defines the contract in order to let the loader manager inject - the configuration parameters during the loading. - """ - - def __init__(self, config): - super(Loadable, self).__init__() - self.config = config - - @classmethod - @abc.abstractmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - raise NotImplementedError - - -LoadableSingletonMeta = type( - "LoadableSingletonMeta", (abc.ABCMeta, service.Singleton), {}) - - -@six.add_metaclass(LoadableSingletonMeta) -class LoadableSingleton(object): - """Generic interface for dynamically loading a driver as a singleton. 
- - This defines the contract in order to let the loader manager inject - the configuration parameters during the loading. Classes inheriting from - this class will be singletons. - """ - - def __init__(self, config): - super(LoadableSingleton, self).__init__() - self.config = config - - @classmethod - @abc.abstractmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - raise NotImplementedError diff --git a/watcher/common/nova_helper.py b/watcher/common/nova_helper.py deleted file mode 100644 index 52994f4..0000000 --- a/watcher/common/nova_helper.py +++ /dev/null @@ -1,866 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import random -import time - -from oslo_log import log - -import cinderclient.exceptions as ciexceptions -import glanceclient.exc as glexceptions -import novaclient.exceptions as nvexceptions - -from watcher.common import clients -from watcher.common import exception -from watcher.common import utils - -LOG = log.getLogger(__name__) - - -class NovaHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.neutron = self.osc.neutron() - self.cinder = self.osc.cinder() - self.nova = self.osc.nova() - self.glance = self.osc.glance() - - def get_compute_node_list(self): - return self.nova.hypervisors.list() - - def get_compute_node_by_id(self, node_id): - """Get compute node by ID (*not* UUID)""" - # We need to pass an object with an 'id' attribute to make it work - return self.nova.hypervisors.get(utils.Struct(id=node_id)) - - def get_compute_node_by_hostname(self, node_hostname): - """Get compute node by ID (*not* UUID)""" - # We need to pass an object with an 'id' attribute to make it work - try: - compute_nodes = self.nova.hypervisors.search(node_hostname) - if len(compute_nodes) != 1: - raise exception.ComputeNodeNotFound(name=node_hostname) - - return self.get_compute_node_by_id(compute_nodes[0].id) - except Exception as exc: - LOG.exception(exc) - raise exception.ComputeNodeNotFound(name=node_hostname) - - def get_instance_list(self): - return self.nova.servers.list(search_opts={'all_tenants': True}) - - def get_service(self, service_id): - return self.nova.services.find(id=service_id) - - def get_flavor(self, flavor_id): - return self.nova.flavors.get(flavor_id) - - def get_aggregate_list(self): - return self.nova.aggregates.list() - - def get_aggregate_detail(self, aggregate_id): - return self.nova.aggregates.get(aggregate_id) - - def get_availability_zone_list(self): - return self.nova.availability_zones.list(detailed=True) - - def find_instance(self, 
instance_id): - return self.nova.servers.get(instance_id) - - def confirm_resize(self, instance, previous_status, retry=60): - instance.confirm_resize() - instance = self.nova.servers.get(instance.id) - while instance.status != previous_status and retry: - instance = self.nova.servers.get(instance.id) - retry -= 1 - time.sleep(1) - if instance.status == previous_status: - return True - else: - LOG.debug("confirm resize failed for the " - "instance %s" % instance.id) - return False - - def wait_for_volume_status(self, volume, status, timeout=60, - poll_interval=1): - """Wait until volume reaches given status. - - :param volume: volume resource - :param status: expected status of volume - :param timeout: timeout in seconds - :param poll_interval: poll interval in seconds - """ - start_time = time.time() - while time.time() - start_time < timeout: - volume = self.cinder.volumes.get(volume.id) - if volume.status == status: - break - time.sleep(poll_interval) - else: - raise Exception("Volume %s did not reach status %s after %d s" - % (volume.id, status, timeout)) - return volume.status == status - - def watcher_non_live_migrate_instance(self, instance_id, dest_hostname, - keep_original_image_name=True, - retry=120): - """This method migrates a given instance - - using an image of this instance and creating a new instance - from this image. It saves some configuration information - about the original instance : security group, list of networks, - list of attached volumes, floating IP, ... - in order to apply the same settings to the new instance. - At the end of the process the original instance is deleted. - It returns True if the migration was successful, - False otherwise. - - if destination hostname not given, this method calls nova api - to migrate the instance. - - :param instance_id: the unique id of the instance to migrate. 
- :param keep_original_image_name: flag indicating whether the - image name from which the original instance was built must be - used as the name of the intermediate image used for migration. - If this flag is False, a temporary image name is built - """ - new_image_name = "" - LOG.debug( - "Trying a non-live migrate of instance '%s' " % instance_id) - - # Looking for the instance to migrate - instance = self.find_instance(instance_id) - if not instance: - LOG.debug("Instance %s not found !" % instance_id) - return False - else: - # NOTE: If destination node is None call Nova API to migrate - # instance - host_name = getattr(instance, "OS-EXT-SRV-ATTR:host") - LOG.debug( - "Instance %s found on host '%s'." % (instance_id, host_name)) - - if dest_hostname is None: - previous_status = getattr(instance, 'status') - - instance.migrate() - instance = self.nova.servers.get(instance_id) - while (getattr(instance, 'status') not in - ["VERIFY_RESIZE", "ERROR"] and retry): - instance = self.nova.servers.get(instance.id) - time.sleep(2) - retry -= 1 - new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host') - - if (host_name != new_hostname and - instance.status == 'VERIFY_RESIZE'): - if not self.confirm_resize(instance, previous_status): - return False - LOG.debug( - "cold migration succeeded : " - "instance %s is now on host '%s'." % ( - instance_id, new_hostname)) - return True - else: - LOG.debug( - "cold migration for instance %s failed" % instance_id) - return False - - if not keep_original_image_name: - # randrange gives you an integral value - irand = random.randint(0, 1000) - - # Building the temporary image name - # which will be used for the migration - new_image_name = "tmp-migrate-%s-%s" % (instance_id, irand) - else: - # Get the image name of the current instance. - # We'll use the same name for the new instance. 
- imagedict = getattr(instance, "image") - image_id = imagedict["id"] - image = self.glance.images.get(image_id) - new_image_name = getattr(image, "name") - - instance_name = getattr(instance, "name") - flavordict = getattr(instance, "flavor") - # a_dict = dict([flavorstr.strip('{}').split(":"),]) - flavor_id = flavordict["id"] - flavor = self.nova.flavors.get(flavor_id) - flavor_name = getattr(flavor, "name") - keypair_name = getattr(instance, "key_name") - - addresses = getattr(instance, "addresses") - - floating_ip = "" - network_names_list = [] - - for network_name, network_conf_obj in addresses.items(): - LOG.debug( - "Extracting network configuration for network '%s'" % - network_name) - - network_names_list.append(network_name) - - for net_conf_item in network_conf_obj: - if net_conf_item['OS-EXT-IPS:type'] == "floating": - floating_ip = net_conf_item['addr'] - break - - sec_groups_list = getattr(instance, "security_groups") - sec_groups = [] - - for sec_group_dict in sec_groups_list: - sec_groups.append(sec_group_dict['name']) - - # Stopping the old instance properly so - # that no new data is sent to it and to its attached volumes - stopped_ok = self.stop_instance(instance_id) - - if not stopped_ok: - LOG.debug("Could not stop instance: %s" % instance_id) - return False - - # Building the temporary image which will be used - # to re-build the same instance on another target host - image_uuid = self.create_image_from_instance(instance_id, - new_image_name) - - if not image_uuid: - LOG.debug( - "Could not build temporary image of instance: %s" % - instance_id) - return False - - # - # We need to get the list of attached volumes and detach - # them from the instance in order to attache them later - # to the new instance - # - blocks = [] - - # Looks like this : - # os-extended-volumes:volumes_attached | - # [{u'id': u'c5c3245f-dd59-4d4f-8d3a-89d80135859a'}] - attached_volumes = getattr(instance, - "os-extended-volumes:volumes_attached") - - for 
attached_volume in attached_volumes: - volume_id = attached_volume['id'] - - try: - volume = self.cinder.volumes.get(volume_id) - - attachments_list = getattr(volume, "attachments") - - device_name = attachments_list[0]['device'] - # When a volume is attached to an instance - # it contains the following property : - # attachments = [{u'device': u'/dev/vdb', - # u'server_id': u'742cc508-a2f2-4769-a794-bcdad777e814', - # u'id': u'f6d62785-04b8-400d-9626-88640610f65e', - # u'host_name': None, u'volume_id': - # u'f6d62785-04b8-400d-9626-88640610f65e'}] - - # boot_index indicates a number - # designating the boot order of the device. - # Use -1 for the boot volume, - # choose 0 for an attached volume. - block_device_mapping_v2_item = {"device_name": device_name, - "source_type": "volume", - "destination_type": - "volume", - "uuid": volume_id, - "boot_index": "0"} - - blocks.append( - block_device_mapping_v2_item) - - LOG.debug("Detaching volume %s from instance: %s" % ( - volume_id, instance_id)) - # volume.detach() - self.nova.volumes.delete_server_volume(instance_id, - volume_id) - - if not self.wait_for_volume_status(volume, "available", 5, - 10): - LOG.debug( - "Could not detach volume %s from instance: %s" % ( - volume_id, instance_id)) - return False - except ciexceptions.NotFound: - LOG.debug("Volume '%s' not found " % image_id) - return False - - # We create the new instance from - # the intermediate image of the original instance - new_instance = self. 
def resize_instance(self, instance_id, flavor, retry=120):
    """Resize an instance to the given flavor using Nova's resize API.

    The instance is resized, polled until its vm_state becomes
    'resized' (or the retry budget runs out), and the resize is then
    confirmed.

    :param instance_id: the unique id of the instance to resize.
    :param flavor: the name or ID of the flavor to resize to.
    :param retry: number of 1-second polls to wait for completion.
    :returns: True if the resize succeeded, False otherwise.
    """
    LOG.debug("Trying a resize of instance %s to flavor '%s'" % (
        instance_id, flavor))

    # Looking for the instance to resize
    instance = self.find_instance(instance_id)

    flavor_id = None

    try:
        # Nova's resize() accepts either a flavor object or its id.
        flavor_id = self.nova.flavors.get(flavor)
    except nvexceptions.NotFound:
        # `flavor` was not a valid ID; fall back to a lookup by name.
        # BUG FIX: guard against an empty match list instead of
        # indexing [0] unconditionally (which raised IndexError).
        matches = [f.id for f in self.nova.flavors.list()
                   if f.name == flavor]
        flavor_id = matches[0] if matches else None
    except nvexceptions.ClientException as e:
        LOG.debug("Nova client exception occurred while resizing "
                  "instance %s. Exception: %s", instance_id, e)

    if not flavor_id:
        LOG.debug("Flavor not found: %s" % flavor)
        return False

    if not instance:
        LOG.debug("Instance not found: %s" % instance_id)
        return False

    instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
    LOG.debug(
        "Instance %s is in '%s' status." % (instance_id,
                                            instance_status))

    instance.resize(flavor=flavor_id)
    # Poll until Nova reports the instance as resized.
    while getattr(instance,
                  'OS-EXT-STS:vm_state') != 'resized' \
            and retry:
        instance = self.nova.servers.get(instance.id)
        LOG.debug(
            'Waiting the resize of {0} to {1}'.format(
                instance, flavor_id))
        time.sleep(1)
        retry -= 1

    # A successful resize leaves the instance awaiting confirmation.
    instance_status = getattr(instance, 'status')
    if instance_status != 'VERIFY_RESIZE':
        return False

    instance.confirm_resize()

    LOG.debug("Resizing succeeded : instance %s is now on flavor "
              "'%s'.", instance_id, flavor_id)

    return True


def live_migrate_instance(self, instance_id, dest_hostname,
                          block_migration=False, retry=120):
    """Live migrate an instance, optionally letting Nova pick the host.

    Uses the Nova built-in live_migrate() server action and polls
    until the instance lands on the expected host (or, when
    ``dest_hostname`` is None, until the nova scheduler has moved it
    somewhere and it is ACTIVE again).

    :param instance_id: the unique id of the instance to migrate.
    :param dest_hostname: the name of the destination compute node; if
                          None, the nova scheduler chooses the
                          destination host.
    :param block_migration: True when no shared storage is available.
    :param retry: number of 1-second polls to wait for completion.
    :returns: True if the migration succeeded, False otherwise.
    """
    LOG.debug("Trying to live migrate instance %s " % (instance_id))

    # Looking for the instance to migrate
    instance = self.find_instance(instance_id)
    if not instance:
        LOG.debug("Instance not found: %s" % instance_id)
        return False
    else:
        host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
        LOG.debug(
            "Instance %s found on host '%s'." % (instance_id, host_name))

    instance.live_migrate(host=dest_hostname,
                          block_migration=block_migration,
                          disk_over_commit=True)

    instance = self.nova.servers.get(instance_id)

    # NOTE: If destination host is not specified for live migration
    # let nova scheduler choose the destination host.
    if dest_hostname is None:
        while (instance.status not in ['ACTIVE', 'ERROR'] and retry):
            instance = self.nova.servers.get(instance.id)
            LOG.debug(
                'Waiting the migration of {0}'.format(instance.id))
            time.sleep(1)
            retry -= 1
        new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')

        if host_name != new_hostname and instance.status == 'ACTIVE':
            LOG.debug(
                "Live migration succeeded : "
                "instance %s is now on host '%s'." % (
                    instance_id, new_hostname))
            return True
        else:
            return False

    # A destination host was given: wait until the instance reports it.
    while getattr(instance,
                  'OS-EXT-SRV-ATTR:host') != dest_hostname \
            and retry:
        instance = self.nova.servers.get(instance.id)
        LOG.debug(
            'Waiting the migration of {0} to {1}'.format(
                instance,
                getattr(instance, 'OS-EXT-SRV-ATTR:host')))
        time.sleep(1)
        retry -= 1

    host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
    if host_name != dest_hostname:
        return False

    LOG.debug(
        "Live migration succeeded : "
        "instance %s is now on host '%s'." % (
            instance_id, host_name))

    return True


def abort_live_migrate(self, instance_id, source, destination, retry=240):
    """Abort an in-flight live migration of the given instance.

    :param instance_id: the unique id of the migrating instance.
    :param source: hostname the instance is migrating away from.
    :param destination: hostname the instance is migrating to.
    :param retry: number of 2-second polls to wait for a stable state.
    :returns: True if the abort succeeded (instance back on source and
              ACTIVE), False if the migration completed anyway.
    :raises Exception: if both the migration and the abort failed.
    """
    LOG.debug("Aborting live migration of instance %s" % instance_id)
    migration = self.get_running_migration(instance_id)
    if migration:
        migration_id = getattr(migration[0], "id")
        try:
            self.nova.server_migrations.live_migration_abort(
                server=instance_id, migration=migration_id)
        except Exception as e:
            # BUG FIX: this previously read `except exception as e`,
            # which names the watcher.common.exception *module* — not a
            # valid exception class — and would raise TypeError instead
            # of handling the error.
            # Note: Does not return from here, as abort request can't be
            # accepted but migration still going on.
            LOG.exception(e)
    else:
        LOG.debug(
            "No running migrations found for instance %s" % instance_id)

    # BUG FIX: fetch once up front so `instance` is always bound even
    # when the retry budget is already exhausted on entry.
    instance = self.nova.servers.get(instance_id)
    while retry:
        if (getattr(instance, 'OS-EXT-STS:task_state') is None and
                getattr(instance, 'status') in ['ACTIVE', 'ERROR']):
            break
        time.sleep(2)
        retry -= 1
        instance = self.nova.servers.get(instance_id)
    instance_host = getattr(instance, 'OS-EXT-SRV-ATTR:host')
    instance_status = getattr(instance, 'status')

    # Abort live migration successful, action is cancelled
    if instance_host == source and instance_status == 'ACTIVE':
        return True
    # Nova unable to abort live migration, action has succeeded
    elif instance_host == destination and instance_status == 'ACTIVE':
        return False
    else:
        raise Exception("Live migration execution and abort both failed "
                        "for the instance %s" % instance_id)


def enable_service_nova_compute(self, hostname):
    """Enable the nova-compute service on a host.

    :param hostname: name of the compute node to enable.
    :returns: True if the service reports 'enabled' afterwards.
    """
    service = self.nova.services.enable(host=hostname,
                                        binary='nova-compute')
    return service.status == 'enabled'


def disable_service_nova_compute(self, hostname):
    """Disable the nova-compute service on a host.

    :param hostname: name of the compute node to disable.
    :returns: True if the service reports 'disabled' afterwards.
    """
    service = self.nova.services.disable(host=hostname,
                                         binary='nova-compute')
    return service.status == 'disabled'
def set_host_offline(self, hostname):
    """Put a compute host into maintenance / disabled state.

    See the compute API reference (PUT /v2.1/os-hosts/{host_name}):
    the request body carries "host", "maintenance_mode" and "status"
    fields, e.g.::

        {"host": {"host": "...",
                  "maintenance_mode": "off_maintenance",
                  "status": "enabled"}}

    :param hostname: name of the compute node to take offline.
    :returns: True if the update was issued, False if host not found.
    """
    host = self.nova.hosts.get(hostname)

    if not host:
        LOG.debug("host not found: %s" % hostname)
        return False
    else:
        # NOTE(review): the API documents 'on_maintenance'/
        # 'off_maintenance' and 'enabled'/'disabled' as the expected
        # values; 'disable' is kept here to preserve the existing
        # behaviour — confirm against the deployed compute API.
        host[0].update(
            {"maintenance_mode": "disable", "status": "disable"})
        return True


def create_image_from_instance(self, instance_id, image_name,
                               metadata=None):
    """Create a new Glance image from a given instance.

    Waits for the image to be in 'active' state before returning.

    :param instance_id: the unique id of
                        the instance to backup as an image.
    :param image_name: the name of the image to create.
    :param metadata: optional dict of key-value pairs to associate to
                     the image as metadata; defaults to
                     {"reason": "instance_migrate"}.
    :returns: the unique UUID of the created image if successful,
              None otherwise.
    """
    # BUG FIX: the default used to be a mutable dict literal shared
    # across calls; build it per-call instead.
    if metadata is None:
        metadata = {"reason": "instance_migrate"}

    LOG.debug(
        "Trying to create an image from instance %s ..." % instance_id)

    # Looking for the instance
    instance = self.find_instance(instance_id)

    if not instance:
        LOG.debug("Instance not found: %s" % instance_id)
        return None
    else:
        host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
        LOG.debug(
            "Instance %s found on host '%s'." % (instance_id, host_name))

        # The instance must be ACTIVE or SHUTOFF before an image can
        # be built from it.
        if self.wait_for_instance_status(instance, ('ACTIVE', 'SHUTOFF'),
                                         5,
                                         10):
            image_uuid = self.nova.servers.create_image(instance_id,
                                                        image_name,
                                                        metadata)

            image = self.glance.images.get(image_uuid)
            if not image:
                return None

            # Waiting for the new image to be officially in ACTIVE state
            # in order to make sure it can be used
            status = image.status
            retry = 10
            while status != 'active' and status != 'error' and retry:
                time.sleep(5)
                retry -= 1
                # Retrieve the image again so the status field updates
                image = self.glance.images.get(image_uuid)
                if not image:
                    break
                status = image.status
                LOG.debug("Current image status: %s" % status)

            if not image:
                LOG.debug("Image not found: %s" % image_uuid)
            else:
                LOG.debug(
                    "Image %s successfully created for instance %s" % (
                        image_uuid, instance_id))
            return image_uuid
    return None


def delete_instance(self, instance_id):
    """Delete a given instance.

    :param instance_id: the unique id of the instance to delete.
    :returns: True if the delete request was issued, False otherwise.
    """
    LOG.debug("Trying to remove instance %s ..." % instance_id)

    instance = self.find_instance(instance_id)

    if not instance:
        LOG.debug("Instance not found: %s" % instance_id)
        return False
    else:
        self.nova.servers.delete(instance_id)
        LOG.debug("Instance %s removed." % instance_id)
        return True


def stop_instance(self, instance_id):
    """Stop a given instance and wait for it to reach 'stopped'.

    :param instance_id: the unique id of the instance to stop.
    :returns: True if the instance is (or ends up) stopped.
    """
    LOG.debug("Trying to stop instance %s ..." % instance_id)

    instance = self.find_instance(instance_id)

    if not instance:
        LOG.debug("Instance not found: %s" % instance_id)
        return False
    elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
        LOG.debug("Instance has been stopped: %s" % instance_id)
        return True
    else:
        self.nova.servers.stop(instance_id)

        if self.wait_for_instance_state(instance, "stopped", 8, 10):
            LOG.debug("Instance %s stopped." % instance_id)
            return True
        else:
            return False


def wait_for_instance_state(self, server, state, retry, sleep):
    """Wait for a server to be in a specific vm_state.

    The state can be one of the following: active, stopped.

    :param server: server object.
    :param state: for which state we are waiting for.
    :param retry: how many times to retry.
    :param sleep: seconds to sleep between the retries.
    :returns: True if the server reached the state, False otherwise.
    """
    if not server:
        return False

    while getattr(server, 'OS-EXT-STS:vm_state') != state and retry:
        time.sleep(sleep)
        server = self.nova.servers.get(server)
        retry -= 1
    return getattr(server, 'OS-EXT-STS:vm_state') == state


def wait_for_instance_status(self, instance, status_list, retry, sleep):
    """Wait for an instance to be in one of the given statuses.

    The status can be one of the following:
    BUILD, ACTIVE, ERROR, VERIFY_RESIZE, SHUTOFF.

    :param instance: instance object.
    :param status_list: tuple containing the list of
                        statuses we are waiting for.
    :param retry: how many times to retry.
    :param sleep: seconds to sleep between the retries.
    :returns: True if the instance reached one of the statuses.
    """
    if not instance:
        return False

    while instance.status not in status_list and retry:
        LOG.debug("Current instance status: %s" % instance.status)
        time.sleep(sleep)
        instance = self.nova.servers.get(instance.id)
        retry -= 1
    LOG.debug("Current instance status: %s" % instance.status)
    return instance.status in status_list
def create_instance(self, node_id, inst_name="test", image_id=None,
                    flavor_name="m1.tiny",
                    sec_group_list=None,
                    network_names_list=None, keypair_name="mykeys",
                    create_new_floating_ip=True,
                    block_device_mapping_v2=None):
    """Create a new instance on a given compute node.

    It also creates, if requested, a new floating IP and associates
    it with the new instance.

    :param node_id: hostname of the compute node to spawn on (used
                    through the "nova:<host>" availability zone hint).
    :param inst_name: name of the new instance.
    :param image_id: Glance image id to boot from.
    :param flavor_name: name of the flavor to use.
    :param sec_group_list: list of security group names
                           (defaults to ["default"]).
    :param network_names_list: list of network names to attach
                               (defaults to ["demo-net"]).
    :param keypair_name: name of the keypair to inject.
    :param create_new_floating_ip: whether to create and attach a new
                                   floating IP once the instance is
                                   ACTIVE.
    :param block_device_mapping_v2: optional block device mappings.
    :returns: the created instance object, or None on failure.
    """
    # BUG FIX: the list defaults used to be mutable literals shared
    # across calls; build them per-call instead.
    if sec_group_list is None:
        sec_group_list = ["default"]
    if network_names_list is None:
        network_names_list = ["demo-net"]

    LOG.debug(
        "Trying to create new instance '%s' "
        "from image '%s' with flavor '%s' ..." % (
            inst_name, image_id, flavor_name))

    try:
        self.nova.keypairs.findall(name=keypair_name)
    except nvexceptions.NotFound:
        LOG.debug("Key pair '%s' not found " % keypair_name)
        return

    try:
        image = self.glance.images.get(image_id)
    except glexceptions.NotFound:
        LOG.debug("Image '%s' not found " % image_id)
        return

    try:
        flavor = self.nova.flavors.find(name=flavor_name)
    except nvexceptions.NotFound:
        LOG.debug("Flavor '%s' not found " % flavor_name)
        return

    # Make sure all security groups exist
    for sec_group_name in sec_group_list:
        try:
            self.nova.security_groups.find(name=sec_group_name)
        except nvexceptions.NotFound:
            LOG.debug("Security group '%s' not found " % sec_group_name)
            return

    net_list = list()

    for network_name in network_names_list:
        nic_id = self.get_network_id_from_name(network_name)

        if not nic_id:
            LOG.debug("Network '%s' not found " % network_name)
            return
        net_obj = {"net-id": nic_id}
        net_list.append(net_obj)

    instance = self.nova.servers.create(
        inst_name, image,
        flavor=flavor,
        key_name=keypair_name,
        security_groups=sec_group_list,
        nics=net_list,
        block_device_mapping_v2=block_device_mapping_v2,
        availability_zone="nova:%s" % node_id)

    # Poll at 5 second intervals, until the status is no longer 'BUILD'
    if instance:
        if self.wait_for_instance_status(instance,
                                         ('ACTIVE', 'ERROR'), 5, 10):
            instance = self.nova.servers.get(instance.id)

            if create_new_floating_ip and instance.status == 'ACTIVE':
                LOG.debug(
                    "Creating a new floating IP"
                    " for instance '%s'" % instance.id)
                # Creating floating IP for the new instance
                floating_ip = self.nova.floating_ips.create()

                instance.add_floating_ip(floating_ip)

                LOG.debug("Instance %s associated to Floating IP '%s'" % (
                    instance.id, floating_ip.ip))

    return instance


def get_network_id_from_name(self, net_name="private"):
    """Return the unique id of the named Neutron network.

    :param net_name: name of the network to look up.
    :returns: the network id, or None if no such network exists.
    """
    networks = self.neutron.list_networks(name=net_name).get(
        'networks', [])

    # BUG FIX: indexing [0] unconditionally raised IndexError for an
    # unknown network, defeating the `if not nic_id` guard in
    # create_instance(); return None instead.
    if not networks:
        return None

    return networks[0]['id']


def get_instance_by_uuid(self, instance_uuid):
    """Return the list of instances matching the given UUID."""
    return [instance for instance in
            self.nova.servers.list(search_opts={"all_tenants": True,
                                                "uuid": instance_uuid})]


def get_instance_by_name(self, instance_name):
    """Return the list of instances matching the given name."""
    return [instance for instance in
            self.nova.servers.list(search_opts={"all_tenants": True,
                                                "name": instance_name})]


def get_instances_by_node(self, host):
    """Return all instances hosted on the given compute node."""
    return [instance for instance in
            self.nova.servers.list(search_opts={"all_tenants": True})
            if self.get_hostname(instance) == host]


def get_hostname(self, instance):
    """Return the compute host the given instance is running on."""
    return str(getattr(instance, 'OS-EXT-SRV-ATTR:host'))


def get_flavor_instance(self, instance, cache):
    """Populate instance.flavor with the attributes of its flavor.

    The flavor is looked up in ``cache`` first (keyed by flavor id),
    falling back to the Nova API and caching the result. Missing
    flavors yield safe defaults.

    :param instance: instance whose flavor dict is enriched in place.
    :param cache: dict mapping flavor id -> flavor object (or None).
    """
    fid = instance.flavor['id']
    if fid in cache:
        flavor = cache.get(fid)
    else:
        try:
            flavor = self.nova.flavors.get(fid)
        except nvexceptions.NotFound:
            # BUG FIX: this previously caught ciexceptions.NotFound
            # (cinderclient), which nova.flavors.get() never raises, so
            # a missing flavor crashed instead of defaulting to None.
            flavor = None
        cache[fid] = flavor
    attr_defaults = [('name', 'unknown-id-%s' % fid),
                     ('vcpus', 0), ('ram', 0), ('disk', 0),
                     ('ephemeral', 0), ('extra_specs', {})]
    for attr, default in attr_defaults:
        if not flavor:
            instance.flavor[attr] = default
            continue
        instance.flavor[attr] = getattr(flavor, attr, default)


def get_running_migration(self, instance_id):
    """Return the list of in-progress migrations for an instance."""
    return self.nova.server_migrations.list(server=instance_id)
from watcher.common import synchronization


class Observable(synchronization.Synchronization):
    """Minimal observer-pattern subject guarded by the shared mutex.

    Observers register themselves and are pushed an update() call only
    when the subject has been explicitly marked as changed.
    """

    def __init__(self):
        super(Observable, self).__init__()
        self.__observers = []
        self.changed = 0

    def set_changed(self):
        # Mark the subject dirty; notify() is a no-op otherwise.
        self.changed = 1

    def clear_changed(self):
        self.changed = 0

    def has_changed(self):
        return self.changed

    def register_observer(self, observer):
        # Duplicate registrations are silently ignored.
        if observer not in self.__observers:
            self.__observers.append(observer)

    def unregister_observer(self, observer):
        # Removing an observer that was never registered is not an error.
        try:
            self.__observers.remove(observer)
        except ValueError:
            pass

    def notify(self, ctx=None, publisherid=None, event_type=None,
               metadata=None, payload=None, modifier=None):
        """Push an update to every registered observer except `modifier`.

        Runs under the inherited mutex; the changed flag is reset once
        all observers have been updated.
        """
        self.mutex.acquire()
        try:
            if not self.changed:
                return
            for registered in self.__observers:
                if modifier != registered:
                    registered.update(self, ctx, metadata, publisherid,
                                      event_type, payload)
            self.clear_changed()
        finally:
            self.mutex.release()
import os

from watcher import conf

CONF = conf.CONF


def _rel(base, args):
    # Shared join helper for the *_rel() functions below.
    return os.path.join(base, *args)


def basedir_rel(*args):
    """Join *args* onto the configured $pybasedir."""
    return _rel(CONF.pybasedir, args)


def bindir_rel(*args):
    """Join *args* onto the configured $bindir."""
    return _rel(CONF.bindir, args)


def state_path_rel(*args):
    """Join *args* onto the configured $state_path."""
    return _rel(CONF.state_path, args)
# At present, policy.enforce will reload the policy rules when it
# detects that the policy files have been touched.
def init(policy_file=None, rules=None,
         default_rule=None, use_conf=True, overwrite=True):
    """Return the module-level Enforcer, creating it on first use.

    :param policy_file: Custom policy file to use, if none is
                        specified, ``conf.policy_file`` will be
                        used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation. If
                  :meth:`load_rules` with ``force_reload=True``,
                  :meth:`clear` or :meth:`set_rules` with
                  ``overwrite=True`` is called this will be overwritten.
    :param default_rule: Default rule to use, conf.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from cache or config file.
    :param overwrite: Whether to overwrite existing rules when reload rules
                      from config file.
    """
    global _ENFORCER
    if _ENFORCER is None:
        # See https://docs.openstack.org/oslo.policy/ for usage details.
        _ENFORCER = policy.Enforcer(CONF,
                                    policy_file=policy_file,
                                    rules=rules,
                                    default_rule=default_rule,
                                    use_conf=use_conf,
                                    overwrite=overwrite)
    return _ENFORCER


def enforce(context, rule=None, target=None,
            do_raise=True, exc=None, *args, **kwargs):
    """Check authorization of a rule against the target and credentials.

    :param dict context: As much information about the user performing the
                         action as possible.
    :param rule: The rule to evaluate.
    :param dict target: As much information about the object being operated
                        on as possible.
    :param do_raise: Whether to raise an exception or not if check
                     fails.
    :param exc: Class of the exception to raise if the check fails.
                Any remaining arguments passed to :meth:`enforce` (both
                positional and keyword arguments) will be passed to
                the exception class. If not specified,
                :class:`PolicyNotAuthorized` will be used.

    :return: ``False`` if the policy does not allow the action and `exc` is
             not provided; otherwise, returns a value that evaluates to
             ``True``. Note: for rules using the "case" expression, this
             ``True`` value will be the specified string from the
             expression.
    """
    enforcer = init()
    credentials = context.to_dict()
    if exc is None:
        exc = exception.PolicyNotAuthorized
    if target is None:
        target = {'project_id': context.project_id,
                  'user_id': context.user_id}
    return enforcer.enforce(rule, target, credentials,
                            do_raise=do_raise, exc=exc, *args, **kwargs)
from oslo_config import cfg
from oslo_log import log
import oslo_messaging as messaging

from oslo_messaging.rpc import dispatcher

from watcher.common import context as watcher_context
from watcher.common import exception

__all__ = [
    'init',
    'cleanup',
    'set_defaults',
    'add_extra_exmods',
    'clear_extra_exmods',
    'get_allowed_exmods',
    'RequestContextSerializer',
    'get_client',
    'get_server',
    'get_notifier',
]

CONF = cfg.CONF
LOG = log.getLogger(__name__)

# Module-level messaging plumbing, populated by init().
TRANSPORT = None
NOTIFICATION_TRANSPORT = None
NOTIFIER = None

ALLOWED_EXMODS = [
    exception.__name__,
]
EXTRA_EXMODS = []


JsonPayloadSerializer = messaging.JsonPayloadSerializer


def init(conf):
    """Create the RPC/notification transports and the global notifier."""
    global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_rpc_transport(
        conf, allowed_remote_exmods=exmods)
    NOTIFICATION_TRANSPORT = messaging.get_notification_transport(
        conf, allowed_remote_exmods=exmods)

    serializer = RequestContextSerializer(JsonPayloadSerializer())
    # With no notification level configured, notifications are dropped
    # via the 'noop' driver.
    if not conf.notification_level:
        NOTIFIER = messaging.Notifier(
            NOTIFICATION_TRANSPORT, serializer=serializer, driver='noop')
    else:
        NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
                                      serializer=serializer)


def initialized():
    """Return True once init() has set up both transport and notifier."""
    return TRANSPORT is not None and NOTIFIER is not None


def cleanup():
    """Tear down the transports created by init()."""
    global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER
    if NOTIFIER is None:
        LOG.exception("RPC cleanup: NOTIFIER is None")
    TRANSPORT.cleanup()
    NOTIFICATION_TRANSPORT.cleanup()
    TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None


def set_defaults(control_exchange):
    """Set the default control exchange for oslo.messaging."""
    messaging.set_transport_defaults(control_exchange)


def add_extra_exmods(*args):
    """Register additional modules whose exceptions may cross RPC."""
    EXTRA_EXMODS.extend(args)


def clear_extra_exmods():
    """Forget every module registered via add_extra_exmods()."""
    del EXTRA_EXMODS[:]


def get_allowed_exmods():
    """Return the full list of modules allowed to cross RPC."""
    return ALLOWED_EXMODS + EXTRA_EXMODS


class RequestContextSerializer(messaging.Serializer):
    """Wrap a base serializer and (de)serialize the request context."""

    def __init__(self, base):
        self._base = base

    def serialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.serialize_entity(context, entity)

    def deserialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(context, entity)

    def serialize_context(self, context):
        return context.to_dict()

    def deserialize_context(self, context):
        return watcher_context.RequestContext.from_dict(context)


def get_client(target, version_cap=None, serializer=None):
    """Build an RPC client for `target` over the shared transport."""
    assert TRANSPORT is not None
    serializer = RequestContextSerializer(serializer)
    return messaging.RPCClient(TRANSPORT,
                               target,
                               version_cap=version_cap,
                               serializer=serializer)


def get_server(target, endpoints, serializer=None):
    """Build an RPC server for `target` over the shared transport."""
    assert TRANSPORT is not None
    access_policy = dispatcher.DefaultRPCAccessPolicy
    serializer = RequestContextSerializer(serializer)
    return messaging.get_rpc_server(TRANSPORT,
                                    target,
                                    endpoints,
                                    executor='eventlet',
                                    serializer=serializer,
                                    access_policy=access_policy)


def get_notifier(publisher_id):
    """Return the shared notifier bound to `publisher_id`."""
    assert NOTIFIER is not None
    return NOTIFIER.prepare(publisher_id=publisher_id)
from apscheduler import events
from apscheduler.schedulers import background
from oslo_service import service

# Re-export apscheduler's event definitions for consumers of this module.
job_events = events


class BackgroundSchedulerService(service.ServiceBase,
                                 background.BackgroundScheduler):
    """APScheduler background scheduler exposed as an oslo service."""

    def start(self):
        """Start service."""
        # Explicitly dispatch to the scheduler base; ServiceBase has no
        # start() implementation of its own.
        background.BackgroundScheduler.start(self)

    def stop(self):
        """Stop service."""
        self.shutdown()

    def wait(self):
        """Wait for service to complete."""

    def reset(self):
        """Reset service.

        Called in case service running in daemon mode receives SIGHUP.
        """
import datetime
import socket

import eventlet
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import _options
from oslo_log import log
import oslo_messaging as om
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from oslo_service import service
from oslo_service import wsgi

from oslo_messaging.rpc import dispatcher

from watcher._i18n import _
from watcher.api import app
from watcher.common import config
from watcher.common import context
from watcher.common import rpc
from watcher.common import scheduling
from watcher.conf import plugins as plugins_conf
from watcher import objects
from watcher.objects import base
from watcher.objects import fields as wfields
from watcher import version

# NOTE:
# Ubuntu 14.04 forces librabbitmq when kombu is used
# Unfortunately it forces a version that has a crash
# bug. Calling eventlet.monkey_patch() tells kombu
# to use libamqp instead.
eventlet.monkey_patch()

NOTIFICATION_OPTS = [
    cfg.StrOpt('notification_level',
               choices=[''] + list(wfields.NotificationPriority.ALL),
               default=wfields.NotificationPriority.INFO,
               help=_('Specifies the minimum level for which to send '
                      'notifications. If not set, no notifications will '
                      'be sent. The default is for this option to be at the '
                      '`INFO` level.'))
]
cfg.CONF.register_opts(NOTIFICATION_OPTS)


CONF = cfg.CONF
LOG = log.getLogger(__name__)

# Third-party loggers are noisy at DEBUG; cap their default levels.
_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'qpid.messaging=INFO',
                       'oslo.messaging=INFO', 'sqlalchemy=WARN',
                       'keystoneclient=INFO', 'stevedore=INFO',
                       'eventlet.wsgi.server=WARN', 'iso8601=WARN',
                       'paramiko=WARN', 'requests=WARN', 'neutronclient=WARN',
                       'glanceclient=WARN', 'watcher.openstack.common=WARN']

Singleton = service.Singleton


class WSGIService(service.ServiceBase):
    """Provides ability to launch Watcher API from wsgi app."""

    def __init__(self, service_name, use_ssl=False):
        """Initialize, but do not start the WSGI server.

        :param service_name: The service name of the WSGI server.
        :param use_ssl: Wraps the socket in an SSL context if True.
        """
        self.service_name = service_name
        self.app = app.VersionSelectorApplication()
        # Fall back to one worker per CPU when not configured.
        self.workers = (CONF.api.workers or
                        processutils.get_worker_count())
        self.server = wsgi.Server(CONF, self.service_name, self.app,
                                  host=CONF.api.host,
                                  port=CONF.api.port,
                                  use_ssl=use_ssl,
                                  logger_name=self.service_name)

    def start(self):
        """Start serving this service using loaded configuration"""
        self.server.start()

    def stop(self):
        """Stop serving this API"""
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API"""
        self.server.wait()

    def reset(self):
        """Reset server greenpool size to default"""
        self.server.reset()


class ServiceHeartbeat(scheduling.BackgroundSchedulerService):
    """Periodic job keeping this service's liveness record up to date."""

    service_name = None

    def __init__(self, gconfig=None, service_name=None, **kwargs):
        # BUG FIX: this previously read `gconfig = None or {}`, which
        # always discarded a caller-supplied gconfig.
        gconfig = gconfig or {}
        super(ServiceHeartbeat, self).__init__(gconfig, **kwargs)
        ServiceHeartbeat.service_name = service_name
        self.context = context.make_context()
        # Send an initial beat so the record exists immediately.
        self.send_beat()

    def send_beat(self):
        """Create or refresh the Service DB record for this host."""
        host = CONF.host
        watcher_list = objects.Service.list(
            self.context, filters={'name': ServiceHeartbeat.service_name,
                                   'host': host})
        if watcher_list:
            watcher_service = watcher_list[0]
            watcher_service.last_seen_up = datetime.datetime.utcnow()
            watcher_service.save()
        else:
            watcher_service = objects.Service(self.context)
            watcher_service.name = ServiceHeartbeat.service_name
            watcher_service.host = host
            watcher_service.create()

    def add_heartbeat_job(self):
        # Beat every 60 seconds, starting immediately.
        self.add_job(self.send_beat, 'interval', seconds=60,
                     next_run_time=datetime.datetime.now())

    @classmethod
    def get_service_name(cls):
        return CONF.host, cls.service_name

    def start(self):
        """Start service."""
        self.add_heartbeat_job()
        super(ServiceHeartbeat, self).start()

    def stop(self):
        """Stop service."""
        self.shutdown()

    def wait(self):
        """Wait for service to complete."""

    def reset(self):
        """Reset service.

        Called in case service running in daemon mode receives SIGHUP.
        """
= self.build_topic_handler( - self.conductor_topic, self.conductor_endpoints) - if self.notification_topics and self.notification_endpoints: - self.notification_handler = self.build_notification_handler( - self.notification_topics, self.notification_endpoints - ) - - @property - def transport(self): - if self._transport is None: - self._transport = om.get_rpc_transport(CONF) - return self._transport - - @property - def notification_transport(self): - if self._notification_transport is None: - self._notification_transport = om.get_notification_transport(CONF) - return self._notification_transport - - @property - def conductor_client(self): - if self._conductor_client is None: - target = om.Target( - topic=self.conductor_topic, - version=self.API_VERSION, - ) - self._conductor_client = om.RPCClient( - self.transport, target, serializer=self.serializer) - return self._conductor_client - - @conductor_client.setter - def conductor_client(self, c): - self.conductor_client = c - - def build_topic_handler(self, topic_name, endpoints=()): - access_policy = dispatcher.DefaultRPCAccessPolicy - serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer()) - target = om.Target( - topic=topic_name, - # For compatibility, we can override it with 'host' opt - server=CONF.host or socket.gethostname(), - version=self.api_version, - ) - return om.get_rpc_server( - self.transport, target, endpoints, - executor='eventlet', serializer=serializer, - access_policy=access_policy) - - def build_notification_handler(self, topic_names, endpoints=()): - serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer()) - targets = [om.Target(topic=topic_name) for topic_name in topic_names] - return om.get_notification_listener( - self.notification_transport, targets, endpoints, - executor='eventlet', serializer=serializer, - allow_requeue=False) - - def start(self): - LOG.debug("Connecting to '%s' (%s)", - CONF.transport_url, CONF.rpc_backend) - if self.conductor_topic_handler: - 
self.conductor_topic_handler.start() - if self.notification_handler: - self.notification_handler.start() - if self.heartbeat: - self.heartbeat.start() - - def stop(self): - LOG.debug("Disconnecting from '%s' (%s)", - CONF.transport_url, CONF.rpc_backend) - if self.conductor_topic_handler: - self.conductor_topic_handler.stop() - if self.notification_handler: - self.notification_handler.stop() - if self.heartbeat: - self.heartbeat.stop() - - def reset(self): - """Reset a service in case it received a SIGHUP.""" - - def wait(self): - """Wait for service to complete.""" - - def check_api_version(self, ctx): - api_manager_version = self.conductor_client.call( - ctx, 'check_api_version', api_version=self.api_version) - return api_manager_version - - -def launch(conf, service_, workers=1, restart_method='reload'): - return service.launch(conf, service_, workers, restart_method) - - -def prepare_service(argv=(), conf=cfg.CONF): - log.register_options(conf) - gmr_opts.set_defaults(conf) - - config.parse_args(argv) - cfg.set_defaults(_options.log_opts, - default_log_levels=_DEFAULT_LOG_LEVELS) - log.setup(conf, 'python-watcher') - conf.log_opt_values(LOG, log.DEBUG) - objects.register_all() - - gmr.TextGuruMeditation.register_section( - _('Plugins'), plugins_conf.show_plugins) - gmr.TextGuruMeditation.setup_autorun(version, conf=conf) diff --git a/watcher/common/service_manager.py b/watcher/common/service_manager.py deleted file mode 100644 index b87240c..0000000 --- a/watcher/common/service_manager.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Servionica -## -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class ServiceManager(object): - - @abc.abstractproperty - def service_name(self): - raise NotImplementedError() - - @abc.abstractproperty - def api_version(self): - raise NotImplementedError() - - @abc.abstractproperty - def publisher_id(self): - raise NotImplementedError() - - @abc.abstractproperty - def conductor_topic(self): - raise NotImplementedError() - - @abc.abstractproperty - def notification_topics(self): - raise NotImplementedError() - - @abc.abstractproperty - def conductor_endpoints(self): - raise NotImplementedError() - - @abc.abstractproperty - def notification_endpoints(self): - raise NotImplementedError() diff --git a/watcher/common/synchronization.py b/watcher/common/synchronization.py deleted file mode 100644 index ffeccd1..0000000 --- a/watcher/common/synchronization.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import threading - - -class Synchronization(object): - def __init__(self): - self.mutex = threading.RLock() diff --git a/watcher/common/utils.py b/watcher/common/utils.py deleted file mode 100644 index be345af..0000000 --- a/watcher/common/utils.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities and helper functions.""" - -import datetime -import re - -from croniter import croniter - -from jsonschema import validators -from oslo_log import log as logging -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six - -from watcher.common import exception - -from watcher import conf - -CONF = conf.CONF - -LOG = logging.getLogger(__name__) - - -class Struct(dict): - """Specialized dict where you access an item like an attribute - - >>> struct = Struct() - >>> struct['a'] = 1 - >>> struct.b = 2 - >>> assert struct.a == 1 - >>> assert struct['b'] == 2 - """ - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, value): - try: - self[name] = value - except KeyError: - raise AttributeError(name) - - -generate_uuid = uuidutils.generate_uuid -is_uuid_like = uuidutils.is_uuid_like -is_int_like = strutils.is_int_like -strtime = timeutils.strtime - - -def is_cron_like(value): - """Return True is submitted value is like cron syntax""" - 
try: - croniter(value, datetime.datetime.now()) - except Exception as e: - raise exception.CronFormatIsInvalid(message=str(e)) - return True - - -def safe_rstrip(value, chars=None): - """Removes trailing characters from a string if that does not make it empty - - :param value: A string value that will be stripped. - :param chars: Characters to remove. - :return: Stripped value. - - """ - if not isinstance(value, six.string_types): - LOG.warning( - "Failed to remove trailing character. Returning original object." - "Supplied object is not a string: %s,", value) - return value - - return value.rstrip(chars) or value - - -def is_hostname_safe(hostname): - """Determine if the supplied hostname is RFC compliant. - - Check that the supplied hostname conforms to: - * http://en.wikipedia.org/wiki/Hostname - * http://tools.ietf.org/html/rfc952 - * http://tools.ietf.org/html/rfc1123 - - :param hostname: The hostname to be validated. - :returns: True if valid. False if not. - - """ - m = r'^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?$' - return (isinstance(hostname, six.string_types) and - (re.match(m, hostname) is not None)) - - -def get_cls_import_path(cls): - """Return the import path of a given class""" - module = cls.__module__ - if module is None or module == str.__module__: - return cls.__name__ - return module + '.' 
+ cls.__name__ - - -# Default value feedback extension as jsonschema doesn't support it -def extend_with_default(validator_class): - validate_properties = validator_class.VALIDATORS["properties"] - - def set_defaults(validator, properties, instance, schema): - for prop, subschema in properties.items(): - if "default" in subschema and instance is not None: - instance.setdefault(prop, subschema["default"]) - - for error in validate_properties( - validator, properties, instance, schema - ): - yield error - - return validators.extend(validator_class, - {"properties": set_defaults}) - - -# Parameter strict check extension as jsonschema doesn't support it -def extend_with_strict_schema(validator_class): - validate_properties = validator_class.VALIDATORS["properties"] - - def strict_schema(validator, properties, instance, schema): - if instance is None: - return - - for para in instance.keys(): - if para not in properties.keys(): - raise exception.AuditParameterNotAllowed(parameter=para) - - for error in validate_properties( - validator, properties, instance, schema - ): - yield error - - return validators.extend(validator_class, {"properties": strict_schema}) - -StrictDefaultValidatingDraft4Validator = extend_with_default( - extend_with_strict_schema(validators.Draft4Validator)) - -Draft4Validator = validators.Draft4Validator diff --git a/watcher/conf/__init__.py b/watcher/conf/__init__.py deleted file mode 100755 index 625401b..0000000 --- a/watcher/conf/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from watcher.conf import api -from watcher.conf import applier -from watcher.conf import ceilometer_client -from watcher.conf import cinder_client -from watcher.conf import clients_auth -from watcher.conf import db -from watcher.conf import decision_engine -from watcher.conf import exception -from watcher.conf import glance_client -from watcher.conf import gnocchi_client -from watcher.conf import ironic_client -from watcher.conf import monasca_client -from watcher.conf import neutron_client -from watcher.conf import nova_client -from watcher.conf import paths -from watcher.conf import planner -from watcher.conf import service -from watcher.conf import utils - -CONF = cfg.CONF - -service.register_opts(CONF) -api.register_opts(CONF) -utils.register_opts(CONF) -paths.register_opts(CONF) -exception.register_opts(CONF) -db.register_opts(CONF) -planner.register_opts(CONF) -applier.register_opts(CONF) -decision_engine.register_opts(CONF) -monasca_client.register_opts(CONF) -nova_client.register_opts(CONF) -glance_client.register_opts(CONF) -gnocchi_client.register_opts(CONF) -cinder_client.register_opts(CONF) -ceilometer_client.register_opts(CONF) -neutron_client.register_opts(CONF) -clients_auth.register_opts(CONF) -ironic_client.register_opts(CONF) diff --git a/watcher/conf/_opts.py b/watcher/conf/_opts.py deleted file mode 100644 index 73398f1..0000000 --- a/watcher/conf/_opts.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from keystoneauth1 import loading as ka_loading - -from watcher.conf import api as conf_api -from watcher.conf import applier as conf_applier -from watcher.conf import ceilometer_client as conf_ceilometer_client -from watcher.conf import cinder_client as conf_cinder_client -from watcher.conf import db -from watcher.conf import decision_engine as conf_de -from watcher.conf import exception -from watcher.conf import glance_client as conf_glance_client -from watcher.conf import neutron_client as conf_neutron_client -from watcher.conf import nova_client as conf_nova_client -from watcher.conf import paths -from watcher.conf import planner as conf_planner -from watcher.conf import utils - - -def list_opts(): - """Legacy aggregation of all the watcher config options""" - return [ - ('DEFAULT', - (conf_api.AUTH_OPTS + - exception.EXC_LOG_OPTS + - paths.PATH_OPTS + - utils.UTILS_OPTS)), - ('api', conf_api.API_SERVICE_OPTS), - ('database', db.SQL_OPTS), - ('watcher_planner', conf_planner.WATCHER_PLANNER_OPTS), - ('watcher_applier', conf_applier.APPLIER_MANAGER_OPTS), - ('watcher_decision_engine', - (conf_de.WATCHER_DECISION_ENGINE_OPTS + - conf_de.WATCHER_CONTINUOUS_OPTS)), - ('nova_client', conf_nova_client.NOVA_CLIENT_OPTS), - ('glance_client', conf_glance_client.GLANCE_CLIENT_OPTS), - ('cinder_client', conf_cinder_client.CINDER_CLIENT_OPTS), - ('ceilometer_client', 
conf_ceilometer_client.CEILOMETER_CLIENT_OPTS), - ('neutron_client', conf_neutron_client.NEUTRON_CLIENT_OPTS), - ('watcher_clients_auth', - (ka_loading.get_auth_common_conf_options() + - ka_loading.get_auth_plugin_conf_options('password') + - ka_loading.get_session_conf_options())) - ] diff --git a/watcher/conf/api.py b/watcher/conf/api.py deleted file mode 100644 index 4531eca..0000000 --- a/watcher/conf/api.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -api = cfg.OptGroup(name='api', - title='Options for the Watcher API service') - -AUTH_OPTS = [ - cfg.BoolOpt('enable_authentication', - default=True, - help='This option enables or disables user authentication ' - 'via keystone. Default value is True.'), -] - -API_SERVICE_OPTS = [ - cfg.PortOpt('port', - default=9322, - help='The port for the watcher API server'), - cfg.HostAddressOpt('host', - default='127.0.0.1', - help='The listen IP address for the watcher API server' - ), - cfg.IntOpt('max_limit', - default=1000, - help='The maximum number of items returned in a single ' - 'response from a collection resource'), - cfg.IntOpt('workers', - min=1, - help='Number of workers for Watcher API service. 
' - 'The default is equal to the number of CPUs available ' - 'if that can be determined, else a default worker ' - 'count of 1 is returned.'), - - cfg.BoolOpt('enable_ssl_api', - default=False, - help="Enable the integrated stand-alone API to service " - "requests via HTTPS instead of HTTP. If there is a " - "front-end service performing HTTPS offloading from " - "the service, this option should be False; note, you " - "will want to change public API endpoint to represent " - "SSL termination URL with 'public_endpoint' option."), -] - - -def register_opts(conf): - conf.register_group(api) - conf.register_opts(API_SERVICE_OPTS, group=api) - conf.register_opts(AUTH_OPTS) - - -def list_opts(): - return [('api', API_SERVICE_OPTS), ('DEFAULT', AUTH_OPTS)] diff --git a/watcher/conf/applier.py b/watcher/conf/applier.py deleted file mode 100644 index ec1bf38..0000000 --- a/watcher/conf/applier.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -watcher_applier = cfg.OptGroup(name='watcher_applier', - title='Options for the Applier messaging' - 'core') - -APPLIER_MANAGER_OPTS = [ - cfg.IntOpt('workers', - default='1', - min=1, - required=True, - help='Number of workers for applier, default value is 1.'), - cfg.StrOpt('conductor_topic', - default='watcher.applier.control', - help='The topic name used for' - 'control events, this topic ' - 'used for rpc call '), - cfg.StrOpt('publisher_id', - default='watcher.applier.api', - help='The identifier used by watcher ' - 'module on the message broker'), - cfg.StrOpt('workflow_engine', - default='taskflow', - required=True, - help='Select the engine to use to execute the workflow'), -] - - -def register_opts(conf): - conf.register_group(watcher_applier) - conf.register_opts(APPLIER_MANAGER_OPTS, group=watcher_applier) - - -def list_opts(): - return [('watcher_applier', APPLIER_MANAGER_OPTS)] diff --git a/watcher/conf/ceilometer_client.py b/watcher/conf/ceilometer_client.py deleted file mode 100644 index 48fdf88..0000000 --- a/watcher/conf/ceilometer_client.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -ceilometer_client = cfg.OptGroup(name='ceilometer_client', - title='Configuration Options for Ceilometer') - -CEILOMETER_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2', - help='Version of Ceilometer API to use in ' - 'ceilometerclient.'), - cfg.StrOpt('endpoint_type', - default='internalURL', - help='Type of endpoint to use in ceilometerclient.' - 'Supported values: internalURL, publicURL, adminURL' - 'The default is internalURL.')] - - -def register_opts(conf): - conf.register_group(ceilometer_client) - conf.register_opts(CEILOMETER_CLIENT_OPTS, group=ceilometer_client) - - -def list_opts(): - return [('ceilometer_client', CEILOMETER_CLIENT_OPTS)] diff --git a/watcher/conf/cinder_client.py b/watcher/conf/cinder_client.py deleted file mode 100644 index 687c4ce..0000000 --- a/watcher/conf/cinder_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -cinder_client = cfg.OptGroup(name='cinder_client', - title='Configuration Options for Cinder') - -CINDER_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='3', - help='Version of Cinder API to use in cinderclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in cinderclient.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(cinder_client) - conf.register_opts(CINDER_CLIENT_OPTS, group=cinder_client) - - -def list_opts(): - return [('cinder_client', CINDER_CLIENT_OPTS)] diff --git a/watcher/conf/clients_auth.py b/watcher/conf/clients_auth.py deleted file mode 100644 index 8e959fc..0000000 --- a/watcher/conf/clients_auth.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from keystoneauth1 import loading as ka_loading - -WATCHER_CLIENTS_AUTH = 'watcher_clients_auth' - - -def register_opts(conf): - ka_loading.register_session_conf_options(conf, WATCHER_CLIENTS_AUTH) - ka_loading.register_auth_conf_options(conf, WATCHER_CLIENTS_AUTH) - - -def list_opts(): - return [('watcher_clients_auth', ka_loading.get_session_conf_options() + - ka_loading.get_auth_common_conf_options())] diff --git a/watcher/conf/db.py b/watcher/conf/db.py deleted file mode 100644 index 8989687..0000000 --- a/watcher/conf/db.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_db import options as oslo_db_options - -from watcher.conf import paths - -_DEFAULT_SQL_CONNECTION = 'sqlite:///{0}'.format( - paths.state_path_def('watcher.sqlite')) - -database = cfg.OptGroup(name='database', - title='Configuration Options for database') - -SQL_OPTS = [ - cfg.StrOpt('mysql_engine', - default='InnoDB', - help='MySQL engine to use.') -] - - -def register_opts(conf): - oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION) - conf.register_group(database) - conf.register_opts(SQL_OPTS, group=database) - - -def list_opts(): - return [('database', SQL_OPTS)] diff --git a/watcher/conf/decision_engine.py b/watcher/conf/decision_engine.py deleted file mode 100644 index 162dc29..0000000 --- a/watcher/conf/decision_engine.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - - -watcher_decision_engine = cfg.OptGroup(name='watcher_decision_engine', - title='Defines the parameters of ' - 'the module decision engine') - -WATCHER_DECISION_ENGINE_OPTS = [ - cfg.StrOpt('conductor_topic', - default='watcher.decision.control', - help='The topic name used for ' - 'control events, this topic ' - 'used for RPC calls'), - cfg.ListOpt('notification_topics', - default=['versioned_notifications', 'watcher_notifications'], - help='The topic names from which notification events ' - 'will be listened to'), - cfg.StrOpt('publisher_id', - default='watcher.decision.api', - help='The identifier used by the Watcher ' - 'module on the message broker'), - cfg.IntOpt('max_workers', - default=2, - required=True, - help='The maximum number of threads that can be used to ' - 'execute strategies'), - cfg.IntOpt('action_plan_expiry', - default=24, - help='An expiry timespan(hours). Watcher invalidates any ' - 'action plan for which its creation time ' - '-whose number of hours has been offset by this value-' - ' is older that the current time.'), - cfg.IntOpt('check_periodic_interval', - default=30*60, - help='Interval (in seconds) for checking action plan expiry.') -] - -WATCHER_CONTINUOUS_OPTS = [ - cfg.IntOpt('continuous_audit_interval', - default=10, - help='Interval (in seconds) for checking newly created ' - 'continuous audits.') -] - - -def register_opts(conf): - conf.register_group(watcher_decision_engine) - conf.register_opts(WATCHER_DECISION_ENGINE_OPTS, - group=watcher_decision_engine) - conf.register_opts(WATCHER_CONTINUOUS_OPTS, group=watcher_decision_engine) - - -def list_opts(): - return [('watcher_decision_engine', WATCHER_DECISION_ENGINE_OPTS), - ('watcher_decision_engine', WATCHER_CONTINUOUS_OPTS)] diff --git a/watcher/conf/exception.py b/watcher/conf/exception.py deleted file mode 100644 index 3d9f67d..0000000 --- a/watcher/conf/exception.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 
2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -EXC_LOG_OPTS = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal.'), -] - - -def register_opts(conf): - conf.register_opts(EXC_LOG_OPTS) - - -def list_opts(): - return [('DEFAULT', EXC_LOG_OPTS)] diff --git a/watcher/conf/glance_client.py b/watcher/conf/glance_client.py deleted file mode 100644 index 015a09c..0000000 --- a/watcher/conf/glance_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -glance_client = cfg.OptGroup(name='glance_client', - title='Configuration Options for Glance') - -GLANCE_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2', - help='Version of Glance API to use in glanceclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in glanceclient.' - 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(glance_client) - conf.register_opts(GLANCE_CLIENT_OPTS, group=glance_client) - - -def list_opts(): - return [('glance_client', GLANCE_CLIENT_OPTS)] diff --git a/watcher/conf/gnocchi_client.py b/watcher/conf/gnocchi_client.py deleted file mode 100644 index 0e3acce..0000000 --- a/watcher/conf/gnocchi_client.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -gnocchi_client = cfg.OptGroup(name='gnocchi_client', - title='Configuration Options for Gnocchi') - -GNOCCHI_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='1', - help='Version of Gnocchi API to use in gnocchiclient.'), - cfg.StrOpt('endpoint_type', - default='internalURL', - help='Type of endpoint to use in gnocchi client.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is internalURL.'), - cfg.IntOpt('query_max_retries', - default=10, - help='How many times Watcher is trying to query again'), - cfg.IntOpt('query_timeout', - default=1, - help='How many seconds Watcher should wait to do query again')] - - -def register_opts(conf): - conf.register_group(gnocchi_client) - conf.register_opts(GNOCCHI_CLIENT_OPTS, group=gnocchi_client) - - -def list_opts(): - return [('gnocchi_client', GNOCCHI_CLIENT_OPTS)] diff --git a/watcher/conf/ironic_client.py b/watcher/conf/ironic_client.py deleted file mode 100755 index fc4940e..0000000 --- a/watcher/conf/ironic_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 ZTE Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -ironic_client = cfg.OptGroup(name='ironic_client', - title='Configuration Options for Ironic') - -IRONIC_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default=1, - help='Version of Ironic API to use in ironicclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in ironicclient.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(ironic_client) - conf.register_opts(IRONIC_CLIENT_OPTS, group=ironic_client) - - -def list_opts(): - return [('ironic_client', IRONIC_CLIENT_OPTS)] diff --git a/watcher/conf/monasca_client.py b/watcher/conf/monasca_client.py deleted file mode 100644 index 26d54f4..0000000 --- a/watcher/conf/monasca_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -monasca_client = cfg.OptGroup(name='monasca_client', - title='Configuration Options for Monasca') - -MONASCA_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2_0', - help='Version of Monasca API to use in monascaclient.'), - cfg.StrOpt('interface', - default='internal', - help='Type of interface used for monasca endpoint.' 
- 'Supported values: internal, public, admin' - 'The default is internal.')] - - -def register_opts(conf): - conf.register_group(monasca_client) - conf.register_opts(MONASCA_CLIENT_OPTS, group=monasca_client) - - -def list_opts(): - return [('monasca_client', MONASCA_CLIENT_OPTS)] diff --git a/watcher/conf/neutron_client.py b/watcher/conf/neutron_client.py deleted file mode 100644 index 4a8888d..0000000 --- a/watcher/conf/neutron_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -neutron_client = cfg.OptGroup(name='neutron_client', - title='Configuration Options for Neutron') - -NEUTRON_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2.0', - help='Version of Neutron API to use in neutronclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in neutronclient.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(neutron_client) - conf.register_opts(NEUTRON_CLIENT_OPTS, group=neutron_client) - - -def list_opts(): - return [('neutron_client', NEUTRON_CLIENT_OPTS)] diff --git a/watcher/conf/nova_client.py b/watcher/conf/nova_client.py deleted file mode 100755 index 35d55fd..0000000 --- a/watcher/conf/nova_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -nova_client = cfg.OptGroup(name='nova_client', - title='Configuration Options for Nova') - -NOVA_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2', - help='Version of Nova API to use in novaclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in novaclient.' - 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(nova_client) - conf.register_opts(NOVA_CLIENT_OPTS, group=nova_client) - - -def list_opts(): - return [('nova_client', NOVA_CLIENT_OPTS)] diff --git a/watcher/conf/opts.py b/watcher/conf/opts.py deleted file mode 100644 index 5af0314..0000000 --- a/watcher/conf/opts.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This is the single point of entry to generate the sample configuration -file for Watcher. It collects all the necessary info from the other modules -in this package. It is assumed that: - -* every other module in this package has a 'list_opts' function which - return a dict where - * the keys are strings which are the group names - * the value of each key is a list of config options for that group -* the watcher.conf package doesn't have further packages with config options -* this module is only used in the context of sample file generation -""" - -import collections -import importlib -import os -import pkgutil - -LIST_OPTS_FUNC_NAME = "list_opts" - - -def _tupleize(dct): - """Take the dict of options and convert to the 2-tuple format.""" - return [(key, val) for key, val in dct.items()] - - -def list_opts(): - """Grouped list of all the Watcher-specific configuration options - - :return: A list of ``(group, [opt_1, opt_2])`` tuple pairs, where ``group`` - is either a group name as a string or an OptGroup object. 
- """ - opts = collections.defaultdict(list) - module_names = _list_module_names() - imported_modules = _import_modules(module_names) - _append_config_options(imported_modules, opts) - return _tupleize(opts) - - -def _list_module_names(): - module_names = [] - package_path = os.path.dirname(os.path.abspath(__file__)) - for __, modname, ispkg in pkgutil.iter_modules(path=[package_path]): - if modname == "opts" or ispkg: - continue - else: - module_names.append(modname) - return module_names - - -def _import_modules(module_names): - imported_modules = [] - for modname in module_names: - mod = importlib.import_module("watcher.conf." + modname) - if not hasattr(mod, LIST_OPTS_FUNC_NAME): - msg = "The module 'watcher.conf.%s' should have a '%s' "\ - "function which returns the config options." % \ - (modname, LIST_OPTS_FUNC_NAME) - raise Exception(msg) - else: - imported_modules.append(mod) - return imported_modules - - -def _process_old_opts(configs): - """Convert old-style 2-tuple configs to dicts.""" - if isinstance(configs, tuple): - configs = [configs] - return {label: options for label, options in configs} - - -def _append_config_options(imported_modules, config_options): - for mod in imported_modules: - configs = mod.list_opts() - # TODO(markus_z): Remove this compatibility shim once all list_opts() - # functions have been updated to return dicts. - if not isinstance(configs, dict): - configs = _process_old_opts(configs) - for key, val in configs.items(): - config_options[key].extend(val) diff --git a/watcher/conf/paths.py b/watcher/conf/paths.py deleted file mode 100644 index a499614..0000000 --- a/watcher/conf/paths.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -import os - -PATH_OPTS = [ - cfg.StrOpt('pybasedir', - default=os.path.abspath(os.path.join(os.path.dirname(__file__), - '../')), - help='Directory where the watcher python module is installed.'), - cfg.StrOpt('bindir', - default='$pybasedir/bin', - help='Directory where watcher binaries are installed.'), - cfg.StrOpt('state_path', - default='$pybasedir', - help="Top-level directory for maintaining watcher's state."), -] - - -def basedir_def(*args): - """Return an uninterpolated path relative to $pybasedir.""" - return os.path.join('$pybasedir', *args) - - -def bindir_def(*args): - """Return an uninterpolated path relative to $bindir.""" - return os.path.join('$bindir', *args) - - -def state_path_def(*args): - """Return an uninterpolated path relative to $state_path.""" - return os.path.join('$state_path', *args) - - -def register_opts(conf): - conf.register_opts(PATH_OPTS) - - -def list_opts(): - return [('DEFAULT', PATH_OPTS)] diff --git a/watcher/conf/planner.py b/watcher/conf/planner.py deleted file mode 100644 index 1386c2f..0000000 --- a/watcher/conf/planner.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -watcher_planner = cfg.OptGroup(name='watcher_planner', - title='Defines the parameters of ' - 'the planner') - -default_planner = 'weight' - -WATCHER_PLANNER_OPTS = { - cfg.StrOpt('planner', - default=default_planner, - required=True, - help='The selected planner used to schedule the actions') -} - - -def register_opts(conf): - conf.register_group(watcher_planner) - conf.register_opts(WATCHER_PLANNER_OPTS, group=watcher_planner) - - -def list_opts(): - return [('watcher_planner', WATCHER_PLANNER_OPTS)] diff --git a/watcher/conf/plugins.py b/watcher/conf/plugins.py deleted file mode 100644 index d770520..0000000 --- a/watcher/conf/plugins.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import prettytable as ptable - -from watcher.applier.loading import default as applier_loader -from watcher.common import utils -from watcher.decision_engine.loading import default as decision_engine_loader - -PLUGIN_LOADERS = ( - applier_loader.DefaultActionLoader, - decision_engine_loader.DefaultPlannerLoader, - decision_engine_loader.DefaultScoringLoader, - decision_engine_loader.DefaultScoringContainerLoader, - decision_engine_loader.DefaultStrategyLoader, - decision_engine_loader.ClusterDataModelCollectorLoader, - applier_loader.DefaultWorkFlowEngineLoader, -) - - -def list_opts(): - """Load config options for all Watcher plugins""" - plugins_opts = [] - for plugin_loader_cls in PLUGIN_LOADERS: - plugin_loader = plugin_loader_cls() - plugins_map = plugin_loader.list_available() - - for plugin_name, plugin_cls in plugins_map.items(): - plugin_opts = plugin_cls.get_config_opts() - if plugin_opts: - plugins_opts.append( - (plugin_loader.get_entry_name(plugin_name), plugin_opts)) - - return plugins_opts - - -def _show_plugins_ascii_table(rows): - headers = ["Namespace", "Plugin name", "Import path"] - table = ptable.PrettyTable(field_names=headers) - for row in rows: - table.add_row(row) - return table.get_string() - - -def show_plugins(): - rows = [] - for plugin_loader_cls in PLUGIN_LOADERS: - plugin_loader = plugin_loader_cls() - plugins_map = plugin_loader.list_available() - - rows += [ - (plugin_loader.get_entry_name(plugin_name), - plugin_name, - utils.get_cls_import_path(plugin_cls)) - for plugin_name, plugin_cls in plugins_map.items()] - - return _show_plugins_ascii_table(rows) diff --git a/watcher/conf/service.py b/watcher/conf/service.py deleted file mode 100644 index 0f18d3a..0000000 --- a/watcher/conf/service.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance 
with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import socket - -from oslo_config import cfg - -from watcher._i18n import _ - -SERVICE_OPTS = [ - cfg.IntOpt('periodic_interval', - default=60, - help=_('Seconds between running periodic tasks.')), - cfg.HostAddressOpt('host', - default=socket.gethostname(), - help=_('Name of this node. This can be an opaque ' - 'identifier. It is not necessarily a hostname, ' - 'FQDN, or IP address. However, the node name ' - 'must be valid within an AMQP key, and if using ' - 'ZeroMQ, a valid hostname, FQDN, or IP address.') - ), - cfg.IntOpt('service_down_time', - default=90, - help=_('Maximum time since last check-in for up service.')) -] - - -def register_opts(conf): - conf.register_opts(SERVICE_OPTS) - - -def list_opts(): - return [ - ('DEFAULT', SERVICE_OPTS), - ] diff --git a/watcher/conf/utils.py b/watcher/conf/utils.py deleted file mode 100644 index 7c2981c..0000000 --- a/watcher/conf/utils.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -UTILS_OPTS = [ - cfg.StrOpt('rootwrap_config', - default="/etc/watcher/rootwrap.conf", - help='Path to the rootwrap configuration file to use for ' - 'running commands as root.'), - cfg.StrOpt('tempdir', - help='Explicitly specify the temporary working directory.'), -] - - -def register_opts(conf): - conf.register_opts(UTILS_OPTS) - - -def list_opts(): - return [('DEFAULT', UTILS_OPTS)] diff --git a/watcher/datasource/__init__.py b/watcher/datasource/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/datasource/ceilometer.py b/watcher/datasource/ceilometer.py deleted file mode 100644 index a71fb3f..0000000 --- a/watcher/datasource/ceilometer.py +++ /dev/null @@ -1,184 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -from ceilometerclient import exc -from oslo_utils import timeutils - -from watcher._i18n import _ -from watcher.common import clients -from watcher.common import exception - - -class CeilometerHelper(object): - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.ceilometer = self.osc.ceilometer() - - @staticmethod - def format_query(user_id, tenant_id, resource_id, - user_ids, tenant_ids, resource_ids): - query = [] - - def query_append(query, _id, _ids, field): - if _id: - _ids = [_id] - for x_id in _ids: - query.append({"field": field, "op": "eq", "value": x_id}) - - query_append(query, user_id, (user_ids or []), "user_id") - query_append(query, tenant_id, (tenant_ids or []), "project_id") - query_append(query, resource_id, (resource_ids or []), "resource_id") - - return query - - def _timestamps(self, start_time, end_time): - - def _format_timestamp(_time): - if _time: - if isinstance(_time, datetime.datetime): - return _time.isoformat() - return _time - return None - - start_timestamp = _format_timestamp(start_time) - end_timestamp = _format_timestamp(end_time) - - if ((start_timestamp is not None) and (end_timestamp is not None) and - (timeutils.parse_isotime(start_timestamp) > - timeutils.parse_isotime(end_timestamp))): - raise exception.Invalid( - _("Invalid query: %(start_time)s > %(end_time)s") % dict( - start_time=start_timestamp, end_time=end_timestamp)) - return start_timestamp, end_timestamp - - def build_query(self, user_id=None, tenant_id=None, resource_id=None, - user_ids=None, tenant_ids=None, resource_ids=None, - start_time=None, end_time=None): - """Returns query built from given parameters. - - This query can be then used for querying resources, meters and - statistics. 
- :param user_id: user_id, has a priority over list of ids - :param tenant_id: tenant_id, has a priority over list of ids - :param resource_id: resource_id, has a priority over list of ids - :param user_ids: list of user_ids - :param tenant_ids: list of tenant_ids - :param resource_ids: list of resource_ids - :param start_time: datetime from which measurements should be collected - :param end_time: datetime until which measurements should be collected - """ - - query = self.format_query(user_id, tenant_id, resource_id, - user_ids, tenant_ids, resource_ids) - - start_timestamp, end_timestamp = self._timestamps(start_time, - end_time) - - if start_timestamp: - query.append({"field": "timestamp", "op": "ge", - "value": start_timestamp}) - if end_timestamp: - query.append({"field": "timestamp", "op": "le", - "value": end_timestamp}) - return query - - def query_retry(self, f, *args, **kargs): - try: - return f(*args, **kargs) - except exc.HTTPUnauthorized: - self.osc.reset_clients() - self.ceilometer = self.osc.ceilometer() - return f(*args, **kargs) - except Exception: - raise - - def query_sample(self, meter_name, query, limit=1): - return self.query_retry(f=self.ceilometer.samples.list, - meter_name=meter_name, - limit=limit, - q=query) - - def statistic_list(self, meter_name, query=None, period=None): - """List of statistics.""" - statistics = self.ceilometer.statistics.list( - meter_name=meter_name, - q=query, - period=period) - return statistics - - def meter_list(self, query=None): - """List the user's meters.""" - meters = self.query_retry(f=self.ceilometer.meters.list, - query=query) - return meters - - def statistic_aggregation(self, - resource_id, - meter_name, - period, - aggregate='avg'): - """Representing a statistic aggregate by operators - - :param resource_id: id of resource to list statistics for. - :param meter_name: Name of meter to list statistics for. - :param period: Period in seconds over which to group samples. 
- :param aggregate: Available aggregates are: count, cardinality, - min, max, sum, stddev, avg. Defaults to avg. - :return: Return the latest statistical data, None if no data. - """ - - end_time = datetime.datetime.utcnow() - start_time = end_time - datetime.timedelta(seconds=int(period)) - query = self.build_query( - resource_id=resource_id, start_time=start_time, end_time=end_time) - statistic = self.query_retry(f=self.ceilometer.statistics.list, - meter_name=meter_name, - q=query, - period=period, - aggregates=[ - {'func': aggregate}]) - - item_value = None - if statistic: - item_value = statistic[-1]._info.get('aggregate').get(aggregate) - return item_value - - def get_last_sample_values(self, resource_id, meter_name, limit=1): - samples = self.query_sample( - meter_name=meter_name, - query=self.build_query(resource_id=resource_id), - limit=limit) - values = [] - for index, sample in enumerate(samples): - values.append( - {'sample_%s' % index: { - 'timestamp': sample._info['timestamp'], - 'value': sample._info['counter_volume']}}) - return values - - def get_last_sample_value(self, resource_id, meter_name): - samples = self.query_sample( - meter_name=meter_name, - query=self.build_query(resource_id=resource_id)) - if samples: - return samples[-1]._info['counter_volume'] - else: - return False diff --git a/watcher/datasource/gnocchi.py b/watcher/datasource/gnocchi.py deleted file mode 100644 index 539fa34..0000000 --- a/watcher/datasource/gnocchi.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from datetime import datetime -import time - -from oslo_config import cfg -from oslo_log import log - -from watcher.common import clients -from watcher.common import exception - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -class GnocchiHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.gnocchi = self.osc.gnocchi() - - def query_retry(self, f, *args, **kwargs): - for i in range(CONF.gnocchi_client.query_max_retries): - try: - return f(*args, **kwargs) - except Exception as e: - LOG.exception(e) - time.sleep(CONF.gnocchi_client.query_timeout) - raise - - def statistic_aggregation(self, - resource_id, - metric, - granularity, - start_time=None, - stop_time=None, - aggregation='mean'): - """Representing a statistic aggregate by operators - - :param metric: metric name of which we want the statistics - :param resource_id: id of resource to list statistics for - :param start_time: Start datetime from which metrics will be used - :param stop_time: End datetime from which metrics will be used - :param granularity: frequency of marking metric point, in seconds - :param aggregation: Should be chosen in accordance with policy - aggregations - :return: value of aggregated metric - """ - - if start_time is not None and not isinstance(start_time, datetime): - raise exception.InvalidParameter(parameter='start_time', - parameter_type=datetime) - - if stop_time is not None and not isinstance(stop_time, datetime): - raise 
exception.InvalidParameter(parameter='stop_time', - parameter_type=datetime) - - raw_kwargs = dict( - metric=metric, - start=start_time, - stop=stop_time, - resource_id=resource_id, - granularity=granularity, - aggregation=aggregation, - ) - - kwargs = {k: v for k, v in raw_kwargs.items() if k and v} - - statistics = self.query_retry( - f=self.gnocchi.metric.get_measures, **kwargs) - - if statistics: - # return value of latest measure - # measure has structure [time, granularity, value] - return statistics[-1][2] diff --git a/watcher/datasource/monasca.py b/watcher/datasource/monasca.py deleted file mode 100644 index a85d06f..0000000 --- a/watcher/datasource/monasca.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -from monascaclient import exc - -from watcher.common import clients - - -class MonascaHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.monasca = self.osc.monasca() - - def query_retry(self, f, *args, **kwargs): - try: - return f(*args, **kwargs) - except exc.HTTPUnauthorized: - self.osc.reset_clients() - self.monasca = self.osc.monasca() - return f(*args, **kwargs) - except Exception: - raise - - def _format_time_params(self, start_time, end_time, period): - """Format time-related params to the correct Monasca format - - :param start_time: Start datetime from which metrics will be used - :param end_time: End datetime from which metrics will be used - :param period: interval in seconds (int) - :return: start ISO time, end ISO time, period - """ - - if not period: - period = int(datetime.timedelta(hours=3).total_seconds()) - if not start_time: - start_time = ( - datetime.datetime.utcnow() - - datetime.timedelta(seconds=period)) - - start_timestamp = None if not start_time else start_time.isoformat() - end_timestamp = None if not end_time else end_time.isoformat() - - return start_timestamp, end_timestamp, period - - def statistics_list(self, meter_name, dimensions, start_time=None, - end_time=None, period=None,): - """List of statistics.""" - start_timestamp, end_timestamp, period = self._format_time_params( - start_time, end_time, period - ) - raw_kwargs = dict( - name=meter_name, - start_time=start_timestamp, - end_time=end_timestamp, - dimensions=dimensions, - ) - - kwargs = {k: v for k, v in raw_kwargs.items() if k and v} - - statistics = self.query_retry( - f=self.monasca.metrics.list_measurements, **kwargs) - - return statistics - - def statistic_aggregation(self, - meter_name, - dimensions, - start_time=None, - end_time=None, - period=None, - aggregate='avg', - group_by='*'): - """Representing a statistic aggregate by operators 
- - :param meter_name: meter names of which we want the statistics - :param dimensions: dimensions (dict) - :param start_time: Start datetime from which metrics will be used - :param end_time: End datetime from which metrics will be used - :param period: Sampling `period`: In seconds. If no period is given, - only one aggregate statistic is returned. If given, a - faceted result will be returned, divided into given - periods. Periods with no data are ignored. - :param aggregate: Should be either 'avg', 'count', 'min' or 'max' - :return: A list of dict with each dict being a distinct result row - """ - start_timestamp, end_timestamp, period = self._format_time_params( - start_time, end_time, period - ) - - raw_kwargs = dict( - name=meter_name, - start_time=start_timestamp, - end_time=end_timestamp, - dimensions=dimensions, - period=period, - statistics=aggregate, - group_by=group_by, - ) - - kwargs = {k: v for k, v in raw_kwargs.items() if k and v} - - statistics = self.query_retry( - f=self.monasca.metrics.list_statistics, **kwargs) - - return statistics diff --git a/watcher/db/__init__.py b/watcher/db/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/db/api.py b/watcher/db/api.py deleted file mode 100644 index eb07493..0000000 --- a/watcher/db/api.py +++ /dev/null @@ -1,871 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Base classes for storage engines -""" - -import abc -from oslo_config import cfg -from oslo_db import api as db_api -import six - -_BACKEND_MAPPING = {'sqlalchemy': 'watcher.db.sqlalchemy.api'} -IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, - lazy=True) - - -def get_instance(): - """Return a DB API instance.""" - return IMPL - - -@six.add_metaclass(abc.ABCMeta) -class BaseConnection(object): - """Base class for storage system connections.""" - - @abc.abstractmethod - def get_goal_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching goals. - - Return a list of the specified columns for all goals that - match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of goals to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_goal(self, values): - """Create a new goal. - - :param values: A dict containing several items used to identify - and track the goal. For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'DUMMY', - 'display_name': 'Dummy', - } - :returns: A goal - :raises: :py:class:`~.GoalAlreadyExists` - """ - - @abc.abstractmethod - def get_goal_by_id(self, context, goal_id, eager=False): - """Return a goal given its ID. 
- - :param context: The security context - :param goal_id: The ID of a goal - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def get_goal_by_uuid(self, context, goal_uuid, eager=False): - """Return a goal given its UUID. - - :param context: The security context - :param goal_uuid: The UUID of a goal - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def get_goal_by_name(self, context, goal_name, eager=False): - """Return a goal given its name. - - :param context: The security context - :param goal_name: The name of a goal - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def destroy_goal(self, goal_uuid): - """Destroy a goal. - - :param goal_uuid: The UUID of a goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def update_goal(self, goal_uuid, values): - """Update properties of a goal. - - :param goal_uuid: The UUID of a goal - :param values: A dict containing several items used to identify - and track the goal. For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'DUMMY', - 'display_name': 'Dummy', - } - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_goal(self, goal_id): - """Soft delete a goal. - - :param goal_id: The id or uuid of a goal. - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def get_strategy_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=True): - """Get specific columns for matching strategies. - - Return a list of the specified columns for all strategies that - match the specified filters. 
- - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of strategies to return. - :param marker: The last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: Direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_strategy(self, values): - """Create a new strategy. - - :param values: A dict containing items used to identify - and track the strategy. For example: - - :: - - { - 'id': 1, - 'uuid': utils.generate_uuid(), - 'name': 'my_strategy', - 'display_name': 'My strategy', - 'goal_uuid': utils.generate_uuid(), - } - :returns: A strategy - :raises: :py:class:`~.StrategyAlreadyExists` - """ - - @abc.abstractmethod - def get_strategy_by_id(self, context, strategy_id, eager=False): - """Return a strategy given its ID. - - :param context: The security context - :param strategy_id: The ID of a strategy - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def get_strategy_by_uuid(self, context, strategy_uuid, eager=False): - """Return a strategy given its UUID. - - :param context: The security context - :param strategy_uuid: The UUID of a strategy - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def get_strategy_by_name(self, context, strategy_name, eager=False): - """Return a strategy given its name. 
- - :param context: The security context - :param strategy_name: The name of a strategy - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def destroy_strategy(self, strategy_uuid): - """Destroy a strategy. - - :param strategy_uuid: The UUID of a strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def update_strategy(self, strategy_uuid, values): - """Update properties of a strategy. - - :param strategy_uuid: The UUID of a strategy - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_strategy(self, strategy_id): - """Soft delete a strategy. - - :param strategy_id: The id or uuid of a strategy. - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def get_audit_template_list(self, context, filters=None, - limit=None, marker=None, sort_key=None, - sort_dir=None, eager=False): - """Get specific columns for matching audit templates. - - Return a list of the specified columns for all audit templates that - match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of audit templates to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_audit_template(self, values): - """Create a new audit template. - - :param values: A dict containing several items used to identify - and track the audit template. 
For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'example', - 'description': 'free text description' - 'goal': 'DUMMY' - } - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateAlreadyExists` - """ - - @abc.abstractmethod - def get_audit_template_by_id(self, context, audit_template_id, - eager=False): - """Return an audit template. - - :param context: The security context - :param audit_template_id: The id of an audit template. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def get_audit_template_by_uuid(self, context, audit_template_uuid, - eager=False): - """Return an audit template. - - :param context: The security context - :param audit_template_uuid: The uuid of an audit template. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - def get_audit_template_by_name(self, context, audit_template_name, - eager=False): - """Return an audit template. - - :param context: The security context - :param audit_template_name: The name of an audit template. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def destroy_audit_template(self, audit_template_id): - """Destroy an audit template. - - :param audit_template_id: The id or uuid of an audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def update_audit_template(self, audit_template_id, values): - """Update properties of an audit template. - - :param audit_template_id: The id or uuid of an audit template. - :returns: An audit template. 
- :raises: :py:class:`~.AuditTemplateNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def soft_delete_audit_template(self, audit_template_id): - """Soft delete an audit template. - - :param audit_template_id: The id or uuid of an audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def get_audit_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching audits. - - Return a list of the specified columns for all audits that match the - specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of audits to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_audit(self, values): - """Create a new audit. - - :param values: A dict containing several items used to identify - and track the audit, and several dicts which are passed - into the Drivers when managing this audit. For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'type': 'ONESHOT', - } - :returns: An audit. - :raises: :py:class:`~.AuditAlreadyExists` - """ - - @abc.abstractmethod - def get_audit_by_id(self, context, audit_id, eager=False): - """Return an audit. - - :param context: The security context - :param audit_id: The id of an audit. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def get_audit_by_uuid(self, context, audit_uuid, eager=False): - """Return an audit. 
- - :param context: The security context - :param audit_uuid: The uuid of an audit. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def destroy_audit(self, audit_id): - """Destroy an audit and all associated action plans. - - :param audit_id: The id or uuid of an audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def update_audit(self, audit_id, values): - """Update properties of an audit. - - :param audit_id: The id or uuid of an audit. - :returns: An audit. - :raises: :py:class:`~.AuditNotFound` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_audit(self, audit_id): - """Soft delete an audit and all associated action plans. - - :param audit_id: The id or uuid of an audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def get_action_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - """Get specific columns for matching actions. - - Return a list of the specified columns for all actions that match the - specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of actions to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_action(self, values): - """Create a new action. - - :param values: A dict containing several items used to identify - and track the action, and several dicts which are passed - into the Drivers when managing this action. 
For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'example', - 'description': 'free text description' - 'aggregate': 'nova aggregate name or uuid' - } - :returns: A action. - :raises: :py:class:`~.ActionAlreadyExists` - """ - - @abc.abstractmethod - def get_action_by_id(self, context, action_id, eager=False): - """Return a action. - - :param context: The security context - :param action_id: The id of a action. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A action. - :raises: :py:class:`~.ActionNotFound` - """ - - @abc.abstractmethod - def get_action_by_uuid(self, context, action_uuid, eager=False): - """Return a action. - - :param context: The security context - :param action_uuid: The uuid of a action. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A action. - :raises: :py:class:`~.ActionNotFound` - """ - - @abc.abstractmethod - def destroy_action(self, action_id): - """Destroy a action and all associated interfaces. - - :param action_id: The id or uuid of a action. - :raises: :py:class:`~.ActionNotFound` - :raises: :py:class:`~.ActionReferenced` - """ - - @abc.abstractmethod - def update_action(self, action_id, values): - """Update properties of a action. - - :param action_id: The id or uuid of a action. - :returns: A action. - :raises: :py:class:`~.ActionNotFound` - :raises: :py:class:`~.ActionReferenced` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_action(self, action_id): - """Soft delete an action. - - :param action_id: The id or uuid of an action. - :raises: :py:class:`~.ActionNotFound` - """ - - @abc.abstractmethod - def get_action_plan_list( - self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching action plans. - - Return a list of the specified columns for all action plans that - match the specified filters. 
- - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of audits to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_action_plan(self, values): - """Create a new action plan. - - :param values: A dict containing several items used to identify - and track the action plan. - :returns: An action plan. - :raises: :py:class:`~.ActionPlanAlreadyExists` - """ - - @abc.abstractmethod - def get_action_plan_by_id(self, context, action_plan_id, eager=False): - """Return an action plan. - - :param context: The security context - :param action_plan_id: The id of an action plan. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An action plan. - :raises: :py:class:`~.ActionPlanNotFound` - """ - - @abc.abstractmethod - def get_action_plan_by_uuid(self, context, action_plan__uuid, eager=False): - """Return a action plan. - - :param context: The security context - :param action_plan__uuid: The uuid of an action plan. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An action plan. - :raises: :py:class:`~.ActionPlanNotFound` - """ - - @abc.abstractmethod - def destroy_action_plan(self, action_plan_id): - """Destroy an action plan and all associated interfaces. - - :param action_plan_id: The id or uuid of a action plan. - :raises: :py:class:`~.ActionPlanNotFound` - :raises: :py:class:`~.ActionPlanReferenced` - """ - - @abc.abstractmethod - def update_action_plan(self, action_plan_id, values): - """Update properties of an action plan. - - :param action_plan_id: The id or uuid of an action plan. 
- :returns: An action plan. - :raises: :py:class:`~.ActionPlanNotFound` - :raises: :py:class:`~.ActionPlanReferenced` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_action_plan(self, action_plan_id): - """Soft delete an action plan. - - :param action_plan_id: The id or uuid of an action plan. - :raises: :py:class:`~.ActionPlanNotFound` - """ - - @abc.abstractmethod - def get_efficacy_indicator_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - """Get specific columns for matching efficacy indicators. - - Return a list of the specified columns for all efficacy indicators that - match the specified filters. - - :param context: The security context - :param columns: List of column names to return. - Defaults to 'id' column when columns == None. - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of efficacy indicators to return. - :param marker: The last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: Direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_efficacy_indicator(self, values): - """Create a new efficacy indicator. - - :param values: A dict containing items used to identify - and track the efficacy indicator. For example: - - :: - - { - 'id': 1, - 'uuid': utils.generate_uuid(), - 'name': 'my_efficacy_indicator', - 'display_name': 'My efficacy indicator', - 'goal_uuid': utils.generate_uuid(), - } - :returns: An efficacy_indicator - :raises: :py:class:`~.EfficacyIndicatorAlreadyExists` - """ - - @abc.abstractmethod - def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, - eager=False): - """Return an efficacy indicator given its ID. 
- - :param context: The security context - :param efficacy_indicator_id: The ID of an efficacy indicator - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, - eager=False): - """Return an efficacy indicator given its UUID. - - :param context: The security context - :param efficacy_indicator_uuid: The UUID of an efficacy indicator - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, - eager=False): - """Return an efficacy indicator given its name. - - :param context: The security context - :param efficacy_indicator_name: The name of an efficacy indicator - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def destroy_efficacy_indicator(self, efficacy_indicator_uuid): - """Destroy an efficacy indicator. - - :param efficacy_indicator_uuid: The UUID of an efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def update_efficacy_indicator(self, efficacy_indicator_id, values): - """Update properties of an efficacy indicator. - - :param efficacy_indicator_id: The ID of an efficacy indicator - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def get_scoring_engine_list( - self, context, columns=None, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching scoring engines. 
- - Return a list of the specified columns for all scoring engines that - match the specified filters. - - :param context: The security context - :param columns: List of column names to return. - Defaults to 'id' column when columns == None. - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of scoring engines to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_scoring_engine(self, values): - """Create a new scoring engine. - - :param values: A dict containing several items used to identify - and track the scoring engine. - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineAlreadyExists` - """ - - @abc.abstractmethod - def get_scoring_engine_by_id(self, context, scoring_engine_id, - eager=False): - """Return a scoring engine by its id. - - :param context: The security context - :param scoring_engine_id: The id of a scoring engine. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, - eager=False): - """Return a scoring engine by its uuid. - - :param context: The security context - :param scoring_engine_uuid: The uuid of a scoring engine. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def get_scoring_engine_by_name(self, context, scoring_engine_name, - eager=False): - """Return a scoring engine by its name. 
- - :param context: The security context - :param scoring_engine_name: The name of a scoring engine. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def destroy_scoring_engine(self, scoring_engine_id): - """Destroy a scoring engine. - - :param scoring_engine_id: The id of a scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def update_scoring_engine(self, scoring_engine_id, values): - """Update properties of a scoring engine. - - :param scoring_engine_id: The id of a scoring engine. - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def get_service_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching services. - - Return a list of the specified columns for all services that - match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of services to return. - :param marker: The last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: Direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_service(self, values): - """Create a new service. - - :param values: A dict containing items used to identify - and track the service. 
For example: - - :: - - { - 'id': 1, - 'name': 'watcher-api', - 'status': 'ACTIVE', - 'host': 'controller' - } - :returns: A service - :raises: :py:class:`~.ServiceAlreadyExists` - """ - - @abc.abstractmethod - def get_service_by_id(self, context, service_id, eager=False): - """Return a service given its ID. - - :param context: The security context - :param service_id: The ID of a service - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A service - :raises: :py:class:`~.ServiceNotFound` - """ - - @abc.abstractmethod - def get_service_by_name(self, context, service_name, eager=False): - """Return a service given its name. - - :param context: The security context - :param service_name: The name of a service - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A service - :raises: :py:class:`~.ServiceNotFound` - """ - - @abc.abstractmethod - def destroy_service(self, service_id): - """Destroy a service. - - :param service_id: The ID of a service - :raises: :py:class:`~.ServiceNotFound` - """ - - @abc.abstractmethod - def update_service(self, service_id, values): - """Update properties of a service. - - :param service_id: The ID of a service - :returns: A service - :raises: :py:class:`~.ServiceyNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def soft_delete_service(self, service_id): - """Soft delete a service. - - :param service_id: The id of a service. - :returns: A service. - :raises: :py:class:`~.ServiceNotFound` - """ diff --git a/watcher/db/migration.py b/watcher/db/migration.py deleted file mode 100644 index 1d65aa8..0000000 --- a/watcher/db/migration.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Database setup and migration commands.""" - -from oslo_config import cfg -from stevedore import driver - -_IMPL = None - - -def get_backend(): - global _IMPL - if not _IMPL: - cfg.CONF.import_opt('backend', 'oslo_db.options', group='database') - _IMPL = driver.DriverManager("watcher.database.migration_backend", - cfg.CONF.database.backend).driver - return _IMPL - - -def upgrade(version=None): - """Migrate the database to `version` or the most recent version.""" - return get_backend().upgrade(version) - - -def downgrade(version=None): - return get_backend().downgrade(version) - - -def version(): - return get_backend().version() - - -def stamp(version): - return get_backend().stamp(version) - - -def revision(message, autogenerate): - return get_backend().revision(message, autogenerate) - - -def create_schema(): - return get_backend().create_schema() diff --git a/watcher/db/purge.py b/watcher/db/purge.py deleted file mode 100644 index 4fb6e5e..0000000 --- a/watcher/db/purge.py +++ /dev/null @@ -1,476 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -from __future__ import print_function - -import collections -import datetime -import itertools -import sys - -from oslo_log import log -from oslo_utils import strutils -import prettytable as ptable -from six.moves import input - -from watcher._i18n import _ -from watcher._i18n import lazy_translation_enabled -from watcher.common import context -from watcher.common import exception -from watcher.common import utils -from watcher import objects - -LOG = log.getLogger(__name__) - - -class WatcherObjectsMap(object): - """Wrapper to deal with watcher objects per type - - This wrapper object contains a list of watcher objects per type. - Its main use is to simplify the merge of watcher objects by avoiding - duplicates, but also for representing the relationships between these - objects. - """ - - # This is for generating the .pot translations - keymap = collections.OrderedDict([ - ("goals", _("Goals")), - ("strategies", _("Strategies")), - ("audit_templates", _("Audit Templates")), - ("audits", _("Audits")), - ("action_plans", _("Action Plans")), - ("actions", _("Actions")), - ]) - - def __init__(self): - for attr_name in self.keys(): - setattr(self, attr_name, []) - - def values(self): - return (getattr(self, key) for key in self.keys()) - - @classmethod - def keys(cls): - return cls.keymap.keys() - - def __iter__(self): - return itertools.chain(*self.values()) - - def __add__(self, other): - new_map = self.__class__() - - # Merge the 2 items dicts into a new object (and avoid dupes) - for attr_name, initials, others in zip(self.keys(), self.values(), - other.values()): - # Creates a copy - merged = initials[:] - initials_ids = [item.id for item in initials] - non_dupes = [item for item in others - if item.id not in initials_ids] - merged += non_dupes - - setattr(new_map, attr_name, merged) - - return new_map - - def __str__(self): - out = "" - for key, vals in 
zip(self.keys(), self.values()): - ids = [val.id for val in vals] - out += "%(key)s: %(val)s" % (dict(key=key, val=ids)) - out += "\n" - return out - - def __len__(self): - return sum(len(getattr(self, key)) for key in self.keys()) - - def get_count_table(self): - headers = list(self.keymap.values()) - headers.append(_("Total")) # We also add a total count - translated_headers = [ - h.translate() if lazy_translation_enabled() else h - for h in headers - ] - - counters = [len(cat_vals) for cat_vals in self.values()] + [len(self)] - table = ptable.PrettyTable(field_names=translated_headers) - table.add_row(counters) - return table.get_string() - - -class PurgeCommand(object): - """Purges the DB by removing soft deleted entries - - The workflow for this purge is the following: - - # Find soft deleted objects which are expired - # Find orphan objects - # Find their related objects whether they are expired or not - # Merge them together - # If it does not exceed the limit, destroy them all - """ - - ctx = context.make_context(show_deleted=True) - - def __init__(self, age_in_days=None, max_number=None, - uuid=None, exclude_orphans=False, dry_run=None): - self.age_in_days = age_in_days - self.max_number = max_number - self.uuid = uuid - self.exclude_orphans = exclude_orphans - self.dry_run = dry_run - - self._delete_up_to_max = None - self._objects_map = WatcherObjectsMap() - - def get_expiry_date(self): - if not self.age_in_days: - return None - today = datetime.datetime.today() - expiry_date = today - datetime.timedelta(days=self.age_in_days) - return expiry_date - - @classmethod - def get_goal_uuid(cls, uuid_or_name): - if uuid_or_name is None: - return - - query_func = None - if not utils.is_uuid_like(uuid_or_name): - query_func = objects.Goal.get_by_name - else: - query_func = objects.Goal.get_by_uuid - - try: - goal = query_func(cls.ctx, uuid_or_name) - except Exception as exc: - LOG.exception(exc) - raise exception.GoalNotFound(goal=uuid_or_name) - - if not 
goal.deleted_at: - raise exception.NotSoftDeletedStateError( - name=_('Goal'), id=uuid_or_name) - - return goal.uuid - - def _find_goals(self, filters=None): - return objects.Goal.list(self.ctx, filters=filters) - - def _find_strategies(self, filters=None): - return objects.Strategy.list(self.ctx, filters=filters) - - def _find_audit_templates(self, filters=None): - return objects.AuditTemplate.list(self.ctx, filters=filters) - - def _find_audits(self, filters=None): - return objects.Audit.list(self.ctx, filters=filters) - - def _find_action_plans(self, filters=None): - return objects.ActionPlan.list(self.ctx, filters=filters) - - def _find_actions(self, filters=None): - return objects.Action.list(self.ctx, filters=filters) - - def _find_orphans(self): - orphans = WatcherObjectsMap() - - filters = dict(deleted=False) - goals = objects.Goal.list(self.ctx, filters=filters) - strategies = objects.Strategy.list(self.ctx, filters=filters) - audit_templates = objects.AuditTemplate.list(self.ctx, filters=filters) - audits = objects.Audit.list(self.ctx, filters=filters) - action_plans = objects.ActionPlan.list(self.ctx, filters=filters) - actions = objects.Action.list(self.ctx, filters=filters) - - goal_ids = set(g.id for g in goals) - orphans.strategies = [ - strategy for strategy in strategies - if strategy.goal_id not in goal_ids] - - strategy_ids = [s.id for s in (s for s in strategies - if s not in orphans.strategies)] - orphans.audit_templates = [ - audit_template for audit_template in audit_templates - if audit_template.goal_id not in goal_ids or - (audit_template.strategy_id and - audit_template.strategy_id not in strategy_ids)] - - orphans.audits = [ - audit for audit in audits - if audit.goal_id not in goal_ids or - (audit.strategy_id and - audit.strategy_id not in strategy_ids)] - - # Objects with orphan parents are themselves orphans - audit_ids = [audit.id for audit in audits - if audit not in orphans.audits] - orphans.action_plans = [ - ap for ap in 
action_plans - if ap.audit_id not in audit_ids or - ap.strategy_id not in strategy_ids] - - # Objects with orphan parents are themselves orphans - action_plan_ids = [ap.id for ap in action_plans - if ap not in orphans.action_plans] - orphans.actions = [ - action for action in actions - if action.action_plan_id not in action_plan_ids] - - LOG.debug("Orphans found:\n%s", orphans) - LOG.info("Orphans found:\n%s", orphans.get_count_table()) - - return orphans - - def _find_soft_deleted_objects(self): - to_be_deleted = WatcherObjectsMap() - expiry_date = self.get_expiry_date() - filters = dict(deleted=True) - - if self.uuid: - filters["uuid"] = self.uuid - if expiry_date: - filters.update(dict(deleted_at__lt=expiry_date)) - - to_be_deleted.goals.extend(self._find_goals(filters)) - to_be_deleted.strategies.extend(self._find_strategies(filters)) - to_be_deleted.audit_templates.extend( - self._find_audit_templates(filters)) - to_be_deleted.audits.extend(self._find_audits(filters)) - to_be_deleted.action_plans.extend( - self._find_action_plans(filters)) - to_be_deleted.actions.extend(self._find_actions(filters)) - - soft_deleted_objs = self._find_related_objects( - to_be_deleted, base_filters=dict(deleted=True)) - - LOG.debug("Soft deleted objects:\n%s", soft_deleted_objs) - - return soft_deleted_objs - - def _find_related_objects(self, objects_map, base_filters=None): - base_filters = base_filters or {} - - for goal in objects_map.goals: - filters = {} - filters.update(base_filters) - filters.update(dict(goal_id=goal.id)) - related_objs = WatcherObjectsMap() - related_objs.strategies = self._find_strategies(filters) - related_objs.audit_templates = self._find_audit_templates(filters) - related_objs.audits = self._find_audits(filters) - objects_map += related_objs - - for strategy in objects_map.strategies: - filters = {} - filters.update(base_filters) - filters.update(dict(strategy_id=strategy.id)) - related_objs = WatcherObjectsMap() - related_objs.audit_templates = 
self._find_audit_templates(filters) - related_objs.audits = self._find_audits(filters) - objects_map += related_objs - - for audit in objects_map.audits: - filters = {} - filters.update(base_filters) - filters.update(dict(audit_id=audit.id)) - related_objs = WatcherObjectsMap() - related_objs.action_plans = self._find_action_plans(filters) - objects_map += related_objs - - for action_plan in objects_map.action_plans: - filters = {} - filters.update(base_filters) - filters.update(dict(action_plan_id=action_plan.id)) - related_objs = WatcherObjectsMap() - related_objs.actions = self._find_actions(filters) - objects_map += related_objs - - return objects_map - - def confirmation_prompt(self): - print(self._objects_map.get_count_table()) - raw_val = input( - _("There are %(count)d objects set for deletion. " - "Continue? [y/N]") % dict(count=len(self._objects_map))) - - return strutils.bool_from_string(raw_val) - - def delete_up_to_max_prompt(self, objects_map): - print(objects_map.get_count_table()) - print(_("The number of objects (%(num)s) to delete from the database " - "exceeds the maximum number of objects (%(max_number)s) " - "specified.") % dict(max_number=self.max_number, - num=len(objects_map))) - raw_val = input( - _("Do you want to delete objects up to the specified maximum " - "number? 
[y/N]")) - - self._delete_up_to_max = strutils.bool_from_string(raw_val) - - return self._delete_up_to_max - - def _aggregate_objects(self): - """Objects aggregated on a 'per goal' basis""" - # todo: aggregate orphans as well - aggregate = [] - for goal in self._objects_map.goals: - related_objs = WatcherObjectsMap() - - # goals - related_objs.goals = [goal] - - # strategies - goal_ids = [goal.id] - related_objs.strategies = [ - strategy for strategy in self._objects_map.strategies - if strategy.goal_id in goal_ids - ] - - # audit templates - strategy_ids = [ - strategy.id for strategy in related_objs.strategies] - related_objs.audit_templates = [ - at for at in self._objects_map.audit_templates - if at.goal_id in goal_ids or - (at.strategy_id and at.strategy_id in strategy_ids) - ] - - # audits - related_objs.audits = [ - audit for audit in self._objects_map.audits - if audit.goal_id in goal_ids - ] - - # action plans - audit_ids = [audit.id for audit in related_objs.audits] - related_objs.action_plans = [ - action_plan for action_plan in self._objects_map.action_plans - if action_plan.audit_id in audit_ids - ] - - # actions - action_plan_ids = [ - action_plan.id for action_plan in related_objs.action_plans - ] - related_objs.actions = [ - action for action in self._objects_map.actions - if action.action_plan_id in action_plan_ids - ] - aggregate.append(related_objs) - - return aggregate - - def _get_objects_up_to_limit(self): - aggregated_objects = self._aggregate_objects() - to_be_deleted_subset = WatcherObjectsMap() - - for aggregate in aggregated_objects: - if len(aggregate) + len(to_be_deleted_subset) <= self.max_number: - to_be_deleted_subset += aggregate - else: - break - - LOG.debug(to_be_deleted_subset) - return to_be_deleted_subset - - def find_objects_to_delete(self): - """Finds all the objects to be purged - - :returns: A mapping with all the Watcher objects to purged - :rtype: :py:class:`~.WatcherObjectsMap` instance - """ - to_be_deleted = 
self._find_soft_deleted_objects() - - if not self.exclude_orphans: - to_be_deleted += self._find_orphans() - - LOG.debug("Objects to be deleted:\n%s", to_be_deleted) - - return to_be_deleted - - def do_delete(self): - LOG.info("Deleting...") - # Reversed to avoid errors with foreign keys - for entry in reversed(list(self._objects_map)): - entry.destroy() - - def execute(self): - LOG.info("Starting purge command") - self._objects_map = self.find_objects_to_delete() - - if (self.max_number is not None and - len(self._objects_map) > self.max_number): - if self.delete_up_to_max_prompt(self._objects_map): - self._objects_map = self._get_objects_up_to_limit() - else: - return - - _orphans_note = (_(" (orphans excluded)") if self.exclude_orphans - else _(" (may include orphans)")) - if not self.dry_run and self.confirmation_prompt(): - self.do_delete() - print(_("Purge results summary%s:") % _orphans_note) - LOG.info("Purge results summary%s:", _orphans_note) - else: - LOG.debug(self._objects_map) - print(_("Here below is a table containing the objects " - "that can be purged%s:") % _orphans_note) - - LOG.info("\n%s", self._objects_map.get_count_table()) - print(self._objects_map.get_count_table()) - LOG.info("Purge process completed") - - -def purge(age_in_days, max_number, goal, exclude_orphans, dry_run): - """Removes soft deleted objects from the database - - :param age_in_days: Number of days since deletion (from today) - to exclude from the purge. If None, everything will be purged. - :type age_in_days: int - :param max_number: Max number of objects expected to be deleted. - Prevents the deletion if exceeded. No limit if set to None. - :type max_number: int - :param goal: UUID or name of the goal to purge. - :type goal: str - :param exclude_orphans: Flag to indicate whether or not you want to - exclude orphans from deletion (default: False). - :type exclude_orphans: bool - :param dry_run: Flag to indicate whether or not you want to perform - a dry run (no deletion). 
- :type dry_run: bool - """ - try: - if max_number and max_number < 0: - raise exception.NegativeLimitError - - LOG.info("[options] age_in_days = %s", age_in_days) - LOG.info("[options] max_number = %s", max_number) - LOG.info("[options] goal = %s", goal) - LOG.info("[options] exclude_orphans = %s", exclude_orphans) - LOG.info("[options] dry_run = %s", dry_run) - - uuid = PurgeCommand.get_goal_uuid(goal) - - cmd = PurgeCommand(age_in_days, max_number, uuid, - exclude_orphans, dry_run) - - cmd.execute() - - except Exception as exc: - LOG.exception(exc) - print(exc) - sys.exit(1) diff --git a/watcher/db/sqlalchemy/__init__.py b/watcher/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/db/sqlalchemy/alembic.ini b/watcher/db/sqlalchemy/alembic.ini deleted file mode 100644 index a768980..0000000 --- a/watcher/db/sqlalchemy/alembic.ini +++ /dev/null @@ -1,54 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = %(here)s/alembic - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# max length of characters to apply to the -# "slug" field -#truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -#sqlalchemy.url = driver://user:pass@localhost/dbname - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git 
a/watcher/db/sqlalchemy/alembic/README.rst b/watcher/db/sqlalchemy/alembic/README.rst deleted file mode 100644 index 1faab71..0000000 --- a/watcher/db/sqlalchemy/alembic/README.rst +++ /dev/null @@ -1,62 +0,0 @@ -The migrations in the alembic/versions contain the changes needed to migrate -from older Watcher releases to newer versions. A migration occurs by executing -a script that details the changes needed to upgrade/downgrade the database. The -migration scripts are ordered so that multiple scripts can run sequentially to -update the database. The scripts are executed by Watcher's migration wrapper -which uses the Alembic library to manage the migration. Watcher supports -migration from Ocata or later. - - -If you are a deployer or developer and want to migrate from Ocata to later -release you must first add version tracking to the database:: - - $ watcher-db-manage --config-file /path/to/watcher.conf stamp ocata - - -You can upgrade to the latest database version via:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head - - -To check the current database version:: - - $ watcher-db-manage --config-file /path/to/watcher.conf current - - -To create a script to run the migration offline:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head --sql - - -To run the offline migration between specific migration versions:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade \ - : --sql - - -Upgrade the database incrementally:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade --revision \ - <# of revs> - - -Downgrade the database by a certain number of revisions:: - - $ watcher-db-manage --config-file /path/to/watcher.conf downgrade --revision \ - <# of revs> - - -Create new revision:: - - $ watcher-db-manage --config-file /path/to/watcher.conf revision \ - -m "description of revision" --autogenerate - - -Create a blank file:: - - $ watcher-db-manage --config-file /path/to/watcher.conf revision \ 
- -m "description of revision" - -Please see https://alembic.readthedocs.org/en/latest/index.html for general -documentation - diff --git a/watcher/db/sqlalchemy/alembic/env.py b/watcher/db/sqlalchemy/alembic/env.py deleted file mode 100644 index 474b1ca..0000000 --- a/watcher/db/sqlalchemy/alembic/env.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from logging import config as log_config - -from alembic import context - -from watcher.db.sqlalchemy import api as sqla_api -from watcher.db.sqlalchemy import models - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -log_config.fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -target_metadata = models.Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. 
- - """ - engine = sqla_api.get_engine() - with engine.connect() as connection: - context.configure(connection=connection, - target_metadata=target_metadata) - with context.begin_transaction(): - context.run_migrations() - - -run_migrations_online() diff --git a/watcher/db/sqlalchemy/alembic/script.py.mako b/watcher/db/sqlalchemy/alembic/script.py.mako deleted file mode 100644 index 9570201..0000000 --- a/watcher/db/sqlalchemy/alembic/script.py.mako +++ /dev/null @@ -1,22 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/watcher/db/sqlalchemy/alembic/versions/001_ocata.py b/watcher/db/sqlalchemy/alembic/versions/001_ocata.py deleted file mode 100644 index 22d7220..0000000 --- a/watcher/db/sqlalchemy/alembic/versions/001_ocata.py +++ /dev/null @@ -1,203 +0,0 @@ -"""ocata release - -Revision ID: 9894235b4278 -Revises: None -Create Date: 2017-02-01 09:40:05.065981 - -""" -from alembic import op -import oslo_db -import sqlalchemy as sa -from watcher.db.sqlalchemy import models - - -# revision identifiers, used by Alembic. 
-revision = '001' -down_revision = None - - -def upgrade(): - op.create_table( - 'goals', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=False), - sa.Column('display_name', sa.String(length=63), nullable=False), - sa.Column('efficacy_specification', models.JSONEncodedList(), - nullable=False), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('name', 'deleted', name='uniq_goals0name'), - sa.UniqueConstraint('uuid', name='uniq_goals0uuid') - ) - - op.create_table( - 'scoring_engines', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=False), - sa.Column('name', sa.String(length=63), nullable=False), - sa.Column('description', sa.String(length=255), nullable=True), - sa.Column('metainfo', sa.Text(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('name', 'deleted', - name='uniq_scoring_engines0name'), - sa.UniqueConstraint('uuid', name='uniq_scoring_engines0uuid') - ) - - op.create_table( - 'services', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('name', sa.String(length=255), nullable=False), - 
sa.Column('host', sa.String(length=255), nullable=False), - sa.Column('last_seen_up', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('host', 'name', 'deleted', - name='uniq_services0host0name0deleted') - ) - - op.create_table( - 'strategies', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=False), - sa.Column('display_name', sa.String(length=63), nullable=False), - sa.Column('goal_id', sa.Integer(), nullable=False), - sa.Column('parameters_spec', models.JSONEncodedDict(), - nullable=True), - sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), - sa.UniqueConstraint('uuid', name='uniq_strategies0uuid') - ) - - op.create_table( - 'audit_templates', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=True), - sa.Column('description', sa.String(length=255), nullable=True), - sa.Column('goal_id', sa.Integer(), nullable=False), - sa.Column('strategy_id', sa.Integer(), nullable=True), - sa.Column('scope', models.JSONEncodedList(), - nullable=True), - sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), - sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), - sa.PrimaryKeyConstraint('id'), - 
sa.UniqueConstraint('name', 'deleted', - name='uniq_audit_templates0name'), - sa.UniqueConstraint('uuid', name='uniq_audit_templates0uuid') - ) - op.create_table( - 'audits', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('audit_type', sa.String(length=20), nullable=True), - sa.Column('state', sa.String(length=20), nullable=True), - sa.Column('parameters', models.JSONEncodedDict(), nullable=True), - sa.Column('interval', sa.Integer(), nullable=True), - sa.Column('goal_id', sa.Integer(), nullable=False), - sa.Column('strategy_id', sa.Integer(), nullable=True), - sa.Column('scope', models.JSONEncodedList(), nullable=True), - sa.Column('auto_trigger', sa.Boolean(), nullable=False), - sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), - sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_audits0uuid') - ) - op.create_table( - 'action_plans', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('audit_id', sa.Integer(), nullable=False), - sa.Column('strategy_id', sa.Integer(), nullable=False), - sa.Column('state', sa.String(length=20), nullable=True), - sa.Column('global_efficacy', models.JSONEncodedDict(), nullable=True), - sa.ForeignKeyConstraint(['audit_id'], ['audits.id'], ), - sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], 
), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_action_plans0uuid') - ) - - op.create_table( - 'actions', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=False), - sa.Column('action_plan_id', sa.Integer(), nullable=False), - sa.Column('action_type', sa.String(length=255), nullable=False), - sa.Column('input_parameters', models.JSONEncodedDict(), nullable=True), - sa.Column('state', sa.String(length=20), nullable=True), - sa.Column('parents', models.JSONEncodedList(), nullable=True), - sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_actions0uuid') - ) - - op.create_table( - 'efficacy_indicators', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=True), - sa.Column('description', sa.String(length=255), nullable=True), - sa.Column('unit', sa.String(length=63), nullable=True), - sa.Column('value', sa.Numeric(), nullable=True), - sa.Column('action_plan_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid') - ) - - -def downgrade(): - op.drop_table('efficacy_indicators') - op.drop_table('actions') - op.drop_table('action_plans') - 
op.drop_table('audits') - op.drop_table('audit_templates') - op.drop_table('strategies') - op.drop_table('services') - op.drop_table('scoring_engines') - op.drop_table('goals') diff --git a/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py b/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py deleted file mode 100644 index 56f4c8c..0000000 --- a/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Add apscheduler_jobs table to store background jobs - -Revision ID: 0f6042416884 -Revises: 001 -Create Date: 2017-03-24 11:21:29.036532 - -""" -from alembic import op -import sqlalchemy as sa - -from watcher.db.sqlalchemy import models - -# revision identifiers, used by Alembic. -revision = '0f6042416884' -down_revision = '001' - - -def upgrade(): - op.create_table( - 'apscheduler_jobs', - sa.Column('id', sa.Unicode(191, _warn_on_bytestring=False), - nullable=False), - sa.Column('next_run_time', sa.Float(25), index=True), - sa.Column('job_state', sa.LargeBinary, nullable=False), - sa.Column('service_id', sa.Integer(), nullable=False), - sa.Column('tag', models.JSONEncodedDict(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.ForeignKeyConstraint(['service_id'], ['services.id']) - ) - - -def downgrade(): - op.drop_table('apscheduler_jobs') diff --git a/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py b/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py deleted file mode 100644 index 1fae4e8..0000000 --- a/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Add cron support for audit table - -Revision ID: d098df6021e2 -Revises: 0f6042416884 -Create Date: 2017-06-08 16:21:35.746752 - -""" -from alembic import op -import sqlalchemy as sa - -# revision identifiers, used by Alembic. 
-revision = 'd098df6021e2' -down_revision = '0f6042416884' - - -def upgrade(): - op.alter_column('audits', 'interval', existing_type=sa.String(36), - nullable=True) - op.add_column('audits', - sa.Column('next_run_time', sa.DateTime(), nullable=True)) - - -def downgrade(): - op.alter_column('audits', 'interval', existing_type=sa.Integer(), - nullable=True) - op.drop_column('audits', 'next_run_time') diff --git a/watcher/db/sqlalchemy/api.py b/watcher/db/sqlalchemy/api.py deleted file mode 100644 index ebe9197..0000000 --- a/watcher/db/sqlalchemy/api.py +++ /dev/null @@ -1,1129 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""SQLAlchemy storage backend.""" - -import collections -import datetime -import operator - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import session as db_session -from oslo_db.sqlalchemy import utils as db_utils -from oslo_utils import timeutils -from sqlalchemy.inspection import inspect -from sqlalchemy.orm import exc -from sqlalchemy.orm import joinedload - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils -from watcher.db import api -from watcher.db.sqlalchemy import models -from watcher import objects - -CONF = cfg.CONF - -_FACADE = None - - -def _create_facade_lazily(): - global _FACADE - if _FACADE is None: - _FACADE = db_session.EngineFacade.from_config(CONF) - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - - -def get_backend(): - """The backend is this module itself.""" - return Connection() - - -def model_query(model, *args, **kwargs): - """Query helper for simpler session usage. - - :param session: if present, the session to use - """ - session = kwargs.get('session') or get_session() - query = session.query(model, *args) - return query - - -def add_identity_filter(query, value): - """Adds an identity filter to a query. - - Filters results by ID, if supplied value is a valid integer. - Otherwise attempts to filter results by UUID. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. 
- """ - if utils.is_int_like(value): - return query.filter_by(id=value) - elif utils.is_uuid_like(value): - return query.filter_by(uuid=value) - else: - raise exception.InvalidIdentity(identity=value) - - -def _paginate_query(model, limit=None, marker=None, sort_key=None, - sort_dir=None, query=None): - if not query: - query = model_query(model) - sort_keys = ['id'] - if sort_key and sort_key not in sort_keys: - sort_keys.insert(0, sort_key) - query = db_utils.paginate_query(query, model, limit, sort_keys, - marker=marker, sort_dir=sort_dir) - return query.all() - - -class JoinMap(utils.Struct): - """Mapping for the Join-based queries""" - - -NaturalJoinFilter = collections.namedtuple( - 'NaturalJoinFilter', ['join_fieldname', 'join_model']) - - -class Connection(api.BaseConnection): - """SqlAlchemy connection.""" - - valid_operators = { - "": operator.eq, - "eq": operator.eq, - "neq": operator.ne, - "gt": operator.gt, - "gte": operator.ge, - "lt": operator.lt, - "lte": operator.le, - "in": lambda field, choices: field.in_(choices), - "notin": lambda field, choices: field.notin_(choices), - } - - def __init__(self): - super(Connection, self).__init__() - - def __add_simple_filter(self, query, model, fieldname, value, operator_): - field = getattr(model, fieldname) - - if (fieldname != 'deleted' and value and - field.type.python_type is datetime.datetime): - if not isinstance(value, datetime.datetime): - value = timeutils.parse_isotime(value) - - return query.filter(self.valid_operators[operator_](field, value)) - - def __add_join_filter(self, query, model, fieldname, value, operator_): - query = query.join(model) - return self.__add_simple_filter(query, model, fieldname, - value, operator_) - - def __decompose_filter(self, raw_fieldname): - """Decompose a filter name into its 2 subparts - - A filter can take 2 forms: - - - "" which is a syntactic sugar for "__eq" - - "__" where is the comparison operator - to be used. 
- - Available operators are: - - - eq - - neq - - gt - - gte - - lt - - lte - - in - - notin - """ - separator = '__' - fieldname, separator, operator_ = raw_fieldname.partition(separator) - - if operator_ and operator_ not in self.valid_operators: - raise exception.InvalidOperator( - operator=operator_, valid_operators=self.valid_operators) - - return fieldname, operator_ - - def _add_filters(self, query, model, filters=None, - plain_fields=None, join_fieldmap=None): - """Generic way to add filters to a Watcher model - - Each filter key provided by the `filters` parameter will be decomposed - into 2 pieces: the field name and the comparison operator - - - "": By default, the "eq" is applied if no operator is provided - - "eq", which stands for "equal" : e.g. {"state__eq": "PENDING"} - will result in the "WHERE state = 'PENDING'" clause. - - "neq", which stands for "not equal" : e.g. {"state__neq": "PENDING"} - will result in the "WHERE state != 'PENDING'" clause. - - "gt", which stands for "greater than" : e.g. - {"created_at__gt": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at > '2016-06-06T10:33:22.063176'" clause. - - "gte", which stands for "greater than or equal to" : e.g. - {"created_at__gte": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at >= '2016-06-06T10:33:22.063176'" clause. - - "lt", which stands for "less than" : e.g. - {"created_at__lt": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at < '2016-06-06T10:33:22.063176'" clause. - - "lte", which stands for "less than or equal to" : e.g. - {"created_at__lte": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at <= '2016-06-06T10:33:22.063176'" clause. - - "in": e.g. {"state__in": ('SUCCEEDED', 'FAILED')} will result in the - "WHERE state IN ('SUCCEEDED', 'FAILED')" clause. 
- - :param query: a :py:class:`sqlalchemy.orm.query.Query` instance - :param model: the model class the filters should relate to - :param filters: dict with the following structure {"fieldname": value} - :param plain_fields: a :py:class:`sqlalchemy.orm.query.Query` instance - :param join_fieldmap: a :py:class:`sqlalchemy.orm.query.Query` instance - """ - soft_delete_mixin_fields = ['deleted', 'deleted_at'] - timestamp_mixin_fields = ['created_at', 'updated_at'] - filters = filters or {} - - # Special case for 'deleted' because it is a non-boolean flag - if 'deleted' in filters: - deleted_filter = filters.pop('deleted') - op = 'eq' if not bool(deleted_filter) else 'neq' - filters['deleted__%s' % op] = 0 - - plain_fields = tuple( - (list(plain_fields) or []) + - soft_delete_mixin_fields + - timestamp_mixin_fields) - join_fieldmap = join_fieldmap or {} - - for raw_fieldname, value in filters.items(): - fieldname, operator_ = self.__decompose_filter(raw_fieldname) - if fieldname in plain_fields: - query = self.__add_simple_filter( - query, model, fieldname, value, operator_) - elif fieldname in join_fieldmap: - join_field, join_model = join_fieldmap[fieldname] - query = self.__add_join_filter( - query, join_model, join_field, value, operator_) - - return query - - @staticmethod - def _get_relationships(model): - return inspect(model).relationships - - @staticmethod - def _set_eager_options(model, query): - relationships = inspect(model).relationships - for relationship in relationships: - if not relationship.uselist: - # We have a One-to-X relationship - query = query.options(joinedload(relationship.key)) - return query - - def _create(self, model, values): - obj = model() - cleaned_values = {k: v for k, v in values.items() - if k not in self._get_relationships(model)} - obj.update(cleaned_values) - obj.save() - return obj - - def _get(self, context, model, fieldname, value, eager): - query = model_query(model) - if eager: - query = self._set_eager_options(model, 
query) - - query = query.filter(getattr(model, fieldname) == value) - if not context.show_deleted: - query = query.filter(model.deleted_at.is_(None)) - - try: - obj = query.one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=value) - - return obj - - @staticmethod - def _update(model, id_, values): - session = get_session() - with session.begin(): - query = model_query(model, session=session) - query = add_identity_filter(query, id_) - try: - ref = query.with_lockmode('update').one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=id_) - - ref.update(values) - return ref - - @staticmethod - def _soft_delete(model, id_): - session = get_session() - with session.begin(): - query = model_query(model, session=session) - query = add_identity_filter(query, id_) - try: - row = query.one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=id_) - - row.soft_delete(session) - - return row - - @staticmethod - def _destroy(model, id_): - session = get_session() - with session.begin(): - query = model_query(model, session=session) - query = add_identity_filter(query, id_) - - try: - query.one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=id_) - - query.delete() - - def _add_goals_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'name', 'display_name'] - - return self._add_filters( - query=query, model=models.Goal, filters=filters, - plain_fields=plain_fields) - - def _add_strategies_filters(self, query, filters): - plain_fields = ['uuid', 'name', 'display_name', 'goal_id'] - join_fieldmap = JoinMap( - goal_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Goal), - goal_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Goal)) - return self._add_filters( - query=query, model=models.Strategy, filters=filters, - plain_fields=plain_fields, 
join_fieldmap=join_fieldmap) - - def _add_audit_templates_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'name', 'goal_id', 'strategy_id'] - join_fieldmap = JoinMap( - goal_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Goal), - goal_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Goal), - strategy_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Strategy), - strategy_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Strategy), - ) - - return self._add_filters( - query=query, model=models.AuditTemplate, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - def _add_audits_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'audit_type', 'state', 'goal_id', - 'strategy_id'] - join_fieldmap = { - 'goal_uuid': ("uuid", models.Goal), - 'goal_name': ("name", models.Goal), - 'strategy_uuid': ("uuid", models.Strategy), - 'strategy_name': ("name", models.Strategy), - } - - return self._add_filters( - query=query, model=models.Audit, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - def _add_action_plans_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'state', 'audit_id', 'strategy_id'] - join_fieldmap = JoinMap( - audit_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Audit), - strategy_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Strategy), - strategy_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Strategy), - ) - - return self._add_filters( - query=query, model=models.ActionPlan, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - def _add_actions_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'state', 'action_plan_id'] - join_fieldmap = { - 'action_plan_uuid': ("uuid", 
models.ActionPlan), - } - - query = self._add_filters( - query=query, model=models.Action, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - if 'audit_uuid' in filters: - stmt = model_query(models.ActionPlan).join( - models.Audit, - models.Audit.id == models.ActionPlan.audit_id)\ - .filter_by(uuid=filters['audit_uuid']).subquery() - query = query.filter_by(action_plan_id=stmt.c.id) - - return query - - def _add_efficacy_indicators_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'name', 'unit', 'schema', 'action_plan_id'] - join_fieldmap = JoinMap( - action_plan_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.ActionPlan), - ) - - return self._add_filters( - query=query, model=models.EfficacyIndicator, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - # ### GOALS ### # - - def get_goal_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Goal) - if eager: - query = self._set_eager_options(models.Goal, query) - query = self._add_goals_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Goal, limit, marker, - sort_key, sort_dir, query) - - def create_goal(self, values): - # ensure defaults are present for new goals - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - goal = self._create(models.Goal, values) - except db_exc.DBDuplicateEntry: - raise exception.GoalAlreadyExists(uuid=values['uuid']) - return goal - - def _get_goal(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Goal, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=value) - - def get_goal_by_id(self, context, goal_id, eager=False): - return self._get_goal( - context, fieldname="id", 
value=goal_id, eager=eager) - - def get_goal_by_uuid(self, context, goal_uuid, eager=False): - return self._get_goal( - context, fieldname="uuid", value=goal_uuid, eager=eager) - - def get_goal_by_name(self, context, goal_name, eager=False): - return self._get_goal( - context, fieldname="name", value=goal_name, eager=eager) - - def destroy_goal(self, goal_id): - try: - return self._destroy(models.Goal, goal_id) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=goal_id) - - def update_goal(self, goal_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing Goal.")) - - try: - return self._update(models.Goal, goal_id, values) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=goal_id) - - def soft_delete_goal(self, goal_id): - try: - return self._soft_delete(models.Goal, goal_id) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=goal_id) - - # ### STRATEGIES ### # - - def get_strategy_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=True): - query = model_query(models.Strategy) - if eager: - query = self._set_eager_options(models.Strategy, query) - query = self._add_strategies_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Strategy, limit, marker, - sort_key, sort_dir, query) - - def create_strategy(self, values): - # ensure defaults are present for new strategies - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - strategy = self._create(models.Strategy, values) - except db_exc.DBDuplicateEntry: - raise exception.StrategyAlreadyExists(uuid=values['uuid']) - return strategy - - def _get_strategy(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Strategy, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise 
exception.StrategyNotFound(strategy=value) - - def get_strategy_by_id(self, context, strategy_id, eager=False): - return self._get_strategy( - context, fieldname="id", value=strategy_id, eager=eager) - - def get_strategy_by_uuid(self, context, strategy_uuid, eager=False): - return self._get_strategy( - context, fieldname="uuid", value=strategy_uuid, eager=eager) - - def get_strategy_by_name(self, context, strategy_name, eager=False): - return self._get_strategy( - context, fieldname="name", value=strategy_name, eager=eager) - - def destroy_strategy(self, strategy_id): - try: - return self._destroy(models.Strategy, strategy_id) - except exception.ResourceNotFound: - raise exception.StrategyNotFound(strategy=strategy_id) - - def update_strategy(self, strategy_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing Strategy.")) - - try: - return self._update(models.Strategy, strategy_id, values) - except exception.ResourceNotFound: - raise exception.StrategyNotFound(strategy=strategy_id) - - def soft_delete_strategy(self, strategy_id): - try: - return self._soft_delete(models.Strategy, strategy_id) - except exception.ResourceNotFound: - raise exception.StrategyNotFound(strategy=strategy_id) - - # ### AUDIT TEMPLATES ### # - - def get_audit_template_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - - query = model_query(models.AuditTemplate) - if eager: - query = self._set_eager_options(models.AuditTemplate, query) - query = self._add_audit_templates_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.AuditTemplate, limit, marker, - sort_key, sort_dir, query) - - def create_audit_template(self, values): - # ensure defaults are present for new audit_templates - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - query = model_query(models.AuditTemplate) - 
query = query.filter_by(name=values.get('name'), - deleted_at=None) - - if len(query.all()) > 0: - raise exception.AuditTemplateAlreadyExists( - audit_template=values['name']) - - try: - audit_template = self._create(models.AuditTemplate, values) - except db_exc.DBDuplicateEntry: - raise exception.AuditTemplateAlreadyExists( - audit_template=values['name']) - return audit_template - - def _get_audit_template(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.AuditTemplate, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.AuditTemplateNotFound(audit_template=value) - - def get_audit_template_by_id(self, context, audit_template_id, - eager=False): - return self._get_audit_template( - context, fieldname="id", value=audit_template_id, eager=eager) - - def get_audit_template_by_uuid(self, context, audit_template_uuid, - eager=False): - return self._get_audit_template( - context, fieldname="uuid", value=audit_template_uuid, eager=eager) - - def get_audit_template_by_name(self, context, audit_template_name, - eager=False): - return self._get_audit_template( - context, fieldname="name", value=audit_template_name, eager=eager) - - def destroy_audit_template(self, audit_template_id): - try: - return self._destroy(models.AuditTemplate, audit_template_id) - except exception.ResourceNotFound: - raise exception.AuditTemplateNotFound( - audit_template=audit_template_id) - - def update_audit_template(self, audit_template_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Audit Template.")) - try: - return self._update( - models.AuditTemplate, audit_template_id, values) - except exception.ResourceNotFound: - raise exception.AuditTemplateNotFound( - audit_template=audit_template_id) - - def soft_delete_audit_template(self, audit_template_id): - try: - return self._soft_delete(models.AuditTemplate, audit_template_id) - except 
exception.ResourceNotFound: - raise exception.AuditTemplateNotFound( - audit_template=audit_template_id) - - # ### AUDITS ### # - - def get_audit_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Audit) - if eager: - query = self._set_eager_options(models.Audit, query) - query = self._add_audits_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - - return _paginate_query(models.Audit, limit, marker, - sort_key, sort_dir, query) - - def create_audit(self, values): - # ensure defaults are present for new audits - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - if values.get('state') is None: - values['state'] = objects.audit.State.PENDING - - if not values.get('auto_trigger'): - values['auto_trigger'] = False - - try: - audit = self._create(models.Audit, values) - except db_exc.DBDuplicateEntry: - raise exception.AuditAlreadyExists(uuid=values['uuid']) - return audit - - def _get_audit(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Audit, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.AuditNotFound(audit=value) - - def get_audit_by_id(self, context, audit_id, eager=False): - return self._get_audit( - context, fieldname="id", value=audit_id, eager=eager) - - def get_audit_by_uuid(self, context, audit_uuid, eager=False): - return self._get_audit( - context, fieldname="uuid", value=audit_uuid, eager=eager) - - def destroy_audit(self, audit_id): - def is_audit_referenced(session, audit_id): - """Checks whether the audit is referenced by action_plan(s).""" - query = model_query(models.ActionPlan, session=session) - query = self._add_action_plans_filters( - query, {'audit_id': audit_id}) - return query.count() != 0 - - session = get_session() - with session.begin(): - query = model_query(models.Audit, session=session) - 
query = add_identity_filter(query, audit_id) - - try: - audit_ref = query.one() - except exc.NoResultFound: - raise exception.AuditNotFound(audit=audit_id) - - if is_audit_referenced(session, audit_ref['id']): - raise exception.AuditReferenced(audit=audit_id) - - query.delete() - - def update_audit(self, audit_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Audit.")) - - try: - return self._update(models.Audit, audit_id, values) - except exception.ResourceNotFound: - raise exception.AuditNotFound(audit=audit_id) - - def soft_delete_audit(self, audit_id): - try: - return self._soft_delete(models.Audit, audit_id) - except exception.ResourceNotFound: - raise exception.AuditNotFound(audit=audit_id) - - # ### ACTIONS ### # - - def get_action_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Action) - if eager: - query = self._set_eager_options(models.Action, query) - query = self._add_actions_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Action, limit, marker, - sort_key, sort_dir, query) - - def create_action(self, values): - # ensure defaults are present for new actions - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - if values.get('state') is None: - values['state'] = objects.action.State.PENDING - - try: - action = self._create(models.Action, values) - except db_exc.DBDuplicateEntry: - raise exception.ActionAlreadyExists(uuid=values['uuid']) - return action - - def _get_action(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Action, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ActionNotFound(action=value) - - def get_action_by_id(self, context, action_id, eager=False): - return self._get_action( - context, 
fieldname="id", value=action_id, eager=eager) - - def get_action_by_uuid(self, context, action_uuid, eager=False): - return self._get_action( - context, fieldname="uuid", value=action_uuid, eager=eager) - - def destroy_action(self, action_id): - session = get_session() - with session.begin(): - query = model_query(models.Action, session=session) - query = add_identity_filter(query, action_id) - count = query.delete() - if count != 1: - raise exception.ActionNotFound(action_id) - - def update_action(self, action_id, values): - # NOTE(dtantsur): this can lead to very strange errors - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing Action.")) - - return self._do_update_action(action_id, values) - - @staticmethod - def _do_update_action(action_id, values): - session = get_session() - with session.begin(): - query = model_query(models.Action, session=session) - query = add_identity_filter(query, action_id) - try: - ref = query.with_lockmode('update').one() - except exc.NoResultFound: - raise exception.ActionNotFound(action=action_id) - - ref.update(values) - return ref - - def soft_delete_action(self, action_id): - try: - return self._soft_delete(models.Action, action_id) - except exception.ResourceNotFound: - raise exception.ActionNotFound(action=action_id) - - # ### ACTION PLANS ### # - - def get_action_plan_list( - self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.ActionPlan) - if eager: - query = self._set_eager_options(models.ActionPlan, query) - query = self._add_action_plans_filters(query, filters) - if not context.show_deleted: - query = query.filter(models.ActionPlan.deleted_at.is_(None)) - - return _paginate_query(models.ActionPlan, limit, marker, - sort_key, sort_dir, query) - - def create_action_plan(self, values): - # ensure defaults are present for new audits - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() 
- - try: - action_plan = self._create(models.ActionPlan, values) - except db_exc.DBDuplicateEntry: - raise exception.ActionPlanAlreadyExists(uuid=values['uuid']) - return action_plan - - def _get_action_plan(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.ActionPlan, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ActionPlanNotFound(action_plan=value) - - def get_action_plan_by_id(self, context, action_plan_id, eager=False): - return self._get_action_plan( - context, fieldname="id", value=action_plan_id, eager=eager) - - def get_action_plan_by_uuid(self, context, action_plan_uuid, eager=False): - return self._get_action_plan( - context, fieldname="uuid", value=action_plan_uuid, eager=eager) - - def destroy_action_plan(self, action_plan_id): - def is_action_plan_referenced(session, action_plan_id): - """Checks whether the action_plan is referenced by action(s).""" - query = model_query(models.Action, session=session) - query = self._add_actions_filters( - query, {'action_plan_id': action_plan_id}) - return query.count() != 0 - - session = get_session() - with session.begin(): - query = model_query(models.ActionPlan, session=session) - query = add_identity_filter(query, action_plan_id) - - try: - action_plan_ref = query.one() - except exc.NoResultFound: - raise exception.ActionPlanNotFound(action_plan=action_plan_id) - - if is_action_plan_referenced(session, action_plan_ref['id']): - raise exception.ActionPlanReferenced( - action_plan=action_plan_id) - - query.delete() - - def update_action_plan(self, action_plan_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Action Plan.")) - - return self._do_update_action_plan(action_plan_id, values) - - @staticmethod - def _do_update_action_plan(action_plan_id, values): - session = get_session() - with session.begin(): - query = model_query(models.ActionPlan, 
session=session) - query = add_identity_filter(query, action_plan_id) - try: - ref = query.with_lockmode('update').one() - except exc.NoResultFound: - raise exception.ActionPlanNotFound(action_plan=action_plan_id) - - ref.update(values) - return ref - - def soft_delete_action_plan(self, action_plan_id): - try: - return self._soft_delete(models.ActionPlan, action_plan_id) - except exception.ResourceNotFound: - raise exception.ActionPlanNotFound(action_plan=action_plan_id) - - # ### EFFICACY INDICATORS ### # - - def get_efficacy_indicator_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - - query = model_query(models.EfficacyIndicator) - if eager: - query = self._set_eager_options(models.EfficacyIndicator, query) - query = self._add_efficacy_indicators_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.EfficacyIndicator, limit, marker, - sort_key, sort_dir, query) - - def create_efficacy_indicator(self, values): - # ensure defaults are present for new efficacy indicators - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - efficacy_indicator = self._create(models.EfficacyIndicator, values) - except db_exc.DBDuplicateEntry: - raise exception.EfficacyIndicatorAlreadyExists(uuid=values['uuid']) - return efficacy_indicator - - def _get_efficacy_indicator(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.EfficacyIndicator, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound(efficacy_indicator=value) - - def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, - eager=False): - return self._get_efficacy_indicator( - context, fieldname="id", - value=efficacy_indicator_id, eager=eager) - - def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, - eager=False): - 
return self._get_efficacy_indicator( - context, fieldname="uuid", - value=efficacy_indicator_uuid, eager=eager) - - def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, - eager=False): - return self._get_efficacy_indicator( - context, fieldname="name", - value=efficacy_indicator_name, eager=eager) - - def update_efficacy_indicator(self, efficacy_indicator_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "efficacy indicator.")) - - try: - return self._update( - models.EfficacyIndicator, efficacy_indicator_id, values) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound( - efficacy_indicator=efficacy_indicator_id) - - def soft_delete_efficacy_indicator(self, efficacy_indicator_id): - try: - return self._soft_delete( - models.EfficacyIndicator, efficacy_indicator_id) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound( - efficacy_indicator=efficacy_indicator_id) - - def destroy_efficacy_indicator(self, efficacy_indicator_id): - try: - return self._destroy( - models.EfficacyIndicator, efficacy_indicator_id) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound( - efficacy_indicator=efficacy_indicator_id) - - # ### SCORING ENGINES ### # - - def _add_scoring_engine_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['id', 'description'] - - return self._add_filters( - query=query, model=models.ScoringEngine, filters=filters, - plain_fields=plain_fields) - - def get_scoring_engine_list( - self, context, columns=None, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - query = model_query(models.ScoringEngine) - if eager: - query = self._set_eager_options(models.ScoringEngine, query) - query = self._add_scoring_engine_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - - return 
_paginate_query(models.ScoringEngine, limit, marker, - sort_key, sort_dir, query) - - def create_scoring_engine(self, values): - # ensure defaults are present for new scoring engines - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - scoring_engine = self._create(models.ScoringEngine, values) - except db_exc.DBDuplicateEntry: - raise exception.ScoringEngineAlreadyExists(uuid=values['uuid']) - return scoring_engine - - def _get_scoring_engine(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.ScoringEngine, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound(scoring_engine=value) - - def get_scoring_engine_by_id(self, context, scoring_engine_id, - eager=False): - return self._get_scoring_engine( - context, fieldname="id", value=scoring_engine_id, eager=eager) - - def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, - eager=False): - return self._get_scoring_engine( - context, fieldname="uuid", value=scoring_engine_uuid, eager=eager) - - def get_scoring_engine_by_name(self, context, scoring_engine_name, - eager=False): - return self._get_scoring_engine( - context, fieldname="name", value=scoring_engine_name, eager=eager) - - def destroy_scoring_engine(self, scoring_engine_id): - try: - return self._destroy(models.ScoringEngine, scoring_engine_id) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound( - scoring_engine=scoring_engine_id) - - def update_scoring_engine(self, scoring_engine_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Scoring Engine.")) - - try: - return self._update( - models.ScoringEngine, scoring_engine_id, values) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound( - scoring_engine=scoring_engine_id) - - def soft_delete_scoring_engine(self, scoring_engine_id): - try: - return 
self._soft_delete( - models.ScoringEngine, scoring_engine_id) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound( - scoring_engine=scoring_engine_id) - - # ### SERVICES ### # - - def _add_services_filters(self, query, filters): - if not filters: - filters = {} - - plain_fields = ['id', 'name', 'host'] - - return self._add_filters( - query=query, model=models.Service, filters=filters, - plain_fields=plain_fields) - - def get_service_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Service) - if eager: - query = self._set_eager_options(models.Service, query) - query = self._add_services_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Service, limit, marker, - sort_key, sort_dir, query) - - def create_service(self, values): - try: - service = self._create(models.Service, values) - except db_exc.DBDuplicateEntry: - raise exception.ServiceAlreadyExists(name=values['name'], - host=values['host']) - return service - - def _get_service(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Service, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ServiceNotFound(service=value) - - def get_service_by_id(self, context, service_id, eager=False): - return self._get_service( - context, fieldname="id", value=service_id, eager=eager) - - def get_service_by_name(self, context, service_name, eager=False): - return self._get_service( - context, fieldname="name", value=service_name, eager=eager) - - def destroy_service(self, service_id): - try: - return self._destroy(models.Service, service_id) - except exception.ResourceNotFound: - raise exception.ServiceNotFound(service=service_id) - - def update_service(self, service_id, values): - try: - return self._update(models.Service, service_id, values) - except 
exception.ResourceNotFound: - raise exception.ServiceNotFound(service=service_id) - - def soft_delete_service(self, service_id): - try: - return self._soft_delete(models.Service, service_id) - except exception.ResourceNotFound: - raise exception.ServiceNotFound(service=service_id) diff --git a/watcher/db/sqlalchemy/job_store.py b/watcher/db/sqlalchemy/job_store.py deleted file mode 100644 index da5028f..0000000 --- a/watcher/db/sqlalchemy/job_store.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica LTD -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils - -from apscheduler.jobstores.base import ConflictingIdError -from apscheduler.jobstores import sqlalchemy -from apscheduler.util import datetime_to_utc_timestamp -from apscheduler.util import maybe_ref - -from watcher.common import context -from watcher.common import service -from watcher import objects - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -from sqlalchemy import Table, MetaData, select, and_ -from sqlalchemy.exc import IntegrityError - - -class WatcherJobStore(sqlalchemy.SQLAlchemyJobStore): - """Stores jobs in a database table using SQLAlchemy. - - The table will be created if it doesn't exist in the database. 
- Plugin alias: ``sqlalchemy`` - :param str url: connection string - :param engine: an SQLAlchemy Engine to use instead of creating a new - one based on ``url`` - :param str tablename: name of the table to store jobs in - :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of - creating a new one - :param int pickle_protocol: pickle protocol level to use - (for serialization), defaults to the highest available - :param dict tag: tag description - """ - - def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', - metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, - tag=None): - super(WatcherJobStore, self).__init__(url, engine, tablename, - metadata, pickle_protocol) - metadata = maybe_ref(metadata) or MetaData() - self.jobs_t = Table(tablename, metadata, autoload=True, - autoload_with=engine) - service_ident = service.ServiceHeartbeat.get_service_name() - self.tag = tag or {'host': service_ident[0], 'name': service_ident[1]} - self.service_id = objects.Service.list(context=context.make_context(), - filters=self.tag)[0].id - - def start(self, scheduler, alias): - # There should be called 'start' method of parent of SQLAlchemyJobStore - super(self.__class__.__bases__[0], self).start(scheduler, alias) - - def add_job(self, job): - insert = self.jobs_t.insert().values(**{ - 'id': job.id, - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': pickle.dumps(job.__getstate__(), - self.pickle_protocol), - 'service_id': self.service_id, - 'tag': jsonutils.dumps(self.tag) - }) - try: - self.engine.execute(insert) - except IntegrityError: - raise ConflictingIdError(job.id) - - def get_all_jobs(self): - jobs = self._get_jobs(self.jobs_t.c.tag == jsonutils.dumps(self.tag)) - self._fix_paused_jobs_sorting(jobs) - return jobs - - def _get_jobs(self, *conditions): - jobs = [] - conditions += (self.jobs_t.c.service_id == self.service_id,) - selectable = select( - [self.jobs_t.c.id, self.jobs_t.c.job_state, 
self.jobs_t.c.tag] - ).order_by(self.jobs_t.c.next_run_time).where(and_(*conditions)) - failed_job_ids = set() - for row in self.engine.execute(selectable): - try: - jobs.append(self._reconstitute_job(row.job_state)) - except Exception: - self._logger.exception( - 'Unable to restore job "%s" -- removing it', row.id) - failed_job_ids.add(row.id) - - # Remove all the jobs we failed to restore - if failed_job_ids: - delete = self.jobs_t.delete().where( - self.jobs_t.c.id.in_(failed_job_ids)) - self.engine.execute(delete) - - return jobs diff --git a/watcher/db/sqlalchemy/migration.py b/watcher/db/sqlalchemy/migration.py deleted file mode 100644 index b342945..0000000 --- a/watcher/db/sqlalchemy/migration.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import alembic -from alembic import config as alembic_config -import alembic.migration as alembic_migration -from oslo_db import exception as db_exc - -from watcher._i18n import _ -from watcher.db.sqlalchemy import api as sqla_api -from watcher.db.sqlalchemy import models - - -def _alembic_config(): - path = os.path.join(os.path.dirname(__file__), 'alembic.ini') - config = alembic_config.Config(path) - return config - - -def version(engine=None): - """Current database version. 
- - :returns: Database version - :rtype: string - """ - if engine is None: - engine = sqla_api.get_engine() - with engine.connect() as conn: - context = alembic_migration.MigrationContext.configure(conn) - return context.get_current_revision() - - -def upgrade(revision, config=None): - """Used for upgrading database. - - :param version: Desired database version - :type version: string - """ - revision = revision or 'head' - config = config or _alembic_config() - - alembic.command.upgrade(config, revision) - - -def create_schema(config=None, engine=None): - """Create database schema from models description. - - Can be used for initial installation instead of upgrade('head'). - """ - if engine is None: - engine = sqla_api.get_engine() - - # NOTE(viktors): If we will use metadata.create_all() for non empty db - # schema, it will only add the new tables, but leave - # existing as is. So we should avoid of this situation. - if version(engine=engine) is not None: - raise db_exc.DbMigrationError( - _("Watcher database schema is already under version control; " - "use upgrade() instead")) - - models.Base.metadata.create_all(engine) - stamp('head', config=config) - - -def downgrade(revision, config=None): - """Used for downgrading database. - - :param version: Desired database version - :type version: string - """ - revision = revision or 'base' - config = config or _alembic_config() - return alembic.command.downgrade(config, revision) - - -def stamp(revision, config=None): - """Stamps database with provided revision. - - Don't run any migrations. - - :param revision: Should match one from repository or head - to stamp - database with most recent revision - :type revision: string - """ - config = config or _alembic_config() - return alembic.command.stamp(config, revision=revision) - - -def revision(message=None, autogenerate=False, config=None): - """Creates template for migration. 
- - :param message: Text that will be used for migration title - :type message: string - :param autogenerate: If True - generates diff based on current database - state - :type autogenerate: bool - """ - config = config or _alembic_config() - return alembic.command.revision(config, message=message, - autogenerate=autogenerate) diff --git a/watcher/db/sqlalchemy/models.py b/watcher/db/sqlalchemy/models.py deleted file mode 100644 index dbe972b..0000000 --- a/watcher/db/sqlalchemy/models.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -SQLAlchemy models for watcher service -""" - -from oslo_db.sqlalchemy import models -from oslo_serialization import jsonutils -import six.moves.urllib.parse as urlparse -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import ForeignKey -from sqlalchemy import Integer -from sqlalchemy import Numeric -from sqlalchemy import orm -from sqlalchemy import String -from sqlalchemy import Text -from sqlalchemy.types import TypeDecorator, TEXT -from sqlalchemy import UniqueConstraint - -from watcher import conf - -CONF = conf.CONF - - -def table_args(): - engine_name = urlparse.urlparse(CONF.database.connection).scheme - if engine_name == 'mysql': - return {'mysql_engine': CONF.database.mysql_engine, - 'mysql_charset': "utf8"} - return None - - -class JsonEncodedType(TypeDecorator): - """Abstract base type serialized as json-encoded string in db.""" - - type = None - impl = TEXT - - def process_bind_param(self, value, dialect): - if value is None: - # Save default value according to current type to keep the - # interface the consistent. 
- value = self.type() - elif not isinstance(value, self.type): - raise TypeError("%s supposes to store %s objects, but %s given" - % (self.__class__.__name__, - self.type.__name__, - type(value).__name__)) - serialized_value = jsonutils.dumps(value) - return serialized_value - - def process_result_value(self, value, dialect): - if value is not None: - value = jsonutils.loads(value) - return value - - -class JSONEncodedDict(JsonEncodedType): - """Represents dict serialized as json-encoded string in db.""" - - type = dict - - -class JSONEncodedList(JsonEncodedType): - """Represents list serialized as json-encoded string in db.""" - - type = list - - -class WatcherBase(models.SoftDeleteMixin, - models.TimestampMixin, models.ModelBase): - metadata = None - - def as_dict(self): - d = {} - for c in self.__table__.columns: - d[c.name] = self[c.name] - return d - - def save(self, session=None): - import watcher.db.sqlalchemy.api as db_api - - if session is None: - session = db_api.get_session() - - super(WatcherBase, self).save(session) - - -Base = declarative_base(cls=WatcherBase) - - -class Goal(Base): - """Represents a goal.""" - - __tablename__ = 'goals' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_goals0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_goals0name'), - table_args(), - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - name = Column(String(63), nullable=False) - display_name = Column(String(63), nullable=False) - efficacy_specification = Column(JSONEncodedList, nullable=False) - - -class Strategy(Base): - """Represents a strategy.""" - - __tablename__ = 'strategies' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_strategies0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - name = Column(String(63), nullable=False) - display_name = 
Column(String(63), nullable=False) - goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) - parameters_spec = Column(JSONEncodedDict, nullable=True) - - goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) - - -class AuditTemplate(Base): - """Represents an audit template.""" - - __tablename__ = 'audit_templates' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_audit_templates0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_audit_templates0name'), - table_args() - ) - id = Column(Integer, primary_key=True) - uuid = Column(String(36)) - name = Column(String(63), nullable=True) - description = Column(String(255), nullable=True) - goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) - strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) - scope = Column(JSONEncodedList) - - goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) - strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) - - -class Audit(Base): - """Represents an audit.""" - - __tablename__ = 'audits' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_audits0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - audit_type = Column(String(20)) - state = Column(String(20), nullable=True) - parameters = Column(JSONEncodedDict, nullable=True) - interval = Column(String(36), nullable=True) - goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) - strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) - scope = Column(JSONEncodedList, nullable=True) - auto_trigger = Column(Boolean, nullable=False) - next_run_time = Column(DateTime, nullable=True) - - goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) - strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) - - -class ActionPlan(Base): - """Represents an action plan.""" - - __tablename__ = 'action_plans' - __table_args__ = ( - 
UniqueConstraint('uuid', name='uniq_action_plans0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - audit_id = Column(Integer, ForeignKey('audits.id'), nullable=False) - strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=False) - state = Column(String(20), nullable=True) - global_efficacy = Column(JSONEncodedDict, nullable=True) - - audit = orm.relationship(Audit, foreign_keys=audit_id, lazy=None) - strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) - - -class Action(Base): - """Represents an action.""" - - __tablename__ = 'actions' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_actions0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36), nullable=False) - action_plan_id = Column(Integer, ForeignKey('action_plans.id'), - nullable=False) - # only for the first version - action_type = Column(String(255), nullable=False) - input_parameters = Column(JSONEncodedDict, nullable=True) - state = Column(String(20), nullable=True) - parents = Column(JSONEncodedList, nullable=True) - - action_plan = orm.relationship( - ActionPlan, foreign_keys=action_plan_id, lazy=None) - - -class EfficacyIndicator(Base): - """Represents an efficacy indicator.""" - - __tablename__ = 'efficacy_indicators' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - name = Column(String(63)) - description = Column(String(255), nullable=True) - unit = Column(String(63), nullable=True) - value = Column(Numeric()) - action_plan_id = Column(Integer, ForeignKey('action_plans.id'), - nullable=False) - - action_plan = orm.relationship( - ActionPlan, foreign_keys=action_plan_id, lazy=None) - - -class ScoringEngine(Base): - """Represents a scoring engine.""" - - __tablename__ = 
'scoring_engines' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_scoring_engines0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_scoring_engines0name'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36), nullable=False) - name = Column(String(63), nullable=False) - description = Column(String(255), nullable=True) - # Metainfo might contain some additional information about the data model. - # The format might vary between different models (e.g. be JSON, XML or - # even some custom format), the blob type should cover all scenarios. - metainfo = Column(Text, nullable=True) - - -class Service(Base): - """Represents a service entity""" - - __tablename__ = 'services' - __table_args__ = ( - UniqueConstraint('host', 'name', 'deleted', - name="uniq_services0host0name0deleted"), - table_args() - ) - id = Column(Integer, primary_key=True) - name = Column(String(255), nullable=False) - host = Column(String(255), nullable=False) - last_seen_up = Column(DateTime, nullable=True) diff --git a/watcher/decision_engine/__init__.py b/watcher/decision_engine/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/audit/__init__.py b/watcher/decision_engine/audit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/audit/base.py b/watcher/decision_engine/audit/base.py deleted file mode 100644 index cccb1aa..0000000 --- a/watcher/decision_engine/audit/base.py +++ /dev/null @@ -1,135 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -import six - -from oslo_log import log - -from watcher.applier import rpcapi -from watcher.common import exception -from watcher.common import service -from watcher.decision_engine.planner import manager as planner_manager -from watcher.decision_engine.strategy.context import default as default_context -from watcher import notifications -from watcher import objects -from watcher.objects import fields - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -@six.add_metaclass(service.Singleton) -class BaseAuditHandler(object): - - @abc.abstractmethod - def execute(self, audit_uuid, request_context): - raise NotImplementedError() - - @abc.abstractmethod - def pre_execute(self, audit_uuid, request_context): - raise NotImplementedError() - - @abc.abstractmethod - def do_execute(self, audit, request_context): - raise NotImplementedError() - - @abc.abstractmethod - def post_execute(self, audit, solution, request_context): - raise NotImplementedError() - - -@six.add_metaclass(abc.ABCMeta) -class AuditHandler(BaseAuditHandler): - - def __init__(self): - super(AuditHandler, self).__init__() - self._strategy_context = default_context.DefaultStrategyContext() - self._planner_manager = planner_manager.PlannerManager() - self._planner = None - - @property - def planner(self): - if self._planner is None: - self._planner = self._planner_manager.load() - return self._planner - - @property - def strategy_context(self): - return self._strategy_context - - def do_schedule(self, request_context, audit, solution): - try: - 
notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.PLANNER, - phase=fields.NotificationPhase.START) - action_plan = self.planner.schedule(request_context, audit.id, - solution) - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.PLANNER, - phase=fields.NotificationPhase.END) - return action_plan - except Exception: - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.PLANNER, - priority=fields.NotificationPriority.ERROR, - phase=fields.NotificationPhase.ERROR) - raise - - def update_audit_state(self, audit, state): - LOG.debug("Update audit state: %s", state) - audit.state = state - audit.save() - - def check_ongoing_action_plans(self, request_context): - a_plan_filters = {'state': objects.action_plan.State.ONGOING} - ongoing_action_plans = objects.ActionPlan.list( - request_context, filters=a_plan_filters) - if ongoing_action_plans: - raise exception.ActionPlanIsOngoing( - action_plan=ongoing_action_plans[0].uuid) - - def pre_execute(self, audit, request_context): - LOG.debug("Trigger audit %s", audit.uuid) - self.check_ongoing_action_plans(request_context) - # change state of the audit to ONGOING - self.update_audit_state(audit, objects.audit.State.ONGOING) - - def post_execute(self, audit, solution, request_context): - action_plan = self.do_schedule(request_context, audit, solution) - if audit.auto_trigger: - applier_client = rpcapi.ApplierAPI() - applier_client.launch_action_plan(request_context, - action_plan.uuid) - - def execute(self, audit, request_context): - try: - self.pre_execute(audit, request_context) - solution = self.do_execute(audit, request_context) - self.post_execute(audit, solution, request_context) - except exception.ActionPlanIsOngoing as e: - LOG.warning(e) - if audit.audit_type == objects.audit.AuditType.ONESHOT.value: - self.update_audit_state(audit, 
objects.audit.State.CANCELLED) - except Exception as e: - LOG.exception(e) - self.update_audit_state(audit, objects.audit.State.FAILED) diff --git a/watcher/decision_engine/audit/continuous.py b/watcher/decision_engine/audit/continuous.py deleted file mode 100644 index 2afcafe..0000000 --- a/watcher/decision_engine/audit/continuous.py +++ /dev/null @@ -1,170 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica LTD -# Copyright (c) 2016 Intel Corp -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import datetime -from dateutil import tz - -from apscheduler.jobstores import memory -from croniter import croniter - -from watcher.common import context -from watcher.common import scheduling -from watcher.common import utils -from watcher import conf -from watcher.db.sqlalchemy import api as sq_api -from watcher.db.sqlalchemy import job_store -from watcher.decision_engine.audit import base -from watcher import objects - - -CONF = conf.CONF - - -class ContinuousAuditHandler(base.AuditHandler): - def __init__(self): - super(ContinuousAuditHandler, self).__init__() - self._scheduler = None - self.context_show_deleted = context.RequestContext(is_admin=True, - show_deleted=True) - - @property - def scheduler(self): - if self._scheduler is None: - self._scheduler = scheduling.BackgroundSchedulerService( - jobstores={ - 'default': job_store.WatcherJobStore( - engine=sq_api.get_engine()), - 'memory': memory.MemoryJobStore() - } - ) - return self._scheduler - - def _is_audit_inactive(self, audit): - audit = objects.Audit.get_by_uuid( - self.context_show_deleted, audit.uuid) - if objects.audit.AuditStateTransitionManager().is_inactive(audit): - # if audit isn't in active states, audit's job must be removed to - # prevent using of inactive audit in future. 
- [job for job in self.scheduler.get_jobs() - if job.name == 'execute_audit' and - job.args[0].uuid == audit.uuid][0].remove() - return True - - return False - - def do_execute(self, audit, request_context): - # execute the strategy - solution = self.strategy_context.execute_strategy( - audit, request_context) - - if audit.audit_type == objects.audit.AuditType.CONTINUOUS.value: - a_plan_filters = {'audit_uuid': audit.uuid, - 'state': objects.action_plan.State.RECOMMENDED} - action_plans = objects.ActionPlan.list( - request_context, filters=a_plan_filters, eager=True) - for plan in action_plans: - plan.state = objects.action_plan.State.CANCELLED - plan.save() - return solution - - def _next_cron_time(self, audit): - if utils.is_cron_like(audit.interval): - return croniter(audit.interval, datetime.datetime.utcnow() - ).get_next(datetime.datetime) - - @classmethod - def execute_audit(cls, audit, request_context): - self = cls() - if not self._is_audit_inactive(audit): - try: - self.execute(audit, request_context) - except Exception: - raise - finally: - if utils.is_int_like(audit.interval): - audit.next_run_time = ( - datetime.datetime.utcnow() + - datetime.timedelta(seconds=int(audit.interval))) - else: - audit.next_run_time = self._next_cron_time(audit) - audit.save() - - def _add_job(self, trigger, audit, audit_context, **trigger_args): - time_var = 'next_run_time' if trigger_args.get( - 'next_run_time') else 'run_date' - # We should convert UTC time to local time without tzinfo - trigger_args[time_var] = trigger_args[time_var].replace( - tzinfo=tz.tzutc()).astimezone(tz.tzlocal()).replace(tzinfo=None) - self.scheduler.add_job(self.execute_audit, trigger, - args=[audit, audit_context], - name='execute_audit', - **trigger_args) - - def launch_audits_periodically(self): - audit_context = context.RequestContext(is_admin=True) - audit_filters = { - 'audit_type': objects.audit.AuditType.CONTINUOUS.value, - 'state__in': (objects.audit.State.PENDING, - 
objects.audit.State.ONGOING, - objects.audit.State.SUCCEEDED) - } - audits = objects.Audit.list( - audit_context, filters=audit_filters, eager=True) - scheduler_job_args = [ - job.args for job in self.scheduler.get_jobs() - if job.name == 'execute_audit'] - for audit in audits: - # if audit is not presented in scheduled audits yet. - if audit.uuid not in [arg[0].uuid for arg in scheduler_job_args]: - # if interval is provided with seconds - if utils.is_int_like(audit.interval): - # if audit has already been provided and we need - # to restore it after shutdown - if audit.next_run_time is not None: - old_run_time = audit.next_run_time - current = datetime.datetime.utcnow() - if old_run_time < current: - delta = datetime.timedelta( - seconds=(int(audit.interval) - ( - current - old_run_time).seconds % - int(audit.interval))) - audit.next_run_time = current + delta - next_run_time = audit.next_run_time - # if audit is new one - else: - next_run_time = datetime.datetime.utcnow() - self._add_job('interval', audit, audit_context, - seconds=int(audit.interval), - next_run_time=next_run_time) - - else: - audit.next_run_time = self._next_cron_time(audit) - self._add_job('date', audit, audit_context, - run_date=audit.next_run_time) - audit.save() - - def start(self): - self.scheduler.add_job( - self.launch_audits_periodically, - 'interval', - seconds=CONF.watcher_decision_engine.continuous_audit_interval, - next_run_time=datetime.datetime.now(), - jobstore='memory') - self.scheduler.start() diff --git a/watcher/decision_engine/audit/oneshot.py b/watcher/decision_engine/audit/oneshot.py deleted file mode 100644 index fae2512..0000000 --- a/watcher/decision_engine/audit/oneshot.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.audit import base -from watcher import objects - - -class OneShotAuditHandler(base.AuditHandler): - - def do_execute(self, audit, request_context): - # execute the strategy - solution = self.strategy_context.execute_strategy( - audit, request_context) - - return solution - - def post_execute(self, audit, solution, request_context): - super(OneShotAuditHandler, self).post_execute(audit, solution, - request_context) - # change state of the audit to SUCCEEDED - self.update_audit_state(audit, objects.audit.State.SUCCEEDED) diff --git a/watcher/decision_engine/gmr.py b/watcher/decision_engine/gmr.py deleted file mode 100644 index 8ddc561..0000000 --- a/watcher/decision_engine/gmr.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_reports import guru_meditation_report as gmr - -from watcher._i18n import _ -from watcher.decision_engine.model.collector import manager - - -def register_gmr_plugins(): - """Register GMR plugins that are specific to watcher-decision-engine.""" - gmr.TextGuruMeditation.register_section(_('CDMCs'), show_models) - - -def show_models(): - """Create a formatted output of all the CDMs - - Mainly used as a Guru Meditation Report (GMR) plugin - """ - mgr = manager.CollectorManager() - - output = [] - for name, cdmc in mgr.get_collectors().items(): - output.append("") - output.append("~" * len(name)) - output.append(name) - output.append("~" * len(name)) - output.append("") - - cdmc_struct = cdmc.cluster_data_model.to_string() - output.append(cdmc_struct) - - return "\n".join(output) diff --git a/watcher/decision_engine/goal/__init__.py b/watcher/decision_engine/goal/__init__.py deleted file mode 100644 index 1607884..0000000 --- a/watcher/decision_engine/goal/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.decision_engine.goal import goals - -Dummy = goals.Dummy -ServerConsolidation = goals.ServerConsolidation -ThermalOptimization = goals.ThermalOptimization -Unclassified = goals.Unclassified -WorkloadBalancing = goals.WorkloadBalancing -NoisyNeighbor = goals.NoisyNeighborOptimization - -__all__ = ("Dummy", "ServerConsolidation", "ThermalOptimization", - "Unclassified", "WorkloadBalancing", - "NoisyNeighborOptimization",) diff --git a/watcher/decision_engine/goal/base.py b/watcher/decision_engine/goal/base.py deleted file mode 100644 index b272cfc..0000000 --- a/watcher/decision_engine/goal/base.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import six - -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class Goal(loadable.Loadable): - - def __init__(self, config): - super(Goal, self).__init__(config) - self.name = self.get_name() - self.display_name = self.get_display_name() - self.efficacy_specification = self.get_efficacy_specification() - - @classmethod - @abc.abstractmethod - def get_name(cls): - """Name of the goal: should be identical to the related entry point""" - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_display_name(cls): - """The goal display name for the goal""" - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_translatable_display_name(cls): - """The translatable msgid of the goal""" - # Note(v-francoise): Defined here to be used as the translation key for - # other services - raise NotImplementedError() - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - raise NotImplementedError() diff --git a/watcher/decision_engine/goal/efficacy/__init__.py b/watcher/decision_engine/goal/efficacy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/goal/efficacy/base.py b/watcher/decision_engine/goal/efficacy/base.py deleted file mode 100644 index b517700..0000000 --- a/watcher/decision_engine/goal/efficacy/base.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
@six.add_metaclass(abc.ABCMeta)
class EfficacySpecification(object):
    """Contract listing the efficacy indicators a solution must expose."""

    def __init__(self):
        # Cache the indicator specifications once; subclasses define them.
        self._indicators_specs = self.get_indicators_specifications()

    @property
    def indicators_specs(self):
        """Sequence of :py:class:`~.IndicatorSpecification` instances."""
        return self._indicators_specs

    @abc.abstractmethod
    def get_indicators_specifications(self):
        """List the specifications of the indicator for this efficacy spec

        :return: Tuple of indicator specifications
        :rtype: Tuple of :py:class:`~.IndicatorSpecification` instances
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_global_efficacy_indicator(self, indicators_map):
        """Compute the global efficacy for the goal it achieves

        :param indicators_map: dict-like object containing the
            efficacy indicators related to this spec
        :type indicators_map: :py:class:`~.IndicatorsMap` instance
        :raises: NotImplementedError
        :returns: :py:class:`~.Indicator` instance
        """
        raise NotImplementedError()

    @property
    def schema(self):
        """Combined voluptuous schema built from every indicator schema."""
        merged = voluptuous.Schema({}, required=True)
        for spec in self.indicators_specs:
            # Required/Optional marker depends on the indicator itself.
            marker = (voluptuous.Required
                      if spec.required else voluptuous.Optional)
            merged = merged.extend({marker(spec.name): spec.schema.schema})
        return merged

    def validate_efficacy_indicators(self, indicators_map):
        """Validate the given mapping against the combined schema."""
        return self.schema(indicators_map)

    def get_indicators_specs_dicts(self):
        """Return each indicator specification as a plain dict."""
        return [spec.to_dict() for spec in self.indicators_specs]

    def serialize_indicators_specs(self):
        """Return the indicator specifications as a JSON string."""
        return jsonutils.dumps(self.get_indicators_specs_dicts())
@six.add_metaclass(abc.ABCMeta)
class IndicatorSpecification(object):
    """Description of a single efficacy indicator (name, unit, schema)."""

    def __init__(self, name=None, description=None, unit=None, required=True):
        self.name = name
        self.description = description
        self.unit = unit
        # Whether the indicator is mandatory in a solution.
        self.required = required

    @abc.abstractproperty
    def schema(self):
        """Schema used to validate the indicator value

        :return: A Voluptuous Schema
        :rtype: :py:class:`.voluptuous.Schema` instance
        """
        raise NotImplementedError()

    @classmethod
    def validate(cls, solution):
        """Validate the given solution

        :raises: :py:class:`~.InvalidIndicatorValue` when the validation fails
        """
        indicator = cls()
        value = None
        try:
            value = getattr(solution, indicator.name)
            indicator.schema(value)
        except Exception as exc:
            # Any failure (missing attribute or schema violation) is
            # reported as an invalid indicator value.
            LOG.exception(exc)
            raise exception.InvalidIndicatorValue(
                name=indicator.name, value=value, spec_type=type(indicator))

    def to_dict(self):
        """Return a JSON-friendly representation of this specification."""
        return {
            "name": self.name,
            "description": self.description,
            "unit": self.unit,
            "schema": str(self.schema.schema) if self.schema else None,
        }

    def __str__(self):
        return str(self.to_dict())


class AverageCpuLoad(IndicatorSpecification):
    """Average CPU load indicator, expressed as a percentage."""

    def __init__(self):
        super(AverageCpuLoad, self).__init__(
            name="avg_cpu_percent",
            description=_("Average CPU load as a percentage of the CPU time."),
            unit="%",
        )

    @property
    def schema(self):
        # A percentage is only valid within [0, 100].
        return voluptuous.Schema(
            voluptuous.Range(min=0, max=100), required=True)


class MigrationEfficacy(IndicatorSpecification):
    """Percentage of released nodes out of the total migrations."""

    def __init__(self):
        super(MigrationEfficacy, self).__init__(
            name="migration_efficacy",
            description=_("Represents the percentage of released nodes out of "
                          "the total number of migrations."),
            unit="%",
            required=True
        )

    @property
    def schema(self):
        # A percentage is only valid within [0, 100].
        return voluptuous.Schema(
            voluptuous.Range(min=0, max=100), required=True)


class ComputeNodesCount(IndicatorSpecification):
    """Total number of enabled compute nodes."""

    def __init__(self):
        super(ComputeNodesCount, self).__init__(
            name="compute_nodes_count",
            description=_("The total number of enabled compute nodes."),
            unit=None,
        )

    @property
    def schema(self):
        # A count can never be negative.
        return voluptuous.Schema(
            voluptuous.Range(min=0), required=True)


class ReleasedComputeNodesCount(IndicatorSpecification):
    """Number of compute nodes to be released."""

    def __init__(self):
        super(ReleasedComputeNodesCount, self).__init__(
            name="released_compute_nodes_count",
            description=_("The number of compute nodes to be released."),
            unit=None,
        )

    @property
    def schema(self):
        # A count can never be negative.
        return voluptuous.Schema(
            voluptuous.Range(min=0), required=True)


class InstanceMigrationsCount(IndicatorSpecification):
    """Number of VM migrations to be performed."""

    def __init__(self):
        super(InstanceMigrationsCount, self).__init__(
            name="instance_migrations_count",
            description=_("The number of VM migrations to be performed."),
            unit=None,
        )

    @property
    def schema(self):
        # A count can never be negative.
        return voluptuous.Schema(
            voluptuous.Range(min=0), required=True)
class Unclassified(base.EfficacySpecification):
    """Empty efficacy specification used by goals without indicators."""

    def get_indicators_specifications(self):
        # No indicator is tracked for unclassified goals.
        return ()

    def get_global_efficacy_indicator(self, indicators_map):
        # Nothing to aggregate, hence no global efficacy either.
        return None


class ServerConsolidation(base.EfficacySpecification):
    """Efficacy specification for the server consolidation goal."""

    def get_indicators_specifications(self):
        return [
            indicators.ComputeNodesCount(),
            indicators.ReleasedComputeNodesCount(),
            indicators.InstanceMigrationsCount(),
        ]

    def get_global_efficacy_indicator(self, indicators_map=None):
        """Ratio (in %) of released nodes over enabled compute nodes."""
        released_ratio = 0
        if indicators_map and indicators_map.compute_nodes_count > 0:
            released = float(indicators_map.released_compute_nodes_count)
            total = float(indicators_map.compute_nodes_count)
            released_ratio = released / total * 100

        return efficacy.Indicator(
            name="released_nodes_ratio",
            description=_("Ratio of released compute nodes divided by the "
                          "total number of enabled compute nodes."),
            unit='%',
            value=released_ratio,
        )
class Dummy(base.Goal):
    """Dummy

    Reserved goal that is used for testing purposes.
    """

    @classmethod
    def get_name(cls):
        return "dummy"

    @classmethod
    def get_display_name(cls):
        return _("Dummy goal")

    @classmethod
    def get_translatable_display_name(cls):
        return "Dummy goal"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class Unclassified(base.Goal):
    """Unclassified

    This goal is used to ease the development process of a strategy. Containing
    no actual indicator specification, this goal can be used whenever a
    strategy has yet to be formally associated with an existing goal. If the
    goal to achieve has been identified but there is no available
    implementation, this Goal can also be used as a transitional stage.
    """

    @classmethod
    def get_name(cls):
        return "unclassified"

    @classmethod
    def get_display_name(cls):
        return _("Unclassified")

    @classmethod
    def get_translatable_display_name(cls):
        return "Unclassified"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class ServerConsolidation(base.Goal):
    """ServerConsolidation

    This goal is for efficient usage of compute server resources in order to
    reduce the total number of servers.
    """

    @classmethod
    def get_name(cls):
        return "server_consolidation"

    @classmethod
    def get_display_name(cls):
        return _("Server Consolidation")

    @classmethod
    def get_translatable_display_name(cls):
        return "Server Consolidation"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.ServerConsolidation()


class ThermalOptimization(base.Goal):
    """ThermalOptimization

    This goal is used to balance the temperature across different servers.
    """

    @classmethod
    def get_name(cls):
        return "thermal_optimization"

    @classmethod
    def get_display_name(cls):
        return _("Thermal Optimization")

    @classmethod
    def get_translatable_display_name(cls):
        return "Thermal Optimization"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class WorkloadBalancing(base.Goal):
    """WorkloadBalancing

    This goal is used to evenly distribute workloads across different servers.
    """

    @classmethod
    def get_name(cls):
        return "workload_balancing"

    @classmethod
    def get_display_name(cls):
        return _("Workload Balancing")

    @classmethod
    def get_translatable_display_name(cls):
        return "Workload Balancing"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class AirflowOptimization(base.Goal):
    """AirflowOptimization

    This goal is used to optimize the airflow within a cloud infrastructure.
    """

    @classmethod
    def get_name(cls):
        return "airflow_optimization"

    @classmethod
    def get_display_name(cls):
        return _("Airflow Optimization")

    @classmethod
    def get_translatable_display_name(cls):
        return "Airflow Optimization"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class NoisyNeighborOptimization(base.Goal):
    """NoisyNeighborOptimization

    This goal is used to identify and migrate a Noisy Neighbor -
    a low priority VM that negatively affects performance of a high priority VM
    in terms of IPC by over utilizing Last Level Cache.
    """

    @classmethod
    def get_name(cls):
        return "noisy_neighbor"

    @classmethod
    def get_display_name(cls):
        return _("Noisy Neighbor")

    @classmethod
    def get_translatable_display_name(cls):
        return "Noisy Neighbor"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()
class DefaultStrategyLoader(default.DefaultLoader):
    """Loader for plugins registered in the 'watcher_strategies' namespace."""

    def __init__(self):
        super(DefaultStrategyLoader, self).__init__(
            namespace='watcher_strategies')


class DefaultGoalLoader(default.DefaultLoader):
    """Loader for plugins registered in the 'watcher_goals' namespace."""

    def __init__(self):
        super(DefaultGoalLoader, self).__init__(
            namespace='watcher_goals')


class DefaultPlannerLoader(default.DefaultLoader):
    """Loader for plugins registered in the 'watcher_planners' namespace."""

    def __init__(self):
        super(DefaultPlannerLoader, self).__init__(
            namespace='watcher_planners')


class ClusterDataModelCollectorLoader(default.DefaultLoader):
    """Loader for the 'watcher_cluster_data_model_collectors' namespace."""

    def __init__(self):
        super(ClusterDataModelCollectorLoader, self).__init__(
            namespace='watcher_cluster_data_model_collectors')


class DefaultScoringLoader(default.DefaultLoader):
    """Loader for plugins in the 'watcher_scoring_engines' namespace."""

    def __init__(self):
        super(DefaultScoringLoader, self).__init__(
            namespace='watcher_scoring_engines')


class DefaultScoringContainerLoader(default.DefaultLoader):
    """Loader for the 'watcher_scoring_engine_containers' namespace."""

    def __init__(self):
        super(DefaultScoringContainerLoader, self).__init__(
            namespace='watcher_scoring_engine_containers')
class DecisionEngineManager(service_manager.ServiceManager):
    """Service descriptor for the watcher-decision-engine service.

    Exposes the messaging identifiers, topics and endpoints this service
    registers with the service manager machinery.
    """

    @property
    def service_name(self):
        # Canonical name under which this service is registered.
        return 'watcher-decision-engine'

    @property
    def api_version(self):
        return '1.0'

    @property
    def publisher_id(self):
        return CONF.watcher_decision_engine.publisher_id

    @property
    def conductor_topic(self):
        return CONF.watcher_decision_engine.conductor_topic

    @property
    def notification_topics(self):
        return CONF.watcher_decision_engine.notification_topics

    @property
    def conductor_endpoints(self):
        # RPC endpoint classes served on the conductor topic.
        return [audit_endpoint.AuditEndpoint]

    @property
    def notification_endpoints(self):
        # Endpoints are aggregated from every loaded cluster data model
        # collector.
        return self.collector_manager.get_notification_endpoints()

    @property
    def collector_manager(self):
        # NOTE(review): a new CollectorManager is created on every access;
        # presumably acceptable because it is only read once at startup —
        # confirm before caching.
        return manager.CollectorManager()
class AuditEndpoint(object):
    """RPC endpoint handling audit-triggering requests.

    Audits are executed asynchronously on a thread pool so that
    trigger_audit() can acknowledge the request immediately.
    """

    def __init__(self, messaging):
        self._messaging = messaging
        # Pool size is operator-configurable.
        self._executor = futures.ThreadPoolExecutor(
            max_workers=CONF.watcher_decision_engine.max_workers)
        self._oneshot_handler = o_handler.OneShotAuditHandler()
        self._continuous_handler = c_handler.ContinuousAuditHandler().start()

    @property
    def executor(self):
        return self._executor

    def do_trigger_audit(self, context, audit_uuid):
        """Synchronously run the one-shot handler for the given audit.

        :param context: request context
        :param audit_uuid: UUID of the audit to execute
        """
        audit = objects.Audit.get_by_uuid(context, audit_uuid, eager=True)
        self._oneshot_handler.execute(audit, context)

    def trigger_audit(self, context, audit_uuid):
        """Schedule the audit execution on the thread pool and return.

        :param context: request context
        :param audit_uuid: UUID of the audit to execute
        :returns: the audit UUID, echoed back to the caller
        """
        # Use lazy %-style logger arguments instead of eager string
        # interpolation so formatting only happens when DEBUG is enabled.
        LOG.debug("Trigger audit %s", audit_uuid)
        self.executor.submit(self.do_trigger_audit, context, audit_uuid)
        return audit_uuid
@six.add_metaclass(abc.ABCMeta)
class Model(object):
    """Abstract base class for in-memory data model representations.

    Concrete models must be serializable both to a plain string and to
    XML (used e.g. for debug logging of the model's content).
    """

    @abc.abstractmethod
    def to_string(self):
        # Serialize the model to a human-readable string.
        raise NotImplementedError()

    @abc.abstractmethod
    def to_xml(self):
        # Serialize the model to an XML document string.
        raise NotImplementedError()
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -A :ref:`Cluster Data Model ` (or CDM) is a -logical representation of the current state and topology of the :ref:`Cluster -` :ref:`Managed resources `. - -It is represented as a set of :ref:`Managed resources -` (which may be a simple tree or a flat list of -key-value pairs) which enables Watcher :ref:`Strategies ` -to know the current relationships between the different :ref:`resources -`) of the :ref:`Cluster ` -during an :ref:`Audit ` and enables the :ref:`Strategy -` to request information such as: - -- What compute nodes are in a given :ref:`Audit Scope - `? -- What :ref:`Instances ` are hosted on a given compute - node? -- What is the current load of a compute node? -- What is the current free memory of a compute node? -- What is the network link between two compute nodes? -- What is the available bandwidth on a given network link? -- What is the current space available on a given virtual disk of a given - :ref:`Instance ` ? -- What is the current state of a given :ref:`Instance `? -- ... 
- -In a word, this data model enables the :ref:`Strategy ` -to know: - -- the current topology of the :ref:`Cluster ` -- the current capacity for each :ref:`Managed resource - ` -- the current amount of used/free space for each :ref:`Managed resource - ` -- the current state of each :ref:`Managed resources - ` - -In the Watcher project, we aim at providing a some generic and basic -:ref:`Cluster Data Model ` for each :ref:`Goal -`, usable in the associated :ref:`Strategies -` through a plugin-based mechanism which are called -cluster data model collectors (or CDMCs). These CDMCs are responsible for -loading and keeping up-to-date their associated CDM by listening to events and -also periodically rebuilding themselves from the ground up. They are also -directly accessible from the strategies classes. These CDMs are used to: - -- simplify the development of a new :ref:`Strategy ` for a - given :ref:`Goal ` when there already are some existing - :ref:`Strategies ` associated to the same :ref:`Goal - ` -- avoid duplicating the same code in several :ref:`Strategies - ` associated to the same :ref:`Goal ` -- have a better consistency between the different :ref:`Strategies - ` for a given :ref:`Goal ` -- avoid any strong coupling with any external :ref:`Cluster Data Model - ` (the proposed data model acts as a pivot - data model) - -There may be various :ref:`generic and basic Cluster Data Models -` proposed in Watcher helpers, each of them -being adapted to achieving a given :ref:`Goal `: - -- For example, for a :ref:`Goal ` which aims at optimizing - the network :ref:`resources ` the :ref:`Strategy - ` may need to know which :ref:`resources - ` are communicating together. -- Whereas for a :ref:`Goal ` which aims at optimizing thermal - and power conditions, the :ref:`Strategy ` may need to - know the location of each compute node in the racks and the location of each - rack in the room. 
@six.add_metaclass(abc.ABCMeta)
class BaseClusterDataModelCollector(loadable.LoadableSingleton):
    """Base class for plugins that build and cache a cluster data model."""

    # Sentinel model used to flag the cached model as out of date.
    STALE_MODEL = model_root.ModelRoot(stale=True)

    def __init__(self, config, osc=None):
        super(BaseClusterDataModelCollector, self).__init__(config)
        self.osc = osc if osc else clients.OpenStackClients()
        self._cluster_data_model = None
        self.lock = threading.RLock()

    @property
    def cluster_data_model(self):
        """Lazily built cluster data model (cached after the first build)."""
        if self._cluster_data_model is None:
            # Use 'with' so the lock is released even if execute() raises;
            # the previous explicit acquire()/release() pair leaked the lock
            # on error. Re-check under the lock so a thread that waited while
            # another thread built the model does not rebuild it needlessly.
            with self.lock:
                if self._cluster_data_model is None:
                    self._cluster_data_model = self.execute()

        return self._cluster_data_model

    @cluster_data_model.setter
    def cluster_data_model(self, model):
        with self.lock:
            self._cluster_data_model = model

    @abc.abstractproperty
    def notification_endpoints(self):
        """Associated notification endpoints

        :return: Associated notification endpoints
        :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances
        """
        raise NotImplementedError()

    def set_cluster_data_model_as_stale(self):
        # Mark the cached model as stale so consumers know it must be rebuilt.
        self.cluster_data_model = self.STALE_MODEL

    @abc.abstractmethod
    def execute(self):
        """Build a cluster data model"""
        raise NotImplementedError()

    @classmethod
    def get_config_opts(cls):
        """Configuration options exposed by every collector plugin."""
        return [
            cfg.IntOpt(
                'period',
                default=3600,
                help='The time interval (in seconds) between each '
                     'synchronization of the model'),
        ]

    def get_latest_cluster_data_model(self):
        """Return a deep copy of the current cluster data model."""
        LOG.debug("Creating copy")
        LOG.debug(self.cluster_data_model.to_xml())
        return copy.deepcopy(self.cluster_data_model)

    def synchronize(self):
        """Synchronize the cluster data model

        Whenever called this synchronization will perform a drop-in replacement
        with the existing cluster data model
        """
        self.cluster_data_model = self.execute()
class CinderClusterDataModelCollector(base.BaseClusterDataModelCollector):
    """Cinder cluster data model collector

    The Cinder cluster data model collector creates an in-memory
    representation of the resources exposed by the storage service.
    """

    def __init__(self, config, osc=None):
        super(CinderClusterDataModelCollector, self).__init__(config, osc)

    @property
    def notification_endpoints(self):
        """Associated notification endpoints

        :return: Associated notification endpoints
        :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances
        """
        return [
            cinder.CapacityNotificationEndpoint(self),
            cinder.VolumeCreateEnd(self),
            cinder.VolumeDeleteEnd(self),
            cinder.VolumeUpdateEnd(self),
            cinder.VolumeAttachEnd(self),
            cinder.VolumeDetachEnd(self),
            cinder.VolumeResizeEnd(self)
        ]

    def execute(self):
        """Build the storage cluster data model"""
        LOG.debug("Building latest Cinder cluster data model")

        builder = ModelBuilder(self.osc)
        return builder.execute()


class ModelBuilder(object):
    """Build the graph-based model

    This model builder adds the following data:

    - Storage-related knowledge (Cinder)
    """

    def __init__(self, osc):
        self.osc = osc
        self.model = model_root.StorageModelRoot()
        self.cinder = osc.cinder()
        self.cinder_helper = cinder_helper.CinderHelper(osc=self.osc)

    def _add_physical_layer(self):
        """Add the physical layer of the graph.

        This includes components which represent actual infrastructure
        hardware.
        """
        for snode in self.cinder_helper.get_storage_node_list():
            self.add_storage_node(snode)
        for pool in self.cinder_helper.get_storage_pool_list():
            pool = self._build_storage_pool(pool)
            self.model.add_pool(pool)
            storage_name = getattr(pool, 'name')
            try:
                storage_node = self.model.get_node_by_name(
                    storage_name)
                # Connect the pool to its storage node
                self.model.map_pool(pool, storage_node)
            except exception.StorageNodeNotFound:
                # Pools whose node is unknown are kept unmapped.
                continue

    def add_storage_node(self, node):
        # Build and add base node.
        storage_node = self.build_storage_node(node)
        self.model.add_node(storage_node)

    def add_storage_pool(self, pool):
        storage_pool = self._build_storage_pool(pool)
        self.model.add_pool(storage_pool)

    def build_storage_node(self, node):
        """Build a storage node from a Cinder storage node

        :param node: A storage node
        :type node: :py:class:`~cinderclient.v2.services.Service`
        """
        # node.host is formatted as host@backendname since ocata,
        # or may be only host as of ocata
        backend = ""
        try:
            backend = node.host.split('@')[1]
        except IndexError:
            # No '@' in the host name: no backend part to extract.
            pass

        volume_type = self.cinder_helper.get_volume_type_by_backendname(
            backend)

        # build up the storage node.
        node_attributes = {
            "host": node.host,
            "zone": node.zone,
            "state": node.state,
            "status": node.status,
            "volume_type": volume_type}

        storage_node = element.StorageNode(**node_attributes)
        return storage_node

    def _build_storage_pool(self, pool):
        """Build a storage pool from a Cinder storage pool

        :param pool: A storage pool
        :type pool: :py:class:`~cinderclient.v2.capabilities.Capabilities`
        """
        # build up the storage pool.
        node_attributes = {
            "name": pool.name,
            "total_volumes": pool.total_volumes,
            "total_capacity_gb": pool.total_capacity_gb,
            "free_capacity_gb": pool.free_capacity_gb,
            "provisioned_capacity_gb": pool.provisioned_capacity_gb,
            "allocated_capacity_gb": pool.allocated_capacity_gb}

        storage_pool = element.Pool(**node_attributes)
        return storage_pool

    def _add_virtual_layer(self):
        """Add the virtual layer to the graph.

        This layer is the virtual components of the infrastructure.
        """
        self._add_virtual_storage()

    def _add_virtual_storage(self):
        """Add every Cinder volume to the model and map it to its pool."""
        volumes = self.cinder_helper.get_volume_list()
        for vol in volumes:
            volume = self._build_volume_node(vol)
            self.model.add_volume(volume)
            pool_name = getattr(vol, 'os-vol-host-attr:host')
            if pool_name is None:
                # The volume is not attached to any pool
                continue
            try:
                pool = self.model.get_pool_by_pool_name(
                    pool_name)
                self.model.map_volume(volume, pool)
            except exception.PoolNotFound:
                continue

    def _build_volume_node(self, volume):
        """Build a volume node

        Create a volume node for the graph using cinder and the
        `volume` cinder object.
        :param volume: Cinder Volume object.
        :return: A volume node for the graph.
        """
        # Only keep the attachment fields the model cares about.
        attachments = [{k: v for k, v in six.iteritems(d) if k in (
            'server_id', 'attachment_id')} for d in volume.attachments]

        volume_attributes = {
            "uuid": volume.id,
            "size": volume.size,
            "status": volume.status,
            "attachments": attachments,
            "name": volume.name or "",
            "multiattach": volume.multiattach,
            "snapshot_id": volume.snapshot_id or "",
            "project_id": getattr(volume, 'os-vol-tenant-attr:tenant_id'),
            "metadata": volume.metadata,
            "bootable": volume.bootable}

        return element.Volume(**volume_attributes)

    def execute(self):
        """Instantiates the graph with the openstack cluster data.

        The graph is populated along 2 layers: virtual and physical. As each
        new layer is built connections are made back to previous layers.
        """
        self._add_physical_layer()
        self._add_virtual_layer()
        return self.model
- -from watcher.common import utils -from watcher.decision_engine.loading import default - - -class CollectorManager(object): - - def __init__(self): - self.collector_loader = default.ClusterDataModelCollectorLoader() - self._collectors = None - self._notification_endpoints = None - - def get_collectors(self): - if self._collectors is None: - collectors = utils.Struct() - available_collectors = self.collector_loader.list_available() - for collector_name in available_collectors: - collector = self.collector_loader.load(collector_name) - collectors[collector_name] = collector - self._collectors = collectors - - return self._collectors - - def get_notification_endpoints(self): - if self._notification_endpoints is None: - endpoints = [] - for collector in self.get_collectors().values(): - endpoints.extend(collector.notification_endpoints) - self._notification_endpoints = endpoints - - return self._notification_endpoints - - def get_cluster_model_collector(self, name, osc=None): - """Retrieve cluster data model collector - - :param name: name of the cluster data model collector plugin - :type name: str - :param osc: an OpenStackClients instance - :type osc: :py:class:`~.OpenStackClients` instance - :returns: cluster data model collector plugin - :rtype: :py:class:`~.BaseClusterDataModelCollector` - """ - return self.collector_loader.load(name, osc=osc) diff --git a/watcher/decision_engine/model/collector/nova.py b/watcher/decision_engine/model/collector/nova.py deleted file mode 100644 index a5fe3bd..0000000 --- a/watcher/decision_engine/model/collector/nova.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Intel Innovation and Research Ireland Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import element -from watcher.decision_engine.model import model_root -from watcher.decision_engine.model.notification import nova - -LOG = log.getLogger(__name__) - - -class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector): - """Nova cluster data model collector - - The Nova cluster data model collector creates an in-memory - representation of the resources exposed by the compute service. 
- """ - - def __init__(self, config, osc=None): - super(NovaClusterDataModelCollector, self).__init__(config, osc) - - @property - def notification_endpoints(self): - """Associated notification endpoints - - :return: Associated notification endpoints - :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances - """ - return [ - nova.ServiceUpdated(self), - - nova.InstanceCreated(self), - nova.InstanceUpdated(self), - nova.InstanceDeletedEnd(self), - - nova.LegacyInstanceCreatedEnd(self), - nova.LegacyInstanceUpdated(self), - nova.LegacyInstanceDeletedEnd(self), - nova.LegacyLiveMigratedEnd(self), - ] - - def execute(self): - """Build the compute cluster data model""" - LOG.debug("Building latest Nova cluster data model") - - builder = ModelBuilder(self.osc) - return builder.execute() - - -class ModelBuilder(object): - """Build the graph-based model - - This model builder adds the following data" - - - Compute-related knowledge (Nova) - - TODO(v-francoise): Storage-related knowledge (Cinder) - - TODO(v-francoise): Network-related knowledge (Neutron) - - NOTE(v-francoise): This model builder is meant to be extended in the future - to also include both storage and network information respectively coming - from Cinder and Neutron. Some prelimary work has been done in this - direction in https://review.openstack.org/#/c/362730 but since we cannot - guarantee a sufficient level of consistency for neither the storage nor the - network part before the end of the Ocata cycle, this work has been - re-scheduled for Pike. In the meantime, all the associated code has been - commented out. - """ - def __init__(self, osc): - self.osc = osc - self.model = model_root.ModelRoot() - self.nova = osc.nova() - self.nova_helper = nova_helper.NovaHelper(osc=self.osc) - # self.neutron = osc.neutron() - # self.cinder = osc.cinder() - - def _add_physical_layer(self): - """Add the physical layer of the graph. 
- - This includes components which represent actual infrastructure - hardware. - """ - for cnode in self.nova_helper.get_compute_node_list(): - self.add_compute_node(cnode) - - def add_compute_node(self, node): - # Build and add base node. - compute_node = self.build_compute_node(node) - self.model.add_node(compute_node) - - # NOTE(v-francoise): we can encapsulate capabilities of the node - # (special instruction sets of CPUs) in the attributes; as well as - # sub-nodes can be added re-presenting e.g. GPUs/Accelerators etc. - - # # Build & add disk, memory, network and cpu nodes. - # disk_id, disk_node = self.build_disk_compute_node(base_id, node) - # self.add_node(disk_id, disk_node) - # mem_id, mem_node = self.build_memory_compute_node(base_id, node) - # self.add_node(mem_id, mem_node) - # net_id, net_node = self._build_network_compute_node(base_id) - # self.add_node(net_id, net_node) - # cpu_id, cpu_node = self.build_cpu_compute_node(base_id, node) - # self.add_node(cpu_id, cpu_node) - - # # Connect the base compute node to the dependant nodes. - # self.add_edges_from([(base_id, disk_id), (base_id, mem_id), - # (base_id, cpu_id), (base_id, net_id)], - # label="contains") - - def build_compute_node(self, node): - """Build a compute node from a Nova compute node - - :param node: A node hypervisor instance - :type node: :py:class:`~novaclient.v2.hypervisors.Hypervisor` - """ - # build up the compute node. 
- compute_service = self.nova_helper.get_service(node.service["id"]) - node_attributes = { - "id": node.id, - "uuid": compute_service.host, - "hostname": node.hypervisor_hostname, - "memory": node.memory_mb, - "disk": node.free_disk_gb, - "disk_capacity": node.local_gb, - "vcpus": node.vcpus, - "state": node.state, - "status": node.status} - - compute_node = element.ComputeNode(**node_attributes) - # compute_node = self._build_node("physical", "compute", "hypervisor", - # node_attributes) - return compute_node - - # def _build_network_compute_node(self, base_node): - # attributes = {} - # net_node = self._build_node("physical", "network", "NIC", attributes) - # net_id = "{}_network".format(base_node) - # return net_id, net_node - - # def build_disk_compute_node(self, base_node, compute): - # # Build disk node attributes. - # disk_attributes = { - # "size_gb": compute.local_gb, - # "used_gb": compute.local_gb_used, - # "available_gb": compute.free_disk_gb} - # disk_node = self._build_node("physical", "storage", "disk", - # disk_attributes) - # disk_id = "{}_disk".format(base_node) - # return disk_id, disk_node - - # def build_memory_compute_node(self, base_node, compute): - # # Build memory node attributes. - # memory_attrs = {"size_mb": compute.memory_mb, - # "used_mb": compute.memory_mb_used, - # "available_mb": compute.free_ram_mb} - # memory_node = self._build_node("physical", "memory", "memory", - # memory_attrs) - # memory_id = "{}_memory".format(base_node) - # return memory_id, memory_node - - # def build_cpu_compute_node(self, base_node, compute): - # # Build memory node attributes. 
- # cpu_attributes = {"vcpus": compute.vcpus, - # "vcpus_used": compute.vcpus_used, - # "info": jsonutils.loads(compute.cpu_info)} - # cpu_node = self._build_node("physical", "cpu", "cpu", cpu_attributes) - # cpu_id = "{}_cpu".format(base_node) - # return cpu_id, cpu_node - - # @staticmethod - # def _build_node(layer, category, node_type, attributes): - # return {"layer": layer, "category": category, "type": node_type, - # "attributes": attributes} - - def _add_virtual_layer(self): - """Add the virtual layer to the graph. - - This layer is the virtual components of the infrastructure, - such as vms. - """ - self._add_virtual_servers() - # self._add_virtual_network() - # self._add_virtual_storage() - - def _add_virtual_servers(self): - all_instances = self.nova_helper.get_instance_list() - for inst in all_instances: - # Add Node - instance = self._build_instance_node(inst) - self.model.add_instance(instance) - # Get the cnode_name uuid. - cnode_uuid = getattr(inst, "OS-EXT-SRV-ATTR:host") - if cnode_uuid is None: - # The instance is not attached to any Compute node - continue - try: - # Nova compute node - # cnode = self.nova_helper.get_compute_node_by_hostname( - # cnode_uuid) - compute_node = self.model.get_node_by_uuid( - cnode_uuid) - # Connect the instance to its compute node - self.model.map_instance(instance, compute_node) - except exception.ComputeNodeNotFound: - continue - - def _build_instance_node(self, instance): - """Build an instance node - - Create an instance node for the graph using nova and the - `server` nova object. - :param instance: Nova VM object. - :return: A instance node for the graph. 
- """ - flavor = self.nova_helper.get_flavor(instance.flavor["id"]) - instance_attributes = { - "uuid": instance.id, - "human_id": instance.human_id, - "memory": flavor.ram, - "disk": flavor.disk, - "disk_capacity": flavor.disk, - "vcpus": flavor.vcpus, - "state": getattr(instance, "OS-EXT-STS:vm_state"), - "metadata": instance.metadata} - - # node_attributes = dict() - # node_attributes["layer"] = "virtual" - # node_attributes["category"] = "compute" - # node_attributes["type"] = "compute" - # node_attributes["attributes"] = instance_attributes - return element.Instance(**instance_attributes) - - # def _add_virtual_storage(self): - # try: - # volumes = self.cinder.volumes.list() - # except Exception: - # return - # for volume in volumes: - # volume_id, volume_node = self._build_storage_node(volume) - # self.add_node(volume_id, volume_node) - # host = self._get_volume_host_id(volume_node) - # self.add_edge(volume_id, host) - # # Add connections to an instance. - # if volume_node['attributes']['attachments']: - # for attachment in volume_node['attributes']['attachments']: - # self.add_edge(volume_id, attachment['server_id'], - # label='ATTACHED_TO') - # volume_node['attributes'].pop('attachments') - - # def _add_virtual_network(self): - # try: - # routers = self.neutron.list_routers() - # except Exception: - # return - - # for network in self.neutron.list_networks()['networks']: - # self.add_node(*self._build_network(network)) - - # for router in routers['routers']: - # self.add_node(*self._build_router(router)) - - # router_interfaces, _, compute_ports = self._group_ports() - # for router_interface in router_interfaces: - # interface = self._build_router_interface(router_interface) - # router_interface_id = interface[0] - # router_interface_node = interface[1] - # router_id = interface[2] - # self.add_node(router_interface_id, router_interface_node) - # self.add_edge(router_id, router_interface_id) - # network_id = router_interface_node['attributes']['network_id'] 
- # self.add_edge(router_interface_id, network_id) - - # for compute_port in compute_ports: - # cp_id, cp_node, instance_id = self._build_compute_port_node( - # compute_port) - # self.add_node(cp_id, cp_node) - # self.add_edge(cp_id, vm_id) - # net_id = cp_node['attributes']['network_id'] - # self.add_edge(net_id, cp_id) - # # Connect port to physical node - # phys_net_node = "{}_network".format(cp_node['attributes'] - # ['binding:host_id']) - # self.add_edge(cp_id, phys_net_node) - - # def _get_volume_host_id(self, volume_node): - # host = volume_node['attributes']['os-vol-host-attr:host'] - # if host.find('@') != -1: - # host = host.split('@')[0] - # elif host.find('#') != -1: - # host = host.split('#')[0] - # return "{}_disk".format(host) - - # def _build_storage_node(self, volume_obj): - # volume = volume_obj.__dict__ - # volume["name"] = volume["id"] - # volume.pop("id") - # volume.pop("manager") - # node = self._build_node("virtual", "storage", 'volume', volume) - # return volume["name"], node - - # def _build_compute_port_node(self, compute_port): - # compute_port["name"] = compute_port["id"] - # compute_port.pop("id") - # nde_type = "{}_port".format( - # compute_port["device_owner"].split(":")[0]) - # compute_port.pop("device_owner") - # device_id = compute_port["device_id"] - # compute_port.pop("device_id") - # node = self._build_node("virtual", "network", nde_type, compute_port) - # return compute_port["name"], node, device_id - - # def _group_ports(self): - # router_interfaces = [] - # floating_ips = [] - # compute_ports = [] - # interface_types = ["network:router_interface", - # 'network:router_gateway'] - - # for port in self.neutron.list_ports()['ports']: - # if port['device_owner'] in interface_types: - # router_interfaces.append(port) - # elif port['device_owner'].startswith('compute:'): - # compute_ports.append(port) - # elif port['device_owner'] == 'network:floatingip': - # floating_ips.append(port) - - # return router_interfaces, floating_ips, 
compute_ports - - # def _build_router_interface(self, interface): - # interface["name"] = interface["id"] - # interface.pop("id") - # node_type = interface["device_owner"].split(":")[1] - # node = self._build_node("virtual", "network", node_type, interface) - # return interface["name"], node, interface["device_id"] - - # def _build_router(self, router): - # router_attrs = {"uuid": router['id'], - # "name": router['name'], - # "state": router['status']} - # node = self._build_node('virtual', 'network', 'router', router_attrs) - # return str(router['id']), node - - # def _build_network(self, network): - # node = self._build_node('virtual', 'network', 'network', network) - # return network['id'], node - - def execute(self): - """Instantiates the graph with the openstack cluster data. - - The graph is populated along 2 layers: virtual and physical. As each - new layer is built connections are made back to previous layers. - """ - self._add_physical_layer() - self._add_virtual_layer() - return self.model diff --git a/watcher/decision_engine/model/element/__init__.py b/watcher/decision_engine/model/element/__init__.py deleted file mode 100644 index dce2528..0000000 --- a/watcher/decision_engine/model/element/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.decision_engine.model.element import instance -from watcher.decision_engine.model.element import node -from watcher.decision_engine.model.element import volume - -ServiceState = node.ServiceState -ComputeNode = node.ComputeNode -StorageNode = node.StorageNode -Pool = node.Pool - -InstanceState = instance.InstanceState -Instance = instance.Instance -VolumeState = volume.VolumeState -Volume = volume.Volume - -__all__ = ['ServiceState', - 'ComputeNode', - 'InstanceState', - 'Instance', - 'StorageNode', - 'Pool', - 'VolumeState', - 'Volume'] diff --git a/watcher/decision_engine/model/element/base.py b/watcher/decision_engine/model/element/base.py deleted file mode 100644 index 6ff04da..0000000 --- a/watcher/decision_engine/model/element/base.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import collections - -from lxml import etree -from oslo_log import log -import six - -from watcher.objects import base -from watcher.objects import fields as wfields - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class Element(base.WatcherObject, base.WatcherObjectDictCompat, - base.WatcherComparableObject): - - # Initial version - VERSION = '1.0' - - fields = {} - - def __init__(self, context=None, **kwargs): - for name, field in self.fields.items(): - # The idea here is to force the initialization of unspecified - # fields that have a default value - if (name not in kwargs and not field.nullable and - field.default != wfields.UnspecifiedDefault): - kwargs[name] = field.default - super(Element, self).__init__(context, **kwargs) - - @abc.abstractmethod - def accept(self, visitor): - raise NotImplementedError() - - def as_xml_element(self): - sorted_fieldmap = [] - for field in self.fields: - try: - value = str(self[field]) - sorted_fieldmap.append((field, value)) - except Exception as exc: - LOG.exception(exc) - - attrib = collections.OrderedDict(sorted_fieldmap) - - element_name = self.__class__.__name__ - instance_el = etree.Element(element_name, attrib=attrib) - - return instance_el diff --git a/watcher/decision_engine/model/element/compute_resource.py b/watcher/decision_engine/model/element/compute_resource.py deleted file mode 100644 index 4b0348a..0000000 --- a/watcher/decision_engine/model/element/compute_resource.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - -from watcher.decision_engine.model.element import base -from watcher.objects import fields as wfields - - -@six.add_metaclass(abc.ABCMeta) -class ComputeResource(base.Element): - - VERSION = '1.0' - - fields = { - "uuid": wfields.StringField(), - "human_id": wfields.StringField(default=""), - } diff --git a/watcher/decision_engine/model/element/instance.py b/watcher/decision_engine/model/element/instance.py deleted file mode 100644 index ebdb16d..0000000 --- a/watcher/decision_engine/model/element/instance.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum - -from watcher.decision_engine.model.element import compute_resource -from watcher.objects import base -from watcher.objects import fields as wfields - - -class InstanceState(enum.Enum): - ACTIVE = 'active' # Instance is running - BUILDING = 'building' # Instance only exists in DB - PAUSED = 'paused' - SUSPENDED = 'suspended' # Instance is suspended to disk. 
- STOPPED = 'stopped' # Instance is shut off, the disk image is still there. - RESCUED = 'rescued' # A rescue image is running with the original image - # attached. - RESIZED = 'resized' # a Instance with the new size is active. - - SOFT_DELETED = 'soft-delete' - # still available to restore. - DELETED = 'deleted' # Instance is permanently deleted. - - ERROR = 'error' - - -@base.WatcherObjectRegistry.register_if(False) -class Instance(compute_resource.ComputeResource): - - fields = { - "state": wfields.StringField(default=InstanceState.ACTIVE.value), - - "memory": wfields.NonNegativeIntegerField(), - "disk": wfields.IntegerField(), - "disk_capacity": wfields.NonNegativeIntegerField(), - "vcpus": wfields.NonNegativeIntegerField(), - "metadata": wfields.JsonField(), - } - - def accept(self, visitor): - raise NotImplementedError() diff --git a/watcher/decision_engine/model/element/node.py b/watcher/decision_engine/model/element/node.py deleted file mode 100644 index 3807a6f..0000000 --- a/watcher/decision_engine/model/element/node.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import enum - -from watcher.decision_engine.model.element import compute_resource -from watcher.decision_engine.model.element import storage_resource -from watcher.objects import base -from watcher.objects import fields as wfields - - -class ServiceState(enum.Enum): - ONLINE = 'up' - OFFLINE = 'down' - ENABLED = 'enabled' - DISABLED = 'disabled' - - -@base.WatcherObjectRegistry.register_if(False) -class ComputeNode(compute_resource.ComputeResource): - - fields = { - "id": wfields.NonNegativeIntegerField(), - "hostname": wfields.StringField(), - "status": wfields.StringField(default=ServiceState.ENABLED.value), - "state": wfields.StringField(default=ServiceState.ONLINE.value), - - "memory": wfields.NonNegativeIntegerField(), - "disk": wfields.IntegerField(), - "disk_capacity": wfields.NonNegativeIntegerField(), - "vcpus": wfields.NonNegativeIntegerField(), - } - - def accept(self, visitor): - raise NotImplementedError() - - -@base.WatcherObjectRegistry.register_if(False) -class StorageNode(storage_resource.StorageResource): - - fields = { - "host": wfields.StringField(), - "zone": wfields.StringField(), - "status": wfields.StringField(default=ServiceState.ENABLED.value), - "state": wfields.StringField(default=ServiceState.ONLINE.value), - "volume_type": wfields.StringField() - } - - def accept(self, visitor): - raise NotImplementedError() - - -@base.WatcherObjectRegistry.register_if(False) -class Pool(storage_resource.StorageResource): - - fields = { - "name": wfields.StringField(), - "total_volumes": wfields.NonNegativeIntegerField(), - "total_capacity_gb": wfields.NonNegativeIntegerField(), - "free_capacity_gb": wfields.NonNegativeIntegerField(), - "provisioned_capacity_gb": wfields.NonNegativeIntegerField(), - "allocated_capacity_gb": wfields.NonNegativeIntegerField(), - "virtual_free": wfields.NonNegativeIntegerField(), - } - - def accept(self, visitor): - raise NotImplementedError() diff --git a/watcher/decision_engine/model/element/storage_resource.py 
b/watcher/decision_engine/model/element/storage_resource.py deleted file mode 100644 index e65fb01..0000000 --- a/watcher/decision_engine/model/element/storage_resource.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - -from watcher.decision_engine.model.element import base -from watcher.objects import fields as wfields - - -@six.add_metaclass(abc.ABCMeta) -class StorageResource(base.Element): - - VERSION = '1.0' - - fields = { - "uuid": wfields.StringField(), - "human_id": wfields.StringField(default=""), - } diff --git a/watcher/decision_engine/model/element/volume.py b/watcher/decision_engine/model/element/volume.py deleted file mode 100644 index f96cd7c..0000000 --- a/watcher/decision_engine/model/element/volume.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import enum - -from watcher.decision_engine.model.element import storage_resource -from watcher.objects import base -from watcher.objects import fields as wfields - - -class VolumeState(enum.Enum): - # https://developer.openstack.org/api-ref/block-storage/v3/#volumes-volumes - - CREATING = 'creating' - AVAILABLE = 'available' - ATTACHING = 'attaching' - IN_USE = 'in-use' - DELETING = 'deleting' - ERROR = 'error' - ERROR_DELETING = 'error_deleting' - BACKING_UP = 'backing-up' - RESTORING_BACKUP = 'restoring-backup' - ERROR_RESTORING = 'error_restoring' - ERROR_EXTENDING = 'error_extending' - - -@base.WatcherObjectRegistry.register_if(False) -class Volume(storage_resource.StorageResource): - - fields = { - "size": wfields.NonNegativeIntegerField(), - "status": wfields.StringField(default=VolumeState.AVAILABLE.value), - "attachments": wfields.FlexibleListOfDictField(), - "name": wfields.StringField(), - "multiattach": wfields.BooleanField(), - "snapshot_id": wfields.UUIDField(), - "project_id": wfields.UUIDField(), - "metadata": wfields.JsonField(), - "bootable": wfields.BooleanField() - } - - def accept(self, visitor): - raise NotImplementedError() diff --git a/watcher/decision_engine/model/model_root.py b/watcher/decision_engine/model/model_root.py deleted file mode 100644 index 3b47085..0000000 --- a/watcher/decision_engine/model/model_root.py +++ /dev/null @@ -1,541 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Innovation and Research Ireland Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Openstack implementation of the cluster graph. -""" - -from lxml import etree -import networkx as nx -from oslo_concurrency import lockutils -from oslo_log import log -import six - -from watcher._i18n import _ -from watcher.common import exception -from watcher.decision_engine.model import base -from watcher.decision_engine.model import element - -LOG = log.getLogger(__name__) - - -class ModelRoot(nx.DiGraph, base.Model): - """Cluster graph for an Openstack cluster.""" - - def __init__(self, stale=False): - super(ModelRoot, self).__init__() - self.stale = stale - - def __nonzero__(self): - return not self.stale - - __bool__ = __nonzero__ - - @staticmethod - def assert_node(obj): - if not isinstance(obj, element.ComputeNode): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @staticmethod - def assert_instance(obj): - if not isinstance(obj, element.Instance): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid")) - - @lockutils.synchronized("model_root") - def add_node(self, node): - self.assert_node(node) - super(ModelRoot, self).add_node(node.uuid, node) - - @lockutils.synchronized("model_root") - def remove_node(self, node): - self.assert_node(node) - try: - super(ModelRoot, self).remove_node(node.uuid) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.ComputeNodeNotFound(name=node.uuid) - - @lockutils.synchronized("model_root") - def add_instance(self, instance): - self.assert_instance(instance) - try: - super(ModelRoot, 
self).add_node(instance.uuid, instance) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.InstanceNotFound(name=instance.uuid) - - @lockutils.synchronized("model_root") - def remove_instance(self, instance): - self.assert_instance(instance) - super(ModelRoot, self).remove_node(instance.uuid) - - @lockutils.synchronized("model_root") - def map_instance(self, instance, node): - """Map a newly created instance to a node - - :param instance: :py:class:`~.Instance` object or instance UUID - :type instance: str or :py:class:`~.Instance` - :param node: :py:class:`~.ComputeNode` object or node UUID - :type node: str or :py:class:`~.Instance` - """ - if isinstance(instance, six.string_types): - instance = self.get_instance_by_uuid(instance) - if isinstance(node, six.string_types): - node = self.get_node_by_uuid(node) - self.assert_node(node) - self.assert_instance(instance) - - self.add_edge(instance.uuid, node.uuid) - - @lockutils.synchronized("model_root") - def unmap_instance(self, instance, node): - if isinstance(instance, six.string_types): - instance = self.get_instance_by_uuid(instance) - if isinstance(node, six.string_types): - node = self.get_node_by_uuid(node) - - self.remove_edge(instance.uuid, node.uuid) - - def delete_instance(self, instance, node=None): - self.assert_instance(instance) - self.remove_instance(instance) - - @lockutils.synchronized("model_root") - def migrate_instance(self, instance, source_node, destination_node): - """Migrate single instance from source_node to destination_node - - :param instance: - :param source_node: - :param destination_node: - :return: - """ - self.assert_instance(instance) - self.assert_node(source_node) - self.assert_node(destination_node) - - if source_node == destination_node: - return False - - # unmap - self.remove_edge(instance.uuid, source_node.uuid) - # map - self.add_edge(instance.uuid, destination_node.uuid) - return True - - @lockutils.synchronized("model_root") - def 
get_all_compute_nodes(self): - return {uuid: cn for uuid, cn in self.nodes(data=True) - if isinstance(cn, element.ComputeNode)} - - @lockutils.synchronized("model_root") - def get_node_by_uuid(self, uuid): - try: - return self._get_by_uuid(uuid) - except exception.ComputeResourceNotFound: - raise exception.ComputeNodeNotFound(name=uuid) - - @lockutils.synchronized("model_root") - def get_instance_by_uuid(self, uuid): - try: - return self._get_by_uuid(uuid) - except exception.ComputeResourceNotFound: - raise exception.InstanceNotFound(name=uuid) - - def _get_by_uuid(self, uuid): - try: - return self.node[uuid] - except Exception as exc: - LOG.exception(exc) - raise exception.ComputeResourceNotFound(name=uuid) - - @lockutils.synchronized("model_root") - def get_node_by_instance_uuid(self, instance_uuid): - instance = self._get_by_uuid(instance_uuid) - for node_uuid in self.neighbors(instance.uuid): - node = self._get_by_uuid(node_uuid) - if isinstance(node, element.ComputeNode): - return node - raise exception.ComputeNodeNotFound(name=instance_uuid) - - @lockutils.synchronized("model_root") - def get_all_instances(self): - return {uuid: inst for uuid, inst in self.nodes(data=True) - if isinstance(inst, element.Instance)} - - @lockutils.synchronized("model_root") - def get_node_instances(self, node): - self.assert_node(node) - node_instances = [] - for instance_uuid in self.predecessors(node.uuid): - instance = self._get_by_uuid(instance_uuid) - if isinstance(instance, element.Instance): - node_instances.append(instance) - - return node_instances - - def to_string(self): - return self.to_xml() - - def to_xml(self): - root = etree.Element("ModelRoot") - # Build compute node tree - for cn in sorted(self.get_all_compute_nodes().values(), - key=lambda cn: cn.uuid): - compute_node_el = cn.as_xml_element() - - # Build mapped instance tree - node_instances = self.get_node_instances(cn) - for instance in sorted(node_instances, key=lambda x: x.uuid): - instance_el = 
instance.as_xml_element() - compute_node_el.append(instance_el) - - root.append(compute_node_el) - - # Build unmapped instance tree (i.e. not assigned to any compute node) - for instance in sorted(self.get_all_instances().values(), - key=lambda inst: inst.uuid): - try: - self.get_node_by_instance_uuid(instance.uuid) - except (exception.InstanceNotFound, exception.ComputeNodeNotFound): - root.append(instance.as_xml_element()) - - return etree.tostring(root, pretty_print=True).decode('utf-8') - - @classmethod - def from_xml(cls, data): - model = cls() - - root = etree.fromstring(data) - for cn in root.findall('.//ComputeNode'): - node = element.ComputeNode(**cn.attrib) - model.add_node(node) - - for inst in root.findall('.//Instance'): - instance = element.Instance(**inst.attrib) - model.add_instance(instance) - - parent = inst.getparent() - if parent.tag == 'ComputeNode': - node = model.get_node_by_uuid(parent.get('uuid')) - model.map_instance(instance, node) - else: - model.add_instance(instance) - - return model - - @classmethod - def is_isomorphic(cls, G1, G2): - def node_match(node1, node2): - return node1.as_dict() == node2.as_dict() - return nx.algorithms.isomorphism.isomorph.is_isomorphic( - G1, G2, node_match=node_match) - - -class StorageModelRoot(nx.DiGraph, base.Model): - """Cluster graph for an Openstack cluster.""" - - def __init__(self, stale=False): - super(StorageModelRoot, self).__init__() - self.stale = stale - - def __nonzero__(self): - return not self.stale - - __bool__ = __nonzero__ - - @staticmethod - def assert_node(obj): - if not isinstance(obj, element.StorageNode): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @staticmethod - def assert_pool(obj): - if not isinstance(obj, element.Pool): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @staticmethod - def assert_volume(obj): - if not isinstance(obj, 
element.Volume): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @lockutils.synchronized("storage_model") - def add_node(self, node): - self.assert_node(node) - super(StorageModelRoot, self).add_node(node.host, node) - - @lockutils.synchronized("storage_model") - def add_pool(self, pool): - self.assert_pool(pool) - super(StorageModelRoot, self).add_node(pool.name, pool) - - @lockutils.synchronized("storage_model") - def remove_node(self, node): - self.assert_node(node) - try: - super(StorageModelRoot, self).remove_node(node.host) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.StorageNodeNotFound(name=node.host) - - @lockutils.synchronized("storage_model") - def remove_pool(self, pool): - self.assert_pool(pool) - try: - super(StorageModelRoot, self).remove_node(pool.name) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.PoolNotFound(name=pool.name) - - @lockutils.synchronized("storage_model") - def map_pool(self, pool, node): - """Map a newly created pool to a node - - :param pool: :py:class:`~.Pool` object or pool name - :param node: :py:class:`~.StorageNode` object or node host - """ - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - if isinstance(node, six.string_types): - node = self.get_node_by_name(node) - self.assert_node(node) - self.assert_pool(pool) - - self.add_edge(pool.name, node.host) - - @lockutils.synchronized("storage_model") - def unmap_pool(self, pool, node): - """Unmap a pool from a node - - :param pool: :py:class:`~.Pool` object or pool name - :param node: :py:class:`~.StorageNode` object or node name - """ - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - if isinstance(node, six.string_types): - node = self.get_node_by_name(node) - - self.remove_edge(pool.name, node.host) - - @lockutils.synchronized("storage_model") - def add_volume(self, volume): - 
self.assert_volume(volume) - super(StorageModelRoot, self).add_node(volume.uuid, volume) - - @lockutils.synchronized("storage_model") - def remove_volume(self, volume): - self.assert_volume(volume) - try: - super(StorageModelRoot, self).remove_node(volume.uuid) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.VolumeNotFound(name=volume.uuid) - - @lockutils.synchronized("storage_model") - def map_volume(self, volume, pool): - """Map a newly created volume to a pool - - :param volume: :py:class:`~.Volume` object or volume UUID - :param pool: :py:class:`~.Pool` object or pool name - """ - if isinstance(volume, six.string_types): - volume = self.get_volume_by_uuid(volume) - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - self.assert_pool(pool) - self.assert_volume(volume) - - self.add_edge(volume.uuid, pool.name) - - @lockutils.synchronized("storage_model") - def unmap_volume(self, volume, pool): - """Unmap a volume from a pool - - :param volume: :py:class:`~.Volume` object or volume UUID - :param pool: :py:class:`~.Pool` object or pool name - """ - if isinstance(volume, six.string_types): - volume = self.get_volume_by_uuid(volume) - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - - self.remove_edge(volume.uuid, pool.name) - - def delete_volume(self, volume): - self.assert_volume(volume) - self.remove_volume(volume) - - @lockutils.synchronized("storage_model") - def get_all_storage_nodes(self): - return {host: cn for host, cn in self.nodes(data=True) - if isinstance(cn, element.StorageNode)} - - @lockutils.synchronized("storage_model") - def get_node_by_name(self, name): - """Get a node by node name - - :param node: :py:class:`~.StorageNode` object or node name - """ - try: - return self._get_by_name(name.split("#")[0]) - except exception.StorageResourceNotFound: - raise exception.StorageNodeNotFound(name=name) - - @lockutils.synchronized("storage_model") - def 
get_pool_by_pool_name(self, name): - try: - return self._get_by_name(name) - except exception.StorageResourceNotFound: - raise exception.PoolNotFound(name=name) - - @lockutils.synchronized("storage_model") - def get_volume_by_uuid(self, uuid): - try: - return self._get_by_uuid(uuid) - except exception.StorageResourceNotFound: - raise exception.VolumeNotFound(name=uuid) - - def _get_by_uuid(self, uuid): - try: - return self.node[uuid] - except Exception as exc: - LOG.exception(exc) - raise exception.StorageResourceNotFound(name=uuid) - - def _get_by_name(self, name): - try: - return self.node[name] - except Exception as exc: - LOG.exception(exc) - raise exception.StorageResourceNotFound(name=name) - - @lockutils.synchronized("storage_model") - def get_node_by_pool_name(self, pool_name): - pool = self._get_by_name(pool_name) - for node_name in self.neighbors(pool.name): - node = self._get_by_name(node_name) - if isinstance(node, element.StorageNode): - return node - raise exception.StorageNodeNotFound(name=pool_name) - - @lockutils.synchronized("storage_model") - def get_node_pools(self, node): - self.assert_node(node) - node_pools = [] - for pool_name in self.predecessors(node.host): - pool = self._get_by_name(pool_name) - if isinstance(pool, element.Pool): - node_pools.append(pool) - - return node_pools - - @lockutils.synchronized("storage_model") - def get_pool_by_volume(self, volume): - self.assert_volume(volume) - volume = self._get_by_uuid(volume.uuid) - for p in self.neighbors(volume.uuid): - pool = self._get_by_name(p) - if isinstance(pool, element.Pool): - return pool - raise exception.PoolNotFound(name=volume.uuid) - - @lockutils.synchronized("storage_model") - def get_all_volumes(self): - return {name: vol for name, vol in self.nodes(data=True) - if isinstance(vol, element.Volume)} - - @lockutils.synchronized("storage_model") - def get_pool_volumes(self, pool): - self.assert_pool(pool) - volumes = [] - for vol in self.predecessors(pool.name): - volume = 
self._get_by_uuid(vol) - if isinstance(volume, element.Volume): - volumes.append(volume) - - return volumes - - def to_string(self): - return self.to_xml() - - def to_xml(self): - root = etree.Element("ModelRoot") - # Build storage node tree - for cn in sorted(self.get_all_storage_nodes().values(), - key=lambda cn: cn.host): - storage_node_el = cn.as_xml_element() - # Build mapped pool tree - node_pools = self.get_node_pools(cn) - for pool in sorted(node_pools, key=lambda x: x.name): - pool_el = pool.as_xml_element() - storage_node_el.append(pool_el) - # Build mapped volume tree - pool_volumes = self.get_pool_volumes(pool) - for volume in sorted(pool_volumes, key=lambda x: x.uuid): - volume_el = volume.as_xml_element() - pool_el.append(volume_el) - - root.append(storage_node_el) - - # Build unmapped volume tree (i.e. not assigned to any pool) - for volume in sorted(self.get_all_volumes().values(), - key=lambda vol: vol.uuid): - try: - self.get_pool_by_volume(volume) - except (exception.VolumeNotFound, exception.PoolNotFound): - root.append(volume.as_xml_element()) - - return etree.tostring(root, pretty_print=True).decode('utf-8') - - @classmethod - def from_xml(cls, data): - model = cls() - - root = etree.fromstring(data) - for cn in root.findall('.//StorageNode'): - node = element.StorageNode(**cn.attrib) - model.add_node(node) - - for p in root.findall('.//Pool'): - pool = element.Pool(**p.attrib) - model.add_pool(pool) - - parent = p.getparent() - if parent.tag == 'StorageNode': - node = model.get_node_by_name(parent.get('host')) - model.map_pool(pool, node) - else: - model.add_pool(pool) - - for vol in root.findall('.//Volume'): - volume = element.Volume(**vol.attrib) - model.add_volume(volume) - - parent = vol.getparent() - if parent.tag == 'Pool': - pool = model.get_pool_by_pool_name(parent.get('name')) - model.map_volume(volume, pool) - else: - model.add_volume(volume) - - return model - - @classmethod - def is_isomorphic(cls, G1, G2): - return 
nx.algorithms.isomorphism.isomorph.is_isomorphic( - G1, G2) diff --git a/watcher/decision_engine/model/notification/__init__.py b/watcher/decision_engine/model/notification/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/model/notification/base.py b/watcher/decision_engine/model/notification/base.py deleted file mode 100644 index 9090ab3..0000000 --- a/watcher/decision_engine/model/notification/base.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class NotificationEndpoint(object): - - def __init__(self, collector): - super(NotificationEndpoint, self).__init__() - self.collector = collector - self._notifier = None - - @abc.abstractproperty - def filter_rule(self): - """Notification Filter""" - raise NotImplementedError() - - @property - def cluster_data_model(self): - return self.collector.cluster_data_model diff --git a/watcher/decision_engine/model/notification/cinder.py b/watcher/decision_engine/model/notification/cinder.py deleted file mode 100644 index 7d305dc..0000000 --- a/watcher/decision_engine/model/notification/cinder.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import six - -from oslo_log import log -from watcher.common import cinder_helper -from watcher.common import exception -from watcher.decision_engine.model import element -from watcher.decision_engine.model.notification import base -from watcher.decision_engine.model.notification import filtering - -LOG = log.getLogger(__name__) - - -class CinderNotification(base.NotificationEndpoint): - - def __init__(self, collector): - super(CinderNotification, self).__init__(collector) - self._cinder = None - - @property - def cinder(self): - if self._cinder is None: - self._cinder = cinder_helper.CinderHelper() - return self._cinder - - def update_pool(self, pool, data): - """Update the storage pool using the notification data.""" - pool.update({ - "total_capacity_gb": data['total'], - "free_capacity_gb": data['free'], - "provisioned_capacity_gb": data['provisioned'], - "allocated_capacity_gb": data['allocated'], - "virtual_free": data['virtual_free'] - }) - - node_name = pool.name.split("#")[0] - node = self.get_or_create_node(node_name) - self.cluster_data_model.map_pool(pool, node) - LOG.debug("Mapped pool %s to %s", pool.name, node.host) - - def update_pool_by_api(self, pool): - """Update the storage pool using the API data.""" - if not pool: - return - _pool = self.cinder.get_storage_pool_by_name(pool.name) - pool.update({ - "total_volumes": _pool.total_volumes, - "total_capacity_gb": _pool.total_capacity_gb, - "free_capacity_gb": _pool.free_capacity_gb, - "provisioned_capacity_gb": _pool.provisioned_capacity_gb, - "allocated_capacity_gb": _pool.allocated_capacity_gb - }) - node_name = pool.name.split("#")[0] - node = self.get_or_create_node(node_name) - self.cluster_data_model.map_pool(pool, node) - LOG.debug("Mapped pool %s to %s", pool.name, node.host) - - def create_storage_node(self, name): - """Create the storage node by querying the Cinder API.""" - try: - _node = self.cinder.get_storage_node_by_name(name) - _volume_type = 
self.cinder.get_volume_type_by_backendname( - # name is formatted as host@backendname - name.split('@')[1]) - storage_node = element.StorageNode( - host=_node.host, - zone=_node.zone, - state=_node.state, - status=_node.status, - volume_type=_volume_type) - return storage_node - except Exception as exc: - LOG.exception(exc) - LOG.debug("Could not create storage node %s.", name) - raise exception.StorageNodeNotFound(name=name) - - def get_or_create_node(self, name): - """Get storage node by name, otherwise create storage node""" - if name is None: - LOG.debug("Storage node name not provided: skipping") - return - try: - return self.cluster_data_model.get_node_by_name(name) - except exception.StorageNodeNotFound: - # The node didn't exist yet so we create a new node object - node = self.create_storage_node(name) - LOG.debug("New storage node created: %s", name) - self.cluster_data_model.add_node(node) - LOG.debug("New storage node added: %s", name) - return node - - def create_pool(self, pool_name): - """Create the storage pool by querying the Cinder API.""" - try: - _pool = self.cinder.get_storage_pool_by_name(pool_name) - pool = element.Pool( - name=_pool.name, - total_volumes=_pool.total_volumes, - total_capacity_gb=_pool.total_capacity_gb, - free_capacity_gb=_pool.free_capacity_gb, - provisioned_capacity_gb=_pool.provisioned_capacity_gb, - allocated_capacity_gb=_pool.allocated_capacity_gb) - return pool - except Exception as exc: - LOG.exception(exc) - LOG.debug("Could not refresh the pool %s.", pool_name) - raise exception.PoolNotFound(name=pool_name) - - def get_or_create_pool(self, name): - if not name: - LOG.debug("Pool name not provided: skipping") - return - try: - return self.cluster_data_model.get_pool_by_pool_name(name) - except exception.PoolNotFound: - # The pool didn't exist yet so we create a new pool object - pool = self.create_pool(name) - LOG.debug("New storage pool created: %s", name) - self.cluster_data_model.add_pool(pool) - LOG.debug("New 
storage pool added: %s", name) - return pool - - def get_or_create_volume(self, volume_id, pool_name=None): - try: - if pool_name: - self.get_or_create_pool(pool_name) - except exception.PoolNotFound: - LOG.warning("Could not find storage pool %(pool)s for " - "volume %(volume)s", - dict(pool=pool_name, volume=volume_id)) - try: - return self.cluster_data_model.get_volume_by_uuid(volume_id) - except exception.VolumeNotFound: - # The volume didn't exist yet so we create a new volume object - volume = element.Volume(uuid=volume_id) - self.cluster_data_model.add_volume(volume) - return volume - - def update_volume(self, volume, data): - """Update the volume using the notification data.""" - - def _keyReplace(key): - if key == 'instance_uuid': - return 'server_id' - if key == 'id': - return 'attachment_id' - - attachments = [ - {_keyReplace(k): v for k, v in six.iteritems(d) - if k in ('instance_uuid', 'id')} - for d in data['volume_attachment'] - ] - - # glance_metadata is provided if volume is bootable - bootable = False - if 'glance_metadata' in data: - bootable = True - - volume.update({ - "name": data['display_name'] or "", - "size": data['size'], - "status": data['status'], - "attachments": attachments, - "snapshot_id": data['snapshot_id'] or "", - "project_id": data['tenant_id'], - "metadata": data['metadata'], - "bootable": bootable - }) - - try: - # if volume is under pool, let's update pool element. 
- # get existing pool or create pool by cinder api - pool = self.get_or_create_pool(data['host']) - self.update_pool_by_api(pool) - - except exception.PoolNotFound as exc: - LOG.exception(exc) - pool = None - - self.update_volume_mapping(volume, pool) - - def update_volume_mapping(self, volume, pool): - if pool is None: - self.cluster_data_model.add_volume(volume) - LOG.debug("Volume %s not yet attached to any pool: skipping", - volume.uuid) - return - try: - try: - current_pool = ( - self.cluster_data_model.get_pool_by_volume( - volume) or self.get_or_create_pool(pool.name)) - except exception.PoolNotFound as exc: - LOG.exception(exc) - # If we can't create the pool, - # we consider the volume as unmapped - current_pool = None - - LOG.debug("Mapped pool %s found", pool.name) - if current_pool and pool != current_pool: - LOG.debug("Unmapping volume %s from %s", - volume.uuid, pool.name) - self.cluster_data_model.unmap_volume(volume, current_pool) - except exception.VolumeNotFound: - # The instance didn't exist yet so we map it for the first time - LOG.debug("New volume: mapping it to %s", pool.name) - finally: - if pool: - self.cluster_data_model.map_volume(volume, pool) - LOG.debug("Mapped volume %s to %s", volume.uuid, pool.name) - - def delete_volume(self, volume, pool): - try: - self.cluster_data_model.delete_volume(volume) - except Exception: - LOG.info("Volume %s already deleted", volume.uuid) - - try: - if pool: - # if volume is under pool, let's update pool element. 
- # get existing pool or create pool by cinder api - pool = self.get_or_create_pool(pool.name) - self.update_pool_by_api(pool) - except exception.PoolNotFound as exc: - LOG.exception(exc) - pool = None - - -class CapacityNotificationEndpoint(CinderNotification): - - @property - def filter_rule(self): - """Cinder capacity notification filter""" - return filtering.NotificationFilter( - publisher_id=r'capacity.*', - event_type='capacity.pool', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - name = payload['name_to_id'] - try: - pool = self.get_or_create_pool(name) - self.update_pool(pool, payload) - except exception.PoolNotFound as exc: - LOG.exception(exc) - - -class VolumeNotificationEndpoint(CinderNotification): - publisher_id_regex = r'^volume.*' - - -class VolumeCreateEnd(VolumeNotificationEndpoint): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.create.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - volume_id = payload['volume_id'] - poolname = payload['host'] - volume = self.get_or_create_volume(volume_id, poolname) - self.update_volume(volume, payload) - - -class VolumeUpdateEnd(VolumeNotificationEndpoint): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - 
publisher_id=self.publisher_id_regex, - event_type='volume.update.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - volume_id = payload['volume_id'] - poolname = payload['host'] - volume = self.get_or_create_volume(volume_id, poolname) - self.update_volume(volume, payload) - - -class VolumeAttachEnd(VolumeUpdateEnd): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.attach.end', - ) - - -class VolumeDetachEnd(VolumeUpdateEnd): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.detach.end', - ) - - -class VolumeResizeEnd(VolumeUpdateEnd): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.resize.end', - ) - - -class VolumeDeleteEnd(VolumeNotificationEndpoint): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.delete.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - volume_id = payload['volume_id'] - poolname = payload['host'] - volume = 
self.get_or_create_volume(volume_id, poolname) - - try: - pool = self.get_or_create_pool(poolname) - except exception.PoolNotFound as exc: - LOG.exception(exc) - pool = None - - self.delete_volume(volume, pool) diff --git a/watcher/decision_engine/model/notification/filtering.py b/watcher/decision_engine/model/notification/filtering.py deleted file mode 100644 index 737e317..0000000 --- a/watcher/decision_engine/model/notification/filtering.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -import oslo_messaging as om -import six - - -class NotificationFilter(om.NotificationFilter): - """Notification Endpoint base class - - This class is responsible for handling incoming notifications. Depending - on the priority level of the incoming, you may need to implement one or - more of the following methods: - - .. 
code: py - def audit(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def warn(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def error(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def critical(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - """ - - def _build_regex_dict(self, regex_list): - if regex_list is None: - return {} - - regex_mapping = {} - for key, value in regex_list.items(): - if isinstance(value, dict): - regex_mapping[key] = self._build_regex_dict(value) - else: - if callable(value): - regex_mapping[key] = value - elif value is not None: - regex_mapping[key] = re.compile(value) - else: - regex_mapping[key] = None - - return regex_mapping - - def _check_for_mismatch(self, data, regex): - if isinstance(regex, dict): - mismatch_results = [ - k not in data or not self._check_for_mismatch(data[k], v) - for k, v in regex.items() - ] - if not mismatch_results: - return False - - return all(mismatch_results) - elif callable(regex): - # The filter is a callable that should return True - # if there is a mismatch - return regex(data) - elif regex is not None and data is None: - return True - elif (regex is not None and - isinstance(data, six.string_types) and - not regex.match(data)): - return True - - return False diff --git a/watcher/decision_engine/model/notification/nova.py b/watcher/decision_engine/model/notification/nova.py deleted file mode 100644 index 42df5cd..0000000 --- a/watcher/decision_engine/model/notification/nova.py +++ /dev/null @@ -1,466 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.model import element -from watcher.decision_engine.model.notification import base -from watcher.decision_engine.model.notification import filtering - -LOG = log.getLogger(__name__) - - -class NovaNotification(base.NotificationEndpoint): - - def __init__(self, collector): - super(NovaNotification, self).__init__(collector) - self._nova = None - - @property - def nova(self): - if self._nova is None: - self._nova = nova_helper.NovaHelper() - return self._nova - - def get_or_create_instance(self, instance_uuid, node_uuid=None): - try: - if node_uuid: - self.get_or_create_node(node_uuid) - except exception.ComputeNodeNotFound: - LOG.warning("Could not find compute node %(node)s for " - "instance %(instance)s", - dict(node=node_uuid, instance=instance_uuid)) - try: - instance = self.cluster_data_model.get_instance_by_uuid( - instance_uuid) - except exception.InstanceNotFound: - # The instance didn't exist yet so we create a new instance object - LOG.debug("New instance created: %s", instance_uuid) - instance = element.Instance(uuid=instance_uuid) - - self.cluster_data_model.add_instance(instance) - - return instance - - def update_instance(self, instance, data): - instance_data = data['nova_object.data'] - instance_flavor_data = instance_data['flavor']['nova_object.data'] - - memory_mb = instance_flavor_data['memory_mb'] - num_cores = instance_flavor_data['vcpus'] - disk_gb = instance_flavor_data['root_gb'] - instance_metadata 
= data['nova_object.data']['metadata'] - - instance.update({ - 'state': instance_data['state'], - 'hostname': instance_data['host_name'], - 'human_id': instance_data['display_name'], - 'memory': memory_mb, - 'vcpus': num_cores, - 'disk': disk_gb, - 'disk_capacity': disk_gb, - 'metadata': instance_metadata, - }) - - try: - node = self.get_or_create_node(instance_data['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.update_instance_mapping(instance, node) - - def legacy_update_instance(self, instance, data): - memory_mb = data['memory_mb'] - num_cores = data['vcpus'] - disk_gb = data['root_gb'] - instance_metadata = data['metadata'] - - instance.update({ - 'state': data['state'], - 'hostname': data['hostname'], - 'human_id': data['display_name'], - 'memory': memory_mb, - 'vcpus': num_cores, - 'disk': disk_gb, - 'disk_capacity': disk_gb, - 'metadata': instance_metadata, - }) - - try: - node = self.get_or_create_node(data['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.update_instance_mapping(instance, node) - - def update_compute_node(self, node, data): - """Update the compute node using the notification data.""" - node_data = data['nova_object.data'] - node_state = ( - element.ServiceState.OFFLINE.value - if node_data['forced_down'] else element.ServiceState.ONLINE.value) - node_status = ( - element.ServiceState.DISABLED.value - if node_data['disabled'] else element.ServiceState.ENABLED.value) - - node.update({ - 'hostname': node_data['host'], - 'state': node_state, - 'status': node_status, - }) - - def create_compute_node(self, node_hostname): - """Update the compute node by querying the Nova API.""" - try: - _node = self.nova.get_compute_node_by_hostname(node_hostname) - node = element.ComputeNode( - id=_node.id, - 
uuid=node_hostname, - hostname=_node.hypervisor_hostname, - state=_node.state, - status=_node.status, - memory=_node.memory_mb, - vcpus=_node.vcpus, - disk=_node.free_disk_gb, - disk_capacity=_node.local_gb, - ) - return node - except Exception as exc: - LOG.exception(exc) - LOG.debug("Could not refresh the node %s.", node_hostname) - raise exception.ComputeNodeNotFound(name=node_hostname) - - return False - - def get_or_create_node(self, uuid): - if uuid is None: - LOG.debug("Compute node UUID not provided: skipping") - return - try: - return self.cluster_data_model.get_node_by_uuid(uuid) - except exception.ComputeNodeNotFound: - # The node didn't exist yet so we create a new node object - node = self.create_compute_node(uuid) - LOG.debug("New compute node created: %s", uuid) - self.cluster_data_model.add_node(node) - LOG.debug("New compute node mapped: %s", uuid) - return node - - def update_instance_mapping(self, instance, node): - if node is None: - self.cluster_data_model.add_instance(instance) - LOG.debug("Instance %s not yet attached to any node: skipping", - instance.uuid) - return - try: - try: - current_node = ( - self.cluster_data_model.get_node_by_instance_uuid( - instance.uuid) or self.get_or_create_node(node.uuid)) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, - # we consider the instance as unmapped - current_node = None - - LOG.debug("Mapped node %s found", node.uuid) - if current_node and node != current_node: - LOG.debug("Unmapping instance %s from %s", - instance.uuid, node.uuid) - self.cluster_data_model.unmap_instance(instance, current_node) - except exception.InstanceNotFound: - # The instance didn't exist yet so we map it for the first time - LOG.debug("New instance: mapping it to %s", node.uuid) - finally: - if node: - self.cluster_data_model.map_instance(instance, node) - LOG.debug("Mapped instance %s to %s", instance.uuid, node.uuid) - - def delete_instance(self, instance, node): - 
try: - self.cluster_data_model.delete_instance(instance, node) - except Exception: - LOG.info("Instance %s already deleted", instance.uuid) - - -class VersionedNotificationEndpoint(NovaNotification): - publisher_id_regex = r'^nova-compute.*' - - -class UnversionedNotificationEndpoint(NovaNotification): - publisher_id_regex = r'^compute.*' - - -class ServiceUpdated(VersionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova service.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='service.update', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - node_data = payload['nova_object.data'] - node_uuid = node_data['host'] - try: - node = self.get_or_create_node(node_uuid) - self.update_compute_node(node, payload) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - - -class InstanceCreated(VersionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova instance.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='instance.update', - # To be "fully" created, an instance transitions - # from the 'building' state to the 'active' one. 
- # See http://docs.openstack.org/developer/nova/vmstates.html - payload={ - 'nova_object.data': { - 'state': element.InstanceState.ACTIVE.value, - 'state_update': { - 'nova_object.data': { - 'old_state': element.InstanceState.BUILDING.value, - 'state': element.InstanceState.ACTIVE.value, - }, - 'nova_object.name': 'InstanceStateUpdatePayload', - 'nova_object.namespace': 'nova', - }, - } - } - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - instance_data = payload['nova_object.data'] - instance_uuid = instance_data['uuid'] - node_uuid = instance_data.get('host') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.update_instance(instance, payload) - - -class InstanceUpdated(VersionedNotificationEndpoint): - - @staticmethod - def _match_not_new_instance_state(data): - is_new_instance = ( - data['old_state'] == element.InstanceState.BUILDING.value and - data['state'] == element.InstanceState.ACTIVE.value) - - return not is_new_instance - - @property - def filter_rule(self): - """Nova instance.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='instance.update', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - instance_data = payload['nova_object.data'] - instance_uuid = instance_data['uuid'] - node_uuid = instance_data.get('host') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - 
self.update_instance(instance, payload) - - -class InstanceDeletedEnd(VersionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova service.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='instance.delete.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_data = payload['nova_object.data'] - instance_uuid = instance_data['uuid'] - node_uuid = instance_data.get('host') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - try: - node = self.get_or_create_node(instance_data['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.delete_instance(instance, node) - - -class LegacyInstanceUpdated(UnversionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova compute.instance.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='compute.instance.update', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.legacy_update_instance(instance, payload) - - -class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint): - - @property - 
def filter_rule(self): - """Nova compute.instance.create.end notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='compute.instance.create.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.legacy_update_instance(instance, payload) - - -class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova compute.instance.delete.end notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='compute.instance.delete.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - try: - node = self.get_or_create_node(payload['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.delete_instance(instance, node) - - -class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova *.live_migration.post.dest.end notification filter""" - return filtering.NotificationFilter( - 
publisher_id=self.publisher_id_regex, - event_type='compute.instance.live_migration.post.dest.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.legacy_update_instance(instance, payload) diff --git a/watcher/decision_engine/planner/__init__.py b/watcher/decision_engine/planner/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/planner/base.py b/watcher/decision_engine/planner/base.py deleted file mode 100644 index 9c255b4..0000000 --- a/watcher/decision_engine/planner/base.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -The :ref:`Watcher Planner ` is part of the -:ref:`Watcher Decision Engine `. - -This module takes the set of :ref:`Actions ` generated by a -:ref:`Strategy ` and builds the design of a workflow which -defines how-to schedule in time those different -:ref:`Actions ` and for each -:ref:`Action ` what are the prerequisite conditions. 
- -It is important to schedule :ref:`Actions ` in time in order -to prevent overload of the :ref:`Cluster ` while applying -the :ref:`Action Plan `. For example, it is important -not to migrate too many instances at the same time in order to avoid a network -congestion which may decrease the :ref:`SLA ` for -:ref:`Customers `. - -It is also important to schedule :ref:`Actions ` in order to -avoid security issues such as denial of service on core OpenStack services. - -:ref:`Some default implementations are provided `, but it is -possible to :ref:`develop new implementations ` -which are dynamically loaded by Watcher at launch time. - -See :doc:`../architecture` for more details on this component. -""" - -import abc -import six - -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class BasePlanner(loadable.Loadable): - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def schedule(self, context, audit_uuid, solution): - """The planner receives a solution to schedule - - :param solution: A solution provided by a strategy for scheduling - :type solution: :py:class:`~.BaseSolution` subclass instance - :param audit_uuid: the audit uuid - :type audit_uuid: str - :return: Action plan with an ordered sequence of actions such that all - security, dependency, and performance requirements are met. 
- :rtype: :py:class:`watcher.objects.ActionPlan` instance - """ - # example: directed acyclic graph - raise NotImplementedError() diff --git a/watcher/decision_engine/planner/manager.py b/watcher/decision_engine/planner/manager.py deleted file mode 100644 index 7169470..0000000 --- a/watcher/decision_engine/planner/manager.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log - -from watcher.decision_engine.loading import default as loader - -from watcher import conf - -LOG = log.getLogger(__name__) -CONF = conf.CONF - - -class PlannerManager(object): - def __init__(self): - self._loader = loader.DefaultPlannerLoader() - - @property - def loader(self): - return self._loader - - def load(self): - selected_planner = CONF.watcher_planner.planner - LOG.debug("Loading %s", selected_planner) - return self.loader.load(name=selected_planner) diff --git a/watcher/decision_engine/planner/weight.py b/watcher/decision_engine/planner/weight.py deleted file mode 100644 index 24c707a..0000000 --- a/watcher/decision_engine/planner/weight.py +++ /dev/null @@ -1,222 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vincent Francoise -# Alexander Chadin -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections - -import networkx as nx -from oslo_config import cfg -from oslo_config import types -from oslo_log import log - -from watcher.common import utils -from watcher.decision_engine.planner import base -from watcher import objects - -LOG = log.getLogger(__name__) - - -class WeightPlanner(base.BasePlanner): - """Weight planner implementation - - This implementation builds actions with parents in accordance with weights. - Set of actions having a higher weight will be scheduled before - the other ones. There are two config options to configure: - action_weights and parallelization. - - *Limitations* - - - This planner requires to have action_weights and parallelization configs - tuned well. - """ - - def __init__(self, config): - super(WeightPlanner, self).__init__(config) - - action_weights = { - 'nop': 60, - 'change_nova_service_state': 50, - 'sleep': 40, - 'migrate': 30, - 'resize': 20, - 'turn_host_to_acpi_s3_state': 10, - 'change_node_power_state': 9, - } - - parallelization = { - 'turn_host_to_acpi_s3_state': 2, - 'resize': 2, - 'migrate': 2, - 'sleep': 1, - 'change_nova_service_state': 1, - 'nop': 1, - 'change_node_power_state': 2, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.Opt( - 'weights', - type=types.Dict(value_type=types.Integer()), - help="These weights are used to schedule the actions. " - "Action Plan will be build in accordance with sets of " - "actions ordered by descending weights." - "Two action types cannot have the same weight. 
", - default=cls.action_weights), - cfg.Opt( - 'parallelization', - type=types.Dict(value_type=types.Integer()), - help="Number of actions to be run in parallel on a per " - "action type basis.", - default=cls.parallelization), - ] - - @staticmethod - def chunkify(lst, n): - """Yield successive n-sized chunks from lst.""" - if n < 1: - # Just to make sure the number is valid - n = 1 - - # Split a flat list in a list of chunks of size n. - # e.g. chunkify([0, 1, 2, 3, 4], 2) -> [[0, 1], [2, 3], [4]] - for i in range(0, len(lst), n): - yield lst[i:i + n] - - def compute_action_graph(self, sorted_weighted_actions): - reverse_weights = {v: k for k, v in self.config.weights.items()} - # leaf_groups contains a list of list of nodes called groups - # each group is a set of nodes from which a future node will - # branch off (parent nodes). - - # START --> migrate-1 --> migrate-3 - # \ \--> resize-1 --> FINISH - # \--> migrate-2 -------------/ - # In the above case migrate-1 will be the only member of the leaf - # group that migrate-3 will use as parent group, whereas - # resize-1 will have both migrate-2 and migrate-3 in its - # parent/leaf group - leaf_groups = [] - action_graph = nx.DiGraph() - # We iterate through each action type category (sorted by weight) to - # insert them in a Directed Acyclic Graph - for idx, (weight, actions) in enumerate(sorted_weighted_actions): - action_chunks = self.chunkify( - actions, self.config.parallelization[reverse_weights[weight]]) - - # We split the actions into chunks/layers that will have to be - # spread across all the available branches of the graph - for chunk_idx, actions_chunk in enumerate(action_chunks): - for action in actions_chunk: - action_graph.add_node(action) - - # all other actions - parent_nodes = [] - if not idx and not chunk_idx: - parent_nodes = [] - elif leaf_groups: - parent_nodes = leaf_groups - - for parent_node in parent_nodes: - action_graph.add_edge(parent_node, action) - 
action.parents.append(parent_node.uuid) - - if leaf_groups: - leaf_groups = [] - leaf_groups.extend([a for a in actions_chunk]) - - return action_graph - - def schedule(self, context, audit_id, solution): - LOG.debug('Creating an action plan for the audit uuid: %s', audit_id) - action_plan = self.create_action_plan(context, audit_id, solution) - - sorted_weighted_actions = self.get_sorted_actions_by_weight( - context, action_plan, solution) - action_graph = self.compute_action_graph(sorted_weighted_actions) - - self._create_efficacy_indicators( - context, action_plan.id, solution.efficacy_indicators) - - if len(action_graph.nodes()) == 0: - LOG.warning("The action plan is empty") - action_plan.state = objects.action_plan.State.SUCCEEDED - action_plan.save() - - self.create_scheduled_actions(action_graph) - return action_plan - - def get_sorted_actions_by_weight(self, context, action_plan, solution): - # We need to make them immutable to add them to the graph - action_objects = list([ - objects.Action( - context, uuid=utils.generate_uuid(), parents=[], - action_plan_id=action_plan.id, **a) - for a in solution.actions]) - # This is a dict of list with each being a weight and the list being - # all the actions associated to this weight - weighted_actions = collections.defaultdict(list) - for action in action_objects: - action_weight = self.config.weights[action.action_type] - weighted_actions[action_weight].append(action) - - return reversed(sorted(weighted_actions.items(), key=lambda x: x[0])) - - def create_scheduled_actions(self, graph): - for action in graph.nodes(): - LOG.debug("Creating the %s in the Watcher database", - action.action_type) - try: - action.create() - except Exception as exc: - LOG.exception(exc) - raise - - def create_action_plan(self, context, audit_id, solution): - strategy = objects.Strategy.get_by_name( - context, solution.strategy.name) - - action_plan_dict = { - 'uuid': utils.generate_uuid(), - 'audit_id': audit_id, - 'strategy_id': 
strategy.id, - 'state': objects.action_plan.State.RECOMMENDED, - 'global_efficacy': solution.global_efficacy, - } - - new_action_plan = objects.ActionPlan(context, **action_plan_dict) - new_action_plan.create() - - return new_action_plan - - def _create_efficacy_indicators(self, context, action_plan_id, indicators): - efficacy_indicators = [] - for indicator in indicators: - efficacy_indicator_dict = { - 'uuid': utils.generate_uuid(), - 'name': indicator.name, - 'description': indicator.description, - 'unit': indicator.unit, - 'value': indicator.value, - 'action_plan_id': action_plan_id, - } - new_efficacy_indicator = objects.EfficacyIndicator( - context, **efficacy_indicator_dict) - new_efficacy_indicator.create() - - efficacy_indicators.append(new_efficacy_indicator) - return efficacy_indicators diff --git a/watcher/decision_engine/planner/workload_stabilization.py b/watcher/decision_engine/planner/workload_stabilization.py deleted file mode 100644 index f7cd96a..0000000 --- a/watcher/decision_engine/planner/workload_stabilization.py +++ /dev/null @@ -1,300 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import abc - -from oslo_config import cfg -from oslo_config import types -from oslo_log import log - -from watcher.common import clients -from watcher.common import exception -from watcher.common import nova_helper -from watcher.common import utils -from watcher.decision_engine.planner import base -from watcher import objects - -LOG = log.getLogger(__name__) - - -class WorkloadStabilizationPlanner(base.BasePlanner): - """Workload Stabilization planner implementation - - This implementation comes with basic rules with a set of action types that - are weighted. An action having a lower weight will be scheduled before the - other ones. The set of action types can be specified by 'weights' in the - ``watcher.conf``. You need to associate a different weight to all available - actions into the configuration file, otherwise you will get an error when - the new action will be referenced in the solution produced by a strategy. - - *Limitations* - - - This is a proof of concept that is not meant to be used in production - """ - - def __init__(self, config): - super(WorkloadStabilizationPlanner, self).__init__(config) - self._osc = clients.OpenStackClients() - - @property - def osc(self): - return self._osc - - weights_dict = { - 'turn_host_to_acpi_s3_state': 0, - 'resize': 1, - 'migrate': 2, - 'sleep': 3, - 'change_nova_service_state': 4, - 'nop': 5, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.Opt( - 'weights', - type=types.Dict(value_type=types.Integer()), - help="These weights are used to schedule the actions", - default=cls.weights_dict), - ] - - def create_action(self, - action_plan_id, - action_type, - input_parameters=None): - uuid = utils.generate_uuid() - action = { - 'uuid': uuid, - 'action_plan_id': int(action_plan_id), - 'action_type': action_type, - 'input_parameters': input_parameters, - 'state': objects.action.State.PENDING, - 'parents': None - } - - return action - - def load_child_class(self, child_name): - for c in 
BaseActionValidator.__subclasses__(): - if child_name == c.action_name: - return c() - return None - - def schedule(self, context, audit_id, solution): - LOG.debug('Creating an action plan for the audit uuid: %s', audit_id) - weights = self.config.weights - action_plan = self._create_action_plan(context, audit_id, solution) - - actions = list(solution.actions) - to_schedule = [] - for action in actions: - json_action = self.create_action( - action_plan_id=action_plan.id, - action_type=action.get('action_type'), - input_parameters=action.get('input_parameters')) - to_schedule.append((weights[action.get('action_type')], - json_action)) - - self._create_efficacy_indicators( - context, action_plan.id, solution.efficacy_indicators) - - # scheduling - scheduled = sorted(to_schedule, key=lambda weight: (weight[0]), - reverse=True) - if len(scheduled) == 0: - LOG.warning("The action plan is empty") - action_plan.state = objects.action_plan.State.SUCCEEDED - action_plan.save() - else: - resource_action_map = {} - scheduled_actions = [x[1] for x in scheduled] - for action in scheduled_actions: - a_type = action['action_type'] - if a_type != 'turn_host_to_acpi_s3_state': - plugin_action = self.load_child_class( - action.get("action_type")) - if not plugin_action: - raise exception.UnsupportedActionType( - action_type=action.get("action_type")) - db_action = self._create_action(context, action) - parents = plugin_action.validate_parents( - resource_action_map, action) - if parents: - db_action.parents = parents - db_action.save() - # if we have an action that will make host unreachable, we need - # to complete all actions (resize and migration type) - # related to the host. - # Note(alexchadin): turn_host_to_acpi_s3_state doesn't - # actually exist. Placed code shows relations between - # action types. - # TODO(alexchadin): add turn_host_to_acpi_s3_state action type. 
- else: - host_to_acpi_s3 = action['input_parameters']['resource_id'] - host_actions = resource_action_map.get(host_to_acpi_s3) - action_parents = [] - if host_actions: - resize_actions = [x[0] for x in host_actions - if x[1] == 'resize'] - migrate_actions = [x[0] for x in host_actions - if x[1] == 'migrate'] - resize_migration_parents = [ - x.parents for x in - [objects.Action.get_by_uuid(context, resize_action) - for resize_action in resize_actions]] - # resize_migration_parents should be one level list - resize_migration_parents = [ - parent for sublist in resize_migration_parents - for parent in sublist] - action_parents.extend([uuid for uuid in - resize_actions]) - action_parents.extend([uuid for uuid in - migrate_actions if uuid not in - resize_migration_parents]) - db_action = self._create_action(context, action) - db_action.parents = action_parents - db_action.save() - - return action_plan - - def _create_action_plan(self, context, audit_id, solution): - strategy = objects.Strategy.get_by_name( - context, solution.strategy.name) - - action_plan_dict = { - 'uuid': utils.generate_uuid(), - 'audit_id': audit_id, - 'strategy_id': strategy.id, - 'state': objects.action_plan.State.RECOMMENDED, - 'global_efficacy': solution.global_efficacy, - } - - new_action_plan = objects.ActionPlan(context, **action_plan_dict) - new_action_plan.create() - - return new_action_plan - - def _create_efficacy_indicators(self, context, action_plan_id, indicators): - efficacy_indicators = [] - for indicator in indicators: - efficacy_indicator_dict = { - 'uuid': utils.generate_uuid(), - 'name': indicator.name, - 'description': indicator.description, - 'unit': indicator.unit, - 'value': indicator.value, - 'action_plan_id': action_plan_id, - } - new_efficacy_indicator = objects.EfficacyIndicator( - context, **efficacy_indicator_dict) - new_efficacy_indicator.create() - - efficacy_indicators.append(new_efficacy_indicator) - return efficacy_indicators - - def _create_action(self, context, 
_action): - try: - LOG.debug("Creating the %s in the Watcher database", - _action.get("action_type")) - - new_action = objects.Action(context, **_action) - new_action.create() - - return new_action - except Exception as exc: - LOG.exception(exc) - raise - - -class BaseActionValidator(object): - action_name = None - - def __init__(self): - super(BaseActionValidator, self).__init__() - self._osc = None - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @abc.abstractmethod - def validate_parents(self, resource_action_map, action): - raise NotImplementedError() - - def _mapping(self, resource_action_map, resource_id, action_uuid, - action_type): - if resource_id not in resource_action_map: - resource_action_map[resource_id] = [(action_uuid, - action_type,)] - else: - resource_action_map[resource_id].append((action_uuid, - action_type,)) - - -class MigrationActionValidator(BaseActionValidator): - action_name = "migrate" - - def validate_parents(self, resource_action_map, action): - instance_uuid = action['input_parameters']['resource_id'] - host_name = action['input_parameters']['source_node'] - self._mapping(resource_action_map, instance_uuid, action['uuid'], - 'migrate') - self._mapping(resource_action_map, host_name, action['uuid'], - 'migrate') - - -class ResizeActionValidator(BaseActionValidator): - action_name = "resize" - - def validate_parents(self, resource_action_map, action): - nova = nova_helper.NovaHelper(osc=self.osc) - instance_uuid = action['input_parameters']['resource_id'] - parent_actions = resource_action_map.get(instance_uuid) - host_of_instance = nova.get_hostname( - nova.get_instance_by_uuid(instance_uuid)[0]) - self._mapping(resource_action_map, host_of_instance, action['uuid'], - 'resize') - if parent_actions: - return [x[0] for x in parent_actions] - else: - return [] - - -class ChangeNovaServiceStateActionValidator(BaseActionValidator): - action_name = "change_nova_service_state" 
- - def validate_parents(self, resource_action_map, action): - host_name = action['input_parameters']['resource_id'] - self._mapping(resource_action_map, host_name, action.uuid, - 'change_nova_service_state') - return [] - - -class SleepActionValidator(BaseActionValidator): - action_name = "sleep" - - def validate_parents(self, resource_action_map, action): - return [] - - -class NOPActionValidator(BaseActionValidator): - action_name = "nop" - - def validate_parents(self, resource_action_map, action): - return [] diff --git a/watcher/decision_engine/rpcapi.py b/watcher/decision_engine/rpcapi.py deleted file mode 100644 index f0e0e2a..0000000 --- a/watcher/decision_engine/rpcapi.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from watcher.common import exception -from watcher.common import service -from watcher.common import service_manager -from watcher.common import utils - -from watcher import conf - -CONF = conf.CONF - - -class DecisionEngineAPI(service.Service): - - def __init__(self): - super(DecisionEngineAPI, self).__init__(DecisionEngineAPIManager) - - def trigger_audit(self, context, audit_uuid=None): - if not utils.is_uuid_like(audit_uuid): - raise exception.InvalidUuidOrName(name=audit_uuid) - - self.conductor_client.cast( - context, 'trigger_audit', audit_uuid=audit_uuid) - - -class DecisionEngineAPIManager(service_manager.ServiceManager): - - @property - def service_name(self): - return None - - @property - def api_version(self): - return '1.0' - - @property - def publisher_id(self): - return CONF.watcher_decision_engine.publisher_id - - @property - def conductor_topic(self): - return CONF.watcher_decision_engine.conductor_topic - - @property - def notification_topics(self): - return [] - - @property - def conductor_endpoints(self): - return [] - - @property - def notification_endpoints(self): - return [] diff --git a/watcher/decision_engine/scheduling.py b/watcher/decision_engine/scheduling.py deleted file mode 100644 index 4ef0481..0000000 --- a/watcher/decision_engine/scheduling.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import eventlet -from oslo_log import log - -from watcher.common import context -from watcher.common import exception -from watcher.common import scheduling - -from watcher.decision_engine.model.collector import manager -from watcher import objects - -from watcher import conf - -LOG = log.getLogger(__name__) -CONF = conf.CONF - - -class DecisionEngineSchedulingService(scheduling.BackgroundSchedulerService): - - def __init__(self, gconfig=None, **options): - gconfig = None or {} - super(DecisionEngineSchedulingService, self).__init__( - gconfig, **options) - self.collector_manager = manager.CollectorManager() - - @property - def collectors(self): - return self.collector_manager.get_collectors() - - def add_sync_jobs(self): - for name, collector in self.collectors.items(): - timed_task = self._wrap_collector_sync_with_timeout( - collector, name) - self.add_job(timed_task, - trigger='interval', - seconds=collector.config.period, - next_run_time=datetime.datetime.now()) - - def _as_timed_sync_func(self, sync_func, name, timeout): - def _timed_sync(): - with eventlet.Timeout( - timeout, - exception=exception.ClusterDataModelCollectionError(cdm=name) - ): - sync_func() - - return _timed_sync - - def _wrap_collector_sync_with_timeout(self, collector, name): - """Add an execution timeout constraint on a function""" - timeout = collector.config.period - - def _sync(): - try: - timed_sync = self._as_timed_sync_func( - collector.synchronize, name, timeout) - timed_sync() - except Exception as exc: - LOG.exception(exc) - collector.set_cluster_data_model_as_stale() - - return _sync - - def add_checkstate_job(self): - # 30 minutes interval - interval = CONF.watcher_decision_engine.check_periodic_interval - ap_manager = objects.action_plan.StateManager() - if CONF.watcher_decision_engine.action_plan_expiry != 0: - self.add_job(ap_manager.check_expired, 'interval', - args=[context.make_context()], - seconds=interval, - next_run_time=datetime.datetime.now()) - 
- def start(self): - """Start service.""" - self.add_sync_jobs() - self.add_checkstate_job() - super(DecisionEngineSchedulingService, self).start() - - def stop(self): - """Stop service.""" - self.shutdown() - - def wait(self): - """Wait for service to complete.""" - - def reset(self): - """Reset service. - - Called in case service running in daemon mode receives SIGHUP. - """ diff --git a/watcher/decision_engine/scope/__init__.py b/watcher/decision_engine/scope/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/scope/base.py b/watcher/decision_engine/scope/base.py deleted file mode 100644 index 76f1746..0000000 --- a/watcher/decision_engine/scope/base.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import abc -import six - -from watcher.common import context - - -@six.add_metaclass(abc.ABCMeta) -class BaseScope(object): - """A base class for Scope mechanism - - Child of this class is called when audit launches strategy. This strategy - requires Cluster Data Model which can be segregated to achieve audit scope. 
- """ - - def __init__(self, scope, config): - self.ctx = context.make_context() - self.scope = scope - self.config = config - - @abc.abstractmethod - def get_scoped_model(self, cluster_model): - """Leave only nodes and instances proposed in the audit scope""" diff --git a/watcher/decision_engine/scope/default.py b/watcher/decision_engine/scope/default.py deleted file mode 100644 index 4e74f6e..0000000 --- a/watcher/decision_engine/scope/default.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.scope import base - - -LOG = log.getLogger(__name__) - - -class DefaultScope(base.BaseScope): - """Default Audit Scope Handler""" - - DEFAULT_SCHEMA = { - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "array", - "items": { - "type": "object", - "properties": { - "host_aggregates": { - "type": "array", - "items": { - "type": "object", - "properties": { - "anyOf": [ - {"type": ["string", "number"]} - ] - }, - } - }, - "availability_zones": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - } - }, - "exclude": { - "type": "array", - "items": { - "type": "object", - "properties": { - "instances": { - "type": "array", - "items": { - "type": "object", - "properties": { - "uuid": { - "type": "string" - } - } - } - }, - "compute_nodes": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - } - }, - "host_aggregates": { - "type": "array", - "items": { - "type": "object", - "properties": { - "anyOf": [ - {"type": ["string", "number"]} - ] - }, - } - }, - "instance_metadata": { - "type": "array", - "items": { - "type": "object" - } - } - }, - "additionalProperties": False - } - } - }, - "additionalProperties": False - } - } - - def __init__(self, scope, config, osc=None): - super(DefaultScope, self).__init__(scope, config) - self._osc = osc - self.wrapper = nova_helper.NovaHelper(osc=self._osc) - - def remove_instance(self, cluster_model, instance, node_name): - node = cluster_model.get_node_by_uuid(node_name) - cluster_model.delete_instance(instance, node) - - def _check_wildcard(self, aggregate_list): - if '*' in aggregate_list: - if len(aggregate_list) == 1: - return True - else: - raise exception.WildcardCharacterIsUsed( - resource="host aggregates") - return False - 
- def _collect_aggregates(self, host_aggregates, compute_nodes): - aggregate_list = self.wrapper.get_aggregate_list() - aggregate_ids = [aggregate['id'] for aggregate - in host_aggregates if 'id' in aggregate] - aggregate_names = [aggregate['name'] for aggregate - in host_aggregates if 'name' in aggregate] - include_all_nodes = any(self._check_wildcard(field) - for field in (aggregate_ids, aggregate_names)) - - for aggregate in aggregate_list: - detailed_aggregate = self.wrapper.get_aggregate_detail( - aggregate.id) - if (detailed_aggregate.id in aggregate_ids or - detailed_aggregate.name in aggregate_names or - include_all_nodes): - compute_nodes.extend(detailed_aggregate.hosts) - - def _collect_zones(self, availability_zones, allowed_nodes): - zone_list = self.wrapper.get_availability_zone_list() - zone_names = [zone['name'] for zone - in availability_zones] - include_all_nodes = False - if '*' in zone_names: - if len(zone_names) == 1: - include_all_nodes = True - else: - raise exception.WildcardCharacterIsUsed( - resource="availability zones") - for zone in zone_list: - if zone.zoneName in zone_names or include_all_nodes: - allowed_nodes.extend(zone.hosts.keys()) - - def exclude_resources(self, resources, **kwargs): - instances_to_exclude = kwargs.get('instances') - nodes_to_exclude = kwargs.get('nodes') - instance_metadata = kwargs.get('instance_metadata') - - for resource in resources: - if 'instances' in resource: - instances_to_exclude.extend( - [instance['uuid'] for instance - in resource['instances']]) - elif 'compute_nodes' in resource: - nodes_to_exclude.extend( - [host['name'] for host - in resource['compute_nodes']]) - elif 'host_aggregates' in resource: - prohibited_nodes = [] - self._collect_aggregates(resource['host_aggregates'], - prohibited_nodes) - nodes_to_exclude.extend(prohibited_nodes) - elif 'instance_metadata' in resource: - instance_metadata.extend( - [metadata for metadata in resource['instance_metadata']]) - - def 
remove_nodes_from_model(self, nodes_to_remove, cluster_model): - for node_uuid in nodes_to_remove: - node = cluster_model.get_node_by_uuid(node_uuid) - instances = cluster_model.get_node_instances(node) - for instance in instances: - self.remove_instance(cluster_model, instance, node_uuid) - cluster_model.remove_node(node) - - def remove_instances_from_model(self, instances_to_remove, cluster_model): - for instance_uuid in instances_to_remove: - try: - node_name = cluster_model.get_node_by_instance_uuid( - instance_uuid).uuid - except exception.ComputeResourceNotFound: - LOG.warning("The following instance %s cannot be found. " - "It might be deleted from CDM along with node" - " instance was hosted on.", - instance_uuid) - continue - self.remove_instance( - cluster_model, - cluster_model.get_instance_by_uuid(instance_uuid), - node_name) - - def exclude_instances_with_given_metadata( - self, instance_metadata, cluster_model, instances_to_remove): - metadata_dict = { - key: val for d in instance_metadata for key, val in d.items()} - instances = cluster_model.get_all_instances() - for uuid, instance in instances.items(): - metadata = instance.metadata - common_metadata = set(metadata_dict) & set(metadata) - if common_metadata and len(common_metadata) == len(metadata_dict): - for key, value in metadata_dict.items(): - if str(value).lower() == str(metadata.get(key)).lower(): - instances_to_remove.add(uuid) - - def get_scoped_model(self, cluster_model): - """Leave only nodes and instances proposed in the audit scope""" - if not cluster_model: - return None - - allowed_nodes = [] - nodes_to_exclude = [] - nodes_to_remove = set() - instances_to_exclude = [] - instance_metadata = [] - model_hosts = list(cluster_model.get_all_compute_nodes().keys()) - - if not self.scope: - return cluster_model - - for rule in self.scope: - if 'host_aggregates' in rule: - self._collect_aggregates(rule['host_aggregates'], - allowed_nodes) - elif 'availability_zones' in rule: - 
self._collect_zones(rule['availability_zones'], - allowed_nodes) - elif 'exclude' in rule: - self.exclude_resources( - rule['exclude'], instances=instances_to_exclude, - nodes=nodes_to_exclude, - instance_metadata=instance_metadata) - - instances_to_remove = set(instances_to_exclude) - if allowed_nodes: - nodes_to_remove = set(model_hosts) - set(allowed_nodes) - nodes_to_remove.update(nodes_to_exclude) - - self.remove_nodes_from_model(nodes_to_remove, cluster_model) - - if instance_metadata and self.config.check_optimize_metadata: - self.exclude_instances_with_given_metadata( - instance_metadata, cluster_model, instances_to_remove) - - self.remove_instances_from_model(instances_to_remove, cluster_model) - - return cluster_model diff --git a/watcher/decision_engine/scoring/__init__.py b/watcher/decision_engine/scoring/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/scoring/base.py b/watcher/decision_engine/scoring/base.py deleted file mode 100644 index 3fcc68b..0000000 --- a/watcher/decision_engine/scoring/base.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc -import six - -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class ScoringEngine(loadable.Loadable): - """A base class for all the Scoring Engines. 
- - A Scoring Engine is an instance of a data model, to which the learning - data was applied. - - Please note that this class contains non-static and non-class methods by - design, so that it's easy to create multiple Scoring Engine instances - using a single class (possibly configured differently). - """ - - @abc.abstractmethod - def get_name(self): - """Returns the name of the Scoring Engine. - - The name should be unique across all Scoring Engines. - - :return: A Scoring Engine name - :rtype: str - """ - - @abc.abstractmethod - def get_description(self): - """Returns the description of the Scoring Engine. - - The description might contain any human readable information, which - might be useful for Strategy developers planning to use this Scoring - Engine. It will be also visible in the Watcher API and CLI. - - :return: A Scoring Engine description - :rtype: str - """ - - @abc.abstractmethod - def get_metainfo(self): - """Returns the metadata information about Scoring Engine. - - The metadata might contain a machine-friendly (e.g. in JSON format) - information needed to use this Scoring Engine. For example, some - Scoring Engines require to pass the array of features in particular - order to be able to calculate the score value. This order can be - defined in metadata and used in Strategy. - - :return: A Scoring Engine metadata - :rtype: str - """ - - @abc.abstractmethod - def calculate_score(self, features): - """Calculates a score value based on arguments passed. - - Scoring Engines might be very different to each other. They might - solve different problems or use different algorithms or frameworks - internally. To enable this kind of flexibility, the method takes only - one argument (string) and produces the results in the same format - (string). The consumer of the Scoring Engine is ultimately responsible - for providing the right arguments and parsing the result. 
- - :param features: Input data for Scoring Engine - :type features: str - :return: A score result - :rtype: str - """ - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - -@six.add_metaclass(abc.ABCMeta) -class ScoringEngineContainer(loadable.Loadable): - """A base class for all the Scoring Engines Containers. - - A Scoring Engine Container is an abstraction which allows to plugin - multiple Scoring Engines as a single Stevedore plugin. This enables some - more advanced scenarios like dynamic reloading of Scoring Engine - implementations without having to restart any Watcher services. - """ - - @classmethod - @abc.abstractmethod - def get_scoring_engine_list(self): - """Returns a list of Scoring Engine instances. - - :return: A list of Scoring Engine instances - :rtype: :class: `~.scoring_engine.ScoringEngine` - """ - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] diff --git a/watcher/decision_engine/scoring/dummy_scorer.py b/watcher/decision_engine/scoring/dummy_scorer.py deleted file mode 100644 index 735dbac..0000000 --- a/watcher/decision_engine/scoring/dummy_scorer.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import units - -from watcher._i18n import _ -from watcher.decision_engine.scoring import base - -LOG = log.getLogger(__name__) - - -class DummyScorer(base.ScoringEngine): - """Sample Scoring Engine implementing simplified workload classification. - - Typically a scoring engine would be implemented using machine learning - techniques. For example, for workload classification problem the solution - could consist of the following steps: - - 1. Define a problem to solve: we want to detect the workload on the - machine based on the collected metrics like power consumption, - temperature, CPU load, memory usage, disk usage, network usage, etc. - 2. The workloads could be predefined, e.g. IDLE, CPU-INTENSIVE, - MEMORY-INTENSIVE, IO-BOUND, ... - Or we could let the ML algorithm to find the workloads based on the - learning data provided. The decision here leads to learning algorithm - used (supervised vs. non-supervised learning). - 3. Collect metrics from sample servers (learning data). - 4. Define the analytical model, pick ML framework and algorithm. - 5. Apply learning data to the data model. Once taught, the data model - becomes a scoring engine and can start doing predictions or - classifications. - 6. Wrap up the scoring engine with the class like this one, so it has a - standard interface and can be used inside Watcher. - - This class is a greatly very simplified version of the above model. 
The - goal is to provide an example how such class could be implemented and used - in Watcher, without adding additional dependencies like machine learning - frameworks (which can be quite heavy) or over-complicating it's internal - implementation, which can distract from looking at the overall picture. - - That said, this class implements a workload classification "manually" - (in plain python code) and is not intended to be used in production. - """ - - # Constants defining column indices for the input data - PROCESSOR_TIME_PERC = 0 - MEM_TOTAL_BYTES = 1 - MEM_AVAIL_BYTES = 2 - MEM_PAGE_READS_PER_SEC = 3 - MEM_PAGE_WRITES_PER_SEC = 4 - DISK_READ_BYTES_PER_SEC = 5 - DISK_WRITE_BYTES_PER_SEC = 6 - NET_BYTES_RECEIVED_PER_SEC = 7 - NET_BYTES_SENT_PER_SEC = 8 - - # Types of workload - WORKLOAD_IDLE = 0 - WORKLOAD_CPU = 1 - WORKLOAD_MEM = 2 - WORKLOAD_DISK = 3 - - def get_name(self): - return 'dummy_scorer' - - def get_description(self): - return 'Dummy workload classifier' - - def get_metainfo(self): - """Metadata about input/output format of this scoring engine. - - This information is used in strategy using this scoring engine to - prepare the input information and to understand the results. - """ - - return """{ - "feature_columns": [ - "proc-processor-time-%", - "mem-total-bytes", - "mem-avail-bytes", - "mem-page-reads/sec", - "mem-page-writes/sec", - "disk-read-bytes/sec", - "disk-write-bytes/sec", - "net-bytes-received/sec", - "net-bytes-sent/sec"], - "result_columns": [ - "workload", - "idle-probability", - "cpu-probability", - "memory-probability", - "disk-probability"], - "workloads": [ - "idle", - "cpu-intensive", - "memory-intensive", - "disk-intensive"] - }""" - - def calculate_score(self, features): - """Arbitrary algorithm calculating the score. - - It demonstrates how to parse the input data (features) and serialize - the results. 
It detects the workload type based on the metrics and - also returns the probabilities of each workload detection (again, - the arbitrary values are returned, just for demonstration how the - "real" machine learning algorithm could work. For example, the - Gradient Boosting Machine from H2O framework is using exactly the - same format: - http://www.h2o.ai/verticals/algos/gbm/ - """ - - LOG.debug('Calculating score, features: %s', features) - - # By default IDLE workload will be returned - workload = self.WORKLOAD_IDLE - idle_prob = 0.0 - cpu_prob = 0.0 - mem_prob = 0.0 - disk_prob = 0.0 - - # Basic input validation - try: - flist = jsonutils.loads(features) - except Exception as e: - raise ValueError(_('Unable to parse features: ') % e) - if type(flist) is not list: - raise ValueError(_('JSON list expected in feature argument')) - if len(flist) != 9: - raise ValueError(_('Invalid number of features, expected 9')) - - # Simple logic for workload classification - if flist[self.PROCESSOR_TIME_PERC] >= 80: - workload = self.WORKLOAD_CPU - cpu_prob = 100.0 - elif flist[self.MEM_PAGE_READS_PER_SEC] >= 1000 \ - and flist[self.MEM_PAGE_WRITES_PER_SEC] >= 1000: - workload = self.WORKLOAD_MEM - mem_prob = 100.0 - elif flist[self.DISK_READ_BYTES_PER_SEC] >= 50*units.Mi \ - and flist[self.DISK_WRITE_BYTES_PER_SEC] >= 50*units.Mi: - workload = self.WORKLOAD_DISK - disk_prob = 100.0 - else: - idle_prob = 100.0 - if flist[self.PROCESSOR_TIME_PERC] >= 40: - cpu_prob = 50.0 - if flist[self.MEM_PAGE_READS_PER_SEC] >= 500 \ - or flist[self.MEM_PAGE_WRITES_PER_SEC] >= 500: - mem_prob = 50.0 - - return jsonutils.dumps( - [workload, idle_prob, cpu_prob, mem_prob, disk_prob]) diff --git a/watcher/decision_engine/scoring/dummy_scoring_container.py b/watcher/decision_engine/scoring/dummy_scoring_container.py deleted file mode 100644 index 5b89bdf..0000000 --- a/watcher/decision_engine/scoring/dummy_scoring_container.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 
(c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log -from oslo_serialization import jsonutils - -from watcher._i18n import _ -from watcher.decision_engine.scoring import base - -LOG = log.getLogger(__name__) - - -class DummyScoringContainer(base.ScoringEngineContainer): - """Sample Scoring Engine container returning a list of scoring engines. - - Please note that it can be used in dynamic scenarios and the returned list - might return instances based on some external configuration (e.g. in - database). In order for these scoring engines to become discoverable in - Watcher API and Watcher CLI, a database re-sync is required. It can be - executed using watcher-sync tool for example. - """ - - @classmethod - def get_scoring_engine_list(self): - return [ - SimpleFunctionScorer( - 'dummy_min_scorer', - 'Dummy Scorer calculating the minimum value', - min), - SimpleFunctionScorer( - 'dummy_max_scorer', - 'Dummy Scorer calculating the maximum value', - max), - SimpleFunctionScorer( - 'dummy_avg_scorer', - 'Dummy Scorer calculating the average value', - lambda x: float(sum(x)) / len(x)), - ] - - -class SimpleFunctionScorer(base.ScoringEngine): - """A simple generic scoring engine for demonstration purposes only. - - A generic scoring engine implementation, which is expecting a JSON - formatted array of numbers to be passed as an input for score calculation. 
- It then executes the aggregate function on this array and returns an - array with a single aggregated number (also JSON formatted). - """ - - def __init__(self, name, description, aggregate_function): - super(SimpleFunctionScorer, self).__init__(config=None) - self._name = name - self._description = description - self._aggregate_function = aggregate_function - - def get_name(self): - return self._name - - def get_description(self): - return self._description - - def get_metainfo(self): - return '' - - def calculate_score(self, features): - LOG.debug('Calculating score, features: %s', features) - - # Basic input validation - try: - flist = jsonutils.loads(features) - except Exception as e: - raise ValueError(_('Unable to parse features: %s') % e) - if type(flist) is not list: - raise ValueError(_('JSON list expected in feature argument')) - if len(flist) < 1: - raise ValueError(_('At least one feature is required')) - - # Calculate the result - result = self._aggregate_function(flist) - - # Return the aggregated result - return jsonutils.dumps([result]) diff --git a/watcher/decision_engine/scoring/scoring_factory.py b/watcher/decision_engine/scoring/scoring_factory.py deleted file mode 100644 index c716cff..0000000 --- a/watcher/decision_engine/scoring/scoring_factory.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -A module providing helper methods to work with Scoring Engines. -""" - -from oslo_log import log - -from watcher._i18n import _ -from watcher.decision_engine.loading import default - - -LOG = log.getLogger(__name__) - -_scoring_engine_map = None - - -def get_scoring_engine(scoring_engine_name): - """Returns a Scoring Engine by its name. - - Method retrieves a Scoring Engine instance by its name. Scoring Engine - instances are being cached in memory to avoid enumerating the Stevedore - plugins on each call. - - When called for the first time, it reloads the cache. - - :return: A Scoring Engine instance with a given name - :rtype: :class: - `watcher.decision_engine.scoring.scoring_engine.ScoringEngine` - """ - global _scoring_engine_map - - _reload_scoring_engines() - scoring_engine = _scoring_engine_map.get(scoring_engine_name) - if scoring_engine is None: - raise KeyError(_('Scoring Engine with name=%s not found') - % scoring_engine_name) - - return scoring_engine - - -def get_scoring_engine_list(): - """Returns a list of Scoring Engine instances. - - The main use case for this method is discoverability, so the Scoring - Engine list is always reloaded before returning any results. - - Frequent calling of this method might have a negative performance impact. - - :return: A list of all available Scoring Engine instances - :rtype: List of :class: - `watcher.decision_engine.scoring.scoring_engine.ScoringEngine` - """ - global _scoring_engine_map - - _reload_scoring_engines(True) - return _scoring_engine_map.values() - - -def _reload_scoring_engines(refresh=False): - """Reloads Scoring Engines from Stevedore plugins to memory. 
- - Please note that two Stevedore entry points are used: - - watcher_scoring_engines: for simple plugin implementations - - watcher_scoring_engine_containers: for container plugins, which enable - the dynamic scenarios (its get_scoring_engine_list method might return - different values on each call) - """ - global _scoring_engine_map - - if _scoring_engine_map is None or refresh: - LOG.debug("Reloading Scoring Engine plugins") - engines = default.DefaultScoringLoader().list_available() - _scoring_engine_map = dict() - - for name in engines.keys(): - se_impl = default.DefaultScoringLoader().load(name) - LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name()) - _scoring_engine_map[se_impl.get_name()] = se_impl - - engine_containers = \ - default.DefaultScoringContainerLoader().list_available() - - for container_id, container_cls in engine_containers.items(): - LOG.debug("Found Scoring Engine container plugin: %s" % - container_id) - for se in container_cls.get_scoring_engine_list(): - LOG.debug("Found Scoring Engine plugin: %s" % - se.get_name()) - _scoring_engine_map[se.get_name()] = se diff --git a/watcher/decision_engine/solution/__init__.py b/watcher/decision_engine/solution/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/solution/base.py b/watcher/decision_engine/solution/base.py deleted file mode 100644 index 3aa895c..0000000 --- a/watcher/decision_engine/solution/base.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -A :ref:`Solution ` is the result of execution of a -:ref:`strategy ` (i.e., an algorithm). -Each solution is composed of many pieces of information: - -- A set of :ref:`actions ` generated by the strategy in - order to achieve the :ref:`goal ` of an associated - :ref:`audit `. -- A set of :ref:`efficacy indicators ` as - defined by the associated goal -- A :ref:`global efficacy ` which is computed by the - associated goal using the aforementioned efficacy indicators. - -A :ref:`Solution ` is different from an -:ref:`Action Plan ` because it contains the -non-scheduled list of :ref:`Actions ` which is produced by a -:ref:`Strategy `. In other words, the list of Actions in -a :ref:`Solution ` has not yet been re-ordered by the -:ref:`Watcher Planner `. - -Note that some algorithms (i.e. :ref:`Strategies `) may -generate several :ref:`Solutions `. This gives rise to the -problem of determining which :ref:`Solution ` should be -applied. - -Two approaches to dealing with this can be envisaged: - -- **fully automated mode**: only the :ref:`Solution ` - with the highest ranking (i.e., the highest - :ref:`Optimization Efficacy `) will be sent to the - :ref:`Watcher Planner ` and translated into - concrete :ref:`Actions `. -- **manual mode**: several :ref:`Solutions ` are proposed - to the :ref:`Administrator ` with a detailed - measurement of the estimated :ref:`Optimization Efficacy - ` and he/she decides which one will be launched. 
-""" - -import abc -import six - -from watcher.decision_engine.solution import efficacy - - -@six.add_metaclass(abc.ABCMeta) -class BaseSolution(object): - def __init__(self, goal, strategy): - """Base Solution constructor - - :param goal: Goal associated to this solution - :type goal: :py:class:`~.base.Goal` instance - :param strategy: Strategy associated to this solution - :type strategy: :py:class:`~.BaseStrategy` instance - """ - self.goal = goal - self.strategy = strategy - self.origin = None - self.model = None - self.efficacy = efficacy.Efficacy(self.goal, self.strategy) - - @property - def global_efficacy(self): - return self.efficacy.global_efficacy - - @property - def efficacy_indicators(self): - return self.efficacy.indicators - - def compute_global_efficacy(self): - """Compute the global efficacy given a map of efficacy indicators""" - self.efficacy.compute_global_efficacy() - - def set_efficacy_indicators(self, **indicators_map): - """Set the efficacy indicators mapping (no validation) - - :param indicators_map: mapping between the indicator name and its value - :type indicators_map: dict {`str`: `object`} - """ - self.efficacy.set_efficacy_indicators(**indicators_map) - - @abc.abstractmethod - def add_action(self, action_type, resource_id, input_parameters=None): - """Add a new Action in the Solution - - :param action_type: the unique id of an action type defined in - entry point 'watcher_actions' - :param resource_id: the unique id of the resource to which the - `Action` applies. - :param input_parameters: An array of input parameters provided as - key-value pairs of strings. Each key-pair contains names and - values that match what was previously defined in the `Action` - type schema. 
- """ - raise NotImplementedError() - - @abc.abstractproperty - def actions(self): - raise NotImplementedError() diff --git a/watcher/decision_engine/solution/default.py b/watcher/decision_engine/solution/default.py deleted file mode 100644 index be895df..0000000 --- a/watcher/decision_engine/solution/default.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher.applier.actions import base as baction -from watcher.common import exception -from watcher.decision_engine.solution import base - -LOG = log.getLogger(__name__) - - -class DefaultSolution(base.BaseSolution): - def __init__(self, goal, strategy): - """Stores a set of actions generated by a strategy - - The DefaultSolution class store a set of actions generated by a - strategy in order to achieve the goal. - - :param goal: Goal associated to this solution - :type goal: :py:class:`~.base.Goal` instance - :param strategy: Strategy associated to this solution - :type strategy: :py:class:`~.BaseStrategy` instance - """ - super(DefaultSolution, self).__init__(goal, strategy) - self._actions = [] - - def add_action(self, action_type, input_parameters=None, resource_id=None): - if input_parameters is not None: - if baction.BaseAction.RESOURCE_ID in input_parameters.keys(): - raise exception.ReservedWord(name=baction.BaseAction. 
- RESOURCE_ID) - else: - input_parameters = {} - - if resource_id is not None: - input_parameters[baction.BaseAction.RESOURCE_ID] = resource_id - action = { - 'action_type': action_type, - 'input_parameters': input_parameters - } - if action not in self._actions: - self._actions.append(action) - else: - LOG.warning('Action %s has been added into the solution, ' - 'duplicate action will be dropped.', str(action)) - - def __str__(self): - return "\n".join(self._actions) - - @property - def actions(self): - """Get the current actions of the solution""" - return self._actions diff --git a/watcher/decision_engine/solution/efficacy.py b/watcher/decision_engine/solution/efficacy.py deleted file mode 100644 index 108e78f..0000000 --- a/watcher/decision_engine/solution/efficacy.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numbers - -from oslo_log import log as logging - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils - -LOG = logging.getLogger(__name__) - - -class IndicatorsMap(utils.Struct): - pass - - -class Indicator(utils.Struct): - - def __init__(self, name, description, unit, value): - super(Indicator, self).__init__() - self.name = name - self.description = description - self.unit = unit - if not isinstance(value, numbers.Number): - raise exception.InvalidIndicatorValue( - _("An indicator value should be a number")) - self.value = value - - -class Efficacy(object): - """Solution efficacy""" - - def __init__(self, goal, strategy): - """Solution efficacy - - :param goal: Goal associated to this solution - :type goal: :py:class:`~.base.Goal` instance - :param strategy: Strategy associated to this solution - :type strategy: :py:class:`~.BaseStrategy` instance - """ - self.goal = goal - self.strategy = strategy - - self._efficacy_spec = self.goal.efficacy_specification - - # Used to store in DB the info related to the efficacy indicators - self.indicators = [] - # Used to compute the global efficacy - self._indicators_mapping = IndicatorsMap() - self.global_efficacy = None - - def set_efficacy_indicators(self, **indicators_map): - """Set the efficacy indicators - - :param indicators_map: kwargs where the key is the name of the efficacy - indicator as defined in the associated - :py:class:`~.IndicatorSpecification` and the - value is a number. 
- :type indicators_map: dict {str: numerical value} - """ - self._indicators_mapping.update(indicators_map) - - def compute_global_efficacy(self): - self._efficacy_spec.validate_efficacy_indicators( - self._indicators_mapping) - try: - self.global_efficacy = ( - self._efficacy_spec.get_global_efficacy_indicator( - self._indicators_mapping)) - - indicators_specs_map = { - indicator_spec.name: indicator_spec - for indicator_spec in self._efficacy_spec.indicators_specs} - - indicators = [] - for indicator_name, value in self._indicators_mapping.items(): - related_indicator_spec = indicators_specs_map[indicator_name] - indicators.append( - Indicator( - name=related_indicator_spec.name, - description=related_indicator_spec.description, - unit=related_indicator_spec.unit, - value=value)) - - self.indicators = indicators - except Exception as exc: - LOG.exception(exc) - raise exception.GlobalEfficacyComputationError( - goal=self.goal.name, - strategy=self.strategy.name) diff --git a/watcher/decision_engine/solution/solution_comparator.py b/watcher/decision_engine/solution/solution_comparator.py deleted file mode 100644 index 254cd6c..0000000 --- a/watcher/decision_engine/solution/solution_comparator.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseSolutionComparator(object): - @abc.abstractmethod - def compare(self, sol1, sol2): - raise NotImplementedError() diff --git a/watcher/decision_engine/solution/solution_evaluator.py b/watcher/decision_engine/solution/solution_evaluator.py deleted file mode 100644 index b36b70f..0000000 --- a/watcher/decision_engine/solution/solution_evaluator.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseSolutionEvaluator(object): - @abc.abstractmethod - def evaluate(self, solution): - raise NotImplementedError() diff --git a/watcher/decision_engine/strategy/__init__.py b/watcher/decision_engine/strategy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/common/__init__.py b/watcher/decision_engine/strategy/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/common/level.py b/watcher/decision_engine/strategy/common/level.py deleted file mode 100644 index 83d95c0..0000000 --- a/watcher/decision_engine/strategy/common/level.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import enum - - -class StrategyLevel(enum.Enum): - conservative = "conservative" - balanced = "balanced" - growth = "growth" - aggressive = "aggressive" diff --git a/watcher/decision_engine/strategy/context/__init__.py b/watcher/decision_engine/strategy/context/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/context/base.py b/watcher/decision_engine/strategy/context/base.py deleted file mode 100644 index 37286b2..0000000 --- a/watcher/decision_engine/strategy/context/base.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import six - -from watcher import notifications -from watcher.objects import fields - - -@six.add_metaclass(abc.ABCMeta) -class StrategyContext(object): - - def execute_strategy(self, audit, request_context): - """Execute the strategy for the given an audit - - :param audit: Audit object - :type audit: :py:class:`~.objects.audit.Audit` instance - :param request_context: Current request context - :type request_context: :py:class:`~.RequestContext` instance - :returns: The computed solution - :rtype: :py:class:`~.BaseSolution` instance - """ - try: - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.STRATEGY, - phase=fields.NotificationPhase.START) - solution = self.do_execute_strategy(audit, request_context) - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.STRATEGY, - phase=fields.NotificationPhase.END) - return solution - except Exception: - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.STRATEGY, - priority=fields.NotificationPriority.ERROR, - phase=fields.NotificationPhase.ERROR) - raise - - @abc.abstractmethod - def do_execute_strategy(self, audit, request_context): - """Execute the strategy for the given an audit - - :param audit: Audit object - :type audit: :py:class:`~.objects.audit.Audit` instance - :param request_context: Current request context - :type request_context: :py:class:`~.RequestContext` instance - :returns: The computed solution - :rtype: :py:class:`~.BaseSolution` instance - """ - raise NotImplementedError() diff --git a/watcher/decision_engine/strategy/context/default.py b/watcher/decision_engine/strategy/context/default.py deleted file mode 100644 index e0b56ba..0000000 --- a/watcher/decision_engine/strategy/context/default.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, 
Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo_log import log - -from watcher.common import clients -from watcher.common import utils -from watcher.decision_engine.strategy.context import base -from watcher.decision_engine.strategy.selection import default - -from watcher import objects - -LOG = log.getLogger(__name__) - - -class DefaultStrategyContext(base.StrategyContext): - def __init__(self): - super(DefaultStrategyContext, self).__init__() - LOG.debug("Initializing Strategy Context") - - def do_execute_strategy(self, audit, request_context): - osc = clients.OpenStackClients() - # todo(jed) retrieve in audit parameters (threshold,...) - # todo(jed) create ActionPlan - - goal = objects.Goal.get_by_id(request_context, audit.goal_id) - - # NOTE(jed56) In the audit object, the 'strategy_id' attribute - # is optional. If the admin wants to force the trigger of a Strategy - # it could specify the Strategy uuid in the Audit. 
- strategy_name = None - if audit.strategy_id: - strategy = objects.Strategy.get_by_id( - request_context, audit.strategy_id) - strategy_name = strategy.name - - strategy_selector = default.DefaultStrategySelector( - goal_name=goal.name, - strategy_name=strategy_name, - osc=osc) - - selected_strategy = strategy_selector.select() - - selected_strategy.audit_scope = audit.scope - - schema = selected_strategy.get_schema() - if not audit.parameters and schema: - # Default value feedback if no predefined strategy - utils.StrictDefaultValidatingDraft4Validator(schema).validate( - audit.parameters) - - selected_strategy.input_parameters.update({ - name: value for name, value in audit.parameters.items() - }) - - return selected_strategy.execute() diff --git a/watcher/decision_engine/strategy/selection/__init__.py b/watcher/decision_engine/strategy/selection/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/selection/base.py b/watcher/decision_engine/strategy/selection/base.py deleted file mode 100644 index 7bf9490..0000000 --- a/watcher/decision_engine/strategy/selection/base.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseSelector(object): - - @abc.abstractmethod - def select(self): - raise NotImplementedError() diff --git a/watcher/decision_engine/strategy/selection/default.py b/watcher/decision_engine/strategy/selection/default.py deleted file mode 100644 index ac73447..0000000 --- a/watcher/decision_engine/strategy/selection/default.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception -from watcher.decision_engine.loading import default -from watcher.decision_engine.strategy.selection import base - -LOG = log.getLogger(__name__) - - -class DefaultStrategySelector(base.BaseSelector): - - def __init__(self, goal_name, strategy_name=None, osc=None): - """Default strategy selector - - :param goal_name: Name of the goal - :param strategy_name: Name of the strategy - :param osc: an OpenStackClients instance - """ - super(DefaultStrategySelector, self).__init__() - self.goal_name = goal_name - self.strategy_name = strategy_name - self.osc = osc - self.strategy_loader = default.DefaultStrategyLoader() - - def select(self): - """Selects a strategy - - :raises: :py:class:`~.LoadingError` if it failed to load a strategy - :returns: A :py:class:`~.BaseStrategy` instance - """ - strategy_to_load = None - try: - if self.strategy_name: - strategy_to_load = self.strategy_name - else: - available_strategies = self.strategy_loader.list_available() - available_strategies_for_goal = list( - key for key, strat in available_strategies.items() - if strat.get_goal_name() == self.goal_name) - - if not available_strategies_for_goal: - raise exception.NoAvailableStrategyForGoal( - goal=self.goal_name) - - # TODO(v-francoise): We should do some more work here to select - # a strategy out of a given goal instead of just choosing the - # 1st one - strategy_to_load = available_strategies_for_goal[0] - return self.strategy_loader.load(strategy_to_load, osc=self.osc) - except exception.NoAvailableStrategyForGoal: - raise - except Exception as exc: - LOG.exception(exc) - raise exception.LoadingError( - _("Could not load any strategy for goal %(goal)s"), - goal=self.goal_name) diff --git a/watcher/decision_engine/strategy/strategies/__init__.py b/watcher/decision_engine/strategy/strategies/__init__.py deleted file mode 100644 index c1a2821..0000000 --- 
a/watcher/decision_engine/strategy/strategies/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.strategy.strategies import basic_consolidation -from watcher.decision_engine.strategy.strategies import dummy_strategy -from watcher.decision_engine.strategy.strategies import dummy_with_scorer -from watcher.decision_engine.strategy.strategies import noisy_neighbor -from watcher.decision_engine.strategy.strategies import outlet_temp_control -from watcher.decision_engine.strategy.strategies import uniform_airflow -from watcher.decision_engine.strategy.strategies import \ - vm_workload_consolidation -from watcher.decision_engine.strategy.strategies import workload_balance -from watcher.decision_engine.strategy.strategies import workload_stabilization - -BasicConsolidation = basic_consolidation.BasicConsolidation -OutletTempControl = outlet_temp_control.OutletTempControl -DummyStrategy = dummy_strategy.DummyStrategy -DummyWithScorer = dummy_with_scorer.DummyWithScorer -VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation -WorkloadBalance = workload_balance.WorkloadBalance -WorkloadStabilization = workload_stabilization.WorkloadStabilization -UniformAirflow = uniform_airflow.UniformAirflow -NoisyNeighbor = noisy_neighbor.NoisyNeighbor - -__all__ = ("BasicConsolidation", "OutletTempControl", "DummyStrategy", - 
"DummyWithScorer", "VMWorkloadConsolidation", "WorkloadBalance", - "WorkloadStabilization", "UniformAirflow", "NoisyNeighbor") diff --git a/watcher/decision_engine/strategy/strategies/base.py b/watcher/decision_engine/strategy/strategies/base.py deleted file mode 100644 index 607f98a..0000000 --- a/watcher/decision_engine/strategy/strategies/base.py +++ /dev/null @@ -1,360 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Strategy ` is an algorithm implementation which is -able to find a :ref:`Solution ` for a given -:ref:`Goal `. - -There may be several potential strategies which are able to achieve the same -:ref:`Goal `. This is why it is possible to configure which -specific :ref:`Strategy ` should be used for each -:ref:`Goal `. - -Some strategies may provide better optimization results but may take more time -to find an optimal :ref:`Solution `. - -When a new :ref:`Goal ` is added to the Watcher configuration, -at least one default associated :ref:`Strategy ` should be -provided as well. - -:ref:`Some default implementations are provided `, but it -is possible to :ref:`develop new implementations ` -which are dynamically loaded by Watcher at launch time. 
-""" - -import abc -import six - -from oslo_utils import strutils - -from watcher.common import clients -from watcher.common import context -from watcher.common import exception -from watcher.common.loader import loadable -from watcher.common import utils -from watcher.decision_engine.loading import default as loading -from watcher.decision_engine.model.collector import manager -from watcher.decision_engine.scope import default as default_scope -from watcher.decision_engine.solution import default -from watcher.decision_engine.strategy.common import level - - -@six.add_metaclass(abc.ABCMeta) -class BaseStrategy(loadable.Loadable): - """A base class for all the strategies - - A Strategy is an algorithm implementation which is able to find a - Solution for a given Goal. - """ - - def __init__(self, config, osc=None): - """Constructor: the signature should be identical within the subclasses - - :param config: Configuration related to this plugin - :type config: :py:class:`~.Struct` - :param osc: An OpenStackClients instance - :type osc: :py:class:`~.OpenStackClients` instance - """ - super(BaseStrategy, self).__init__(config) - self.ctx = context.make_context() - self._name = self.get_name() - self._display_name = self.get_display_name() - self._goal = self.get_goal() - # default strategy level - self._strategy_level = level.StrategyLevel.conservative - self._cluster_state_collector = None - # the solution given by the strategy - self._solution = default.DefaultSolution(goal=self.goal, strategy=self) - self._osc = osc - self._collector_manager = None - self._compute_model = None - self._storage_model = None - self._input_parameters = utils.Struct() - self._audit_scope = None - self._audit_scope_handler = None - - @classmethod - @abc.abstractmethod - def get_name(cls): - """The name of the strategy""" - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_display_name(cls): - """The goal display name for the strategy""" - raise 
NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_translatable_display_name(cls): - """The translatable msgid of the strategy""" - # Note(v-francoise): Defined here to be used as the translation key for - # other services - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_goal_name(cls): - """The goal name the strategy achieves""" - raise NotImplementedError() - - @classmethod - def get_goal(cls): - """The goal the strategy achieves""" - goal_loader = loading.DefaultGoalLoader() - return goal_loader.load(cls.get_goal_name()) - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def pre_execute(self): - """Pre-execution phase - - This can be used to fetch some pre-requisites or data. - """ - raise NotImplementedError() - - @abc.abstractmethod - def do_execute(self): - """Strategy execution phase - - This phase is where you should put the main logic of your strategy. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def post_execute(self): - """Post-execution phase - - This can be used to compute the global efficacy - """ - raise NotImplementedError() - - def execute(self): - """Execute a strategy - - :return: A computed solution (via a placement algorithm) - :rtype: :py:class:`~.BaseSolution` instance - """ - self.pre_execute() - self.do_execute() - self.post_execute() - - self.solution.compute_global_efficacy() - - return self.solution - - @property - def collector_manager(self): - if self._collector_manager is None: - self._collector_manager = manager.CollectorManager() - return self._collector_manager - - @property - def compute_model(self): - """Cluster data model - - :returns: Cluster data model the strategy is executed on - :rtype model: :py:class:`~.ModelRoot` instance - """ - if self._compute_model is None: - collector = self.collector_manager.get_cluster_model_collector( - 'compute', osc=self.osc) - self._compute_model = self.audit_scope_handler.get_scoped_model( - collector.get_latest_cluster_data_model()) - - if not self._compute_model: - raise exception.ClusterStateNotDefined() - - if self._compute_model.stale: - raise exception.ClusterStateStale() - - return self._compute_model - - @property - def storage_model(self): - """Cluster data model - - :returns: Cluster data model the strategy is executed on - :rtype model: :py:class:`~.ModelRoot` instance - """ - if self._storage_model is None: - collector = self.collector_manager.get_cluster_model_collector( - 'storage', osc=self.osc) - self._storage_model = self.audit_scope_handler.get_scoped_model( - collector.get_latest_cluster_data_model()) - - if not self._storage_model: - raise exception.ClusterStateNotDefined() - - if self._storage_model.stale: - raise exception.ClusterStateStale() - - return self._storage_model - - @classmethod - def get_schema(cls): - """Defines a Schema that the input parameters shall comply to - - :return: A jsonschema format 
(mandatory default setting) - :rtype: dict - """ - return {} - - @property - def input_parameters(self): - return self._input_parameters - - @input_parameters.setter - def input_parameters(self, p): - self._input_parameters = p - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @property - def solution(self): - return self._solution - - @solution.setter - def solution(self, s): - self._solution = s - - @property - def audit_scope(self): - return self._audit_scope - - @audit_scope.setter - def audit_scope(self, s): - self._audit_scope = s - - @property - def audit_scope_handler(self): - if not self._audit_scope_handler: - self._audit_scope_handler = default_scope.DefaultScope( - self.audit_scope, self.config) - return self._audit_scope_handler - - @property - def name(self): - return self._name - - @property - def display_name(self): - return self._display_name - - @property - def goal(self): - return self._goal - - @property - def strategy_level(self): - return self._strategy_level - - @strategy_level.setter - def strategy_level(self, s): - self._strategy_level = s - - @property - def state_collector(self): - return self._cluster_state_collector - - @state_collector.setter - def state_collector(self, s): - self._cluster_state_collector = s - - def filter_instances_by_audit_tag(self, instances): - if not self.config.check_optimize_metadata: - return instances - instances_to_migrate = [] - for instance in instances: - optimize = True - if instance.metadata: - try: - optimize = strutils.bool_from_string( - instance.metadata.get('optimize')) - except ValueError: - optimize = False - if optimize: - instances_to_migrate.append(instance) - return instances_to_migrate - - -@six.add_metaclass(abc.ABCMeta) -class DummyBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "dummy" - - -@six.add_metaclass(abc.ABCMeta) -class UnclassifiedStrategy(BaseStrategy): - """This base class is used 
to ease the development of new strategies - - The goal defined within this strategy can be used to simplify the - documentation explaining how to implement a new strategy plugin by - omitting the need for the strategy developer to define a goal straight - away. - """ - - @classmethod - def get_goal_name(cls): - return "unclassified" - - -@six.add_metaclass(abc.ABCMeta) -class ServerConsolidationBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "server_consolidation" - - -@six.add_metaclass(abc.ABCMeta) -class ThermalOptimizationBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "thermal_optimization" - - -@six.add_metaclass(abc.ABCMeta) -class WorkloadStabilizationBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "workload_balancing" - - -@six.add_metaclass(abc.ABCMeta) -class NoisyNeighborBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "noisy_neighbor" diff --git a/watcher/decision_engine/strategy/strategies/basic_consolidation.py b/watcher/decision_engine/strategy/strategies/basic_consolidation.py deleted file mode 100644 index 5618d0f..0000000 --- a/watcher/decision_engine/strategy/strategies/basic_consolidation.py +++ /dev/null @@ -1,565 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -""" -*Good server consolidation strategy* - -Consolidation of VMs is essential to achieve energy optimization in cloud -environments such as OpenStack. As VMs are spinned up and/or moved over time, -it becomes necessary to migrate VMs among servers to lower the costs. However, -migration of VMs introduces runtime overheads and consumes extra energy, thus -a good server consolidation strategy should carefully plan for migration in -order to both minimize energy consumption and comply to the various SLAs. - -This algorithm not only minimizes the overall number of used servers, but also -minimizes the number of migrations. - -It has been developed only for tests. You must have at least 2 physical compute -nodes to run it, so you can easily run it on DevStack. It assumes that live -migration is possible on your OpenStack cluster. - -""" - -import datetime - -from oslo_config import cfg -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.datasource import monasca as mon -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class BasicConsolidation(base.ServerConsolidationBaseStrategy): - """Basic offline consolidation using live migration""" - - HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent' - INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util' - - METRIC_NAMES = dict( - ceilometer=dict( - host_cpu_usage='compute.node.cpu.percent', - instance_cpu_usage='cpu_util'), - monasca=dict( - host_cpu_usage='cpu.percent', - instance_cpu_usage='vm.cpu.utilization_perc'), - gnocchi=dict( - host_cpu_usage='compute.node.cpu.percent', - instance_cpu_usage='cpu_util'), - ) - - MIGRATION = "migrate" - CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" - - def __init__(self, config, osc=None): - """Basic offline Consolidation 
using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: :py:class:`~.Struct` instance - :param osc: :py:class:`~.OpenStackClients` instance - """ - super(BasicConsolidation, self).__init__(config, osc) - - # set default value for the number of enabled compute nodes - self.number_of_enabled_nodes = 0 - # set default value for the number of released nodes - self.number_of_released_nodes = 0 - # set default value for the number of migrations - self.number_of_migrations = 0 - - # set default value for the efficacy - self.efficacy = 100 - - self._ceilometer = None - self._monasca = None - self._gnocchi = None - - # TODO(jed): improve threshold overbooking? - self.threshold_mem = 1 - self.threshold_disk = 1 - self.threshold_cores = 1 - - @classmethod - def get_name(cls): - return "basic" - - @property - def migration_attempts(self): - return self.input_parameters.get('migration_attempts', 0) - - @property - def period(self): - return self.input_parameters.get('period', 7200) - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_display_name(cls): - return _("Basic offline consolidation") - - @classmethod - def get_translatable_display_name(cls): - return "Basic offline consolidation" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "migration_attempts": { - "description": "Maximum number of combinations to be " - "tried by the strategy while searching " - "for potential candidates. 
To remove the " - "limit, set it to 0 (by default)", - "type": "number", - "default": 0 - }, - "period": { - "description": "The time interval in seconds for " - "getting statistic aggregation", - "type": "number", - "default": 7200 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - }, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "monasca", "gnocchi"]), - cfg.BoolOpt( - "check_optimize_metadata", - help="Check optimize metadata field in instance before " - "migration", - default=False), - ] - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, ceilometer): - self._ceilometer = ceilometer - - @property - def monasca(self): - if self._monasca is None: - self._monasca = mon.MonascaHelper(osc=self.osc) - return self._monasca - - @monasca.setter - def monasca(self, monasca): - self._monasca = monasca - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - def check_migration(self, source_node, destination_node, - instance_to_migrate): - """Check if the migration is possible - - :param source_node: the current node of the virtual machine - :param destination_node: the destination of the virtual machine - :param instance_to_migrate: the instance / virtual machine - :return: True if the there is enough place otherwise false - """ - if source_node == destination_node: - return False - - LOG.debug('Migrate instance %s from %s to %s', - instance_to_migrate, source_node, destination_node) - - total_cores = 0 - 
total_disk = 0 - total_mem = 0 - for instance in self.compute_model.get_node_instances( - destination_node): - total_cores += instance.vcpus - total_disk += instance.disk - total_mem += instance.memory - - # capacity requested by the compute node - total_cores += instance_to_migrate.vcpus - total_disk += instance_to_migrate.disk - total_mem += instance_to_migrate.memory - - return self.check_threshold(destination_node, total_cores, total_disk, - total_mem) - - def check_threshold(self, destination_node, total_cores, - total_disk, total_mem): - """Check threshold - - Check the threshold value defined by the ratio of - aggregated CPU capacity of VMs on one node to CPU capacity - of this node must not exceed the threshold value. - - :param destination_node: the destination of the virtual machine - :param total_cores: total cores of the virtual machine - :param total_disk: total disk size used by the virtual machine - :param total_mem: total memory used by the virtual machine - :return: True if the threshold is not exceed - """ - cpu_capacity = destination_node.vcpus - disk_capacity = destination_node.disk - memory_capacity = destination_node.memory - - return (cpu_capacity >= total_cores * self.threshold_cores and - disk_capacity >= total_disk * self.threshold_disk and - memory_capacity >= total_mem * self.threshold_mem) - - def calculate_weight(self, compute_resource, total_cores_used, - total_disk_used, total_memory_used): - """Calculate weight of every resource - - :param compute_resource: - :param total_cores_used: - :param total_disk_used: - :param total_memory_used: - :return: - """ - cpu_capacity = compute_resource.vcpus - disk_capacity = compute_resource.disk - memory_capacity = compute_resource.memory - - score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) / - float(cpu_capacity)) - - # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0 - if disk_capacity == 0: - score_disk = 0 - else: - score_disk = (1 - (float(disk_capacity) - 
float(total_disk_used)) / - float(disk_capacity)) - - score_memory = ( - 1 - (float(memory_capacity) - float(total_memory_used)) / - float(memory_capacity)) - # TODO(jed): take in account weight - return (score_cores + score_disk + score_memory) / 3 - - def get_node_cpu_usage(self, node): - metric_name = self.METRIC_NAMES[ - self.config.datasource]['host_cpu_usage'] - if self.config.datasource == "ceilometer": - resource_id = "%s_%s" % (node.uuid, node.hostname) - return self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=metric_name, - period=self.period, - aggregate='avg', - ) - elif self.config.datasource == "gnocchi": - resource_id = "%s_%s" % (node.uuid, node.hostname) - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.period)) - return self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=metric_name, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - elif self.config.datasource == "monasca": - statistics = self.monasca.statistic_aggregation( - meter_name=metric_name, - dimensions=dict(hostname=node.uuid), - period=self.period, - aggregate='avg' - ) - cpu_usage = None - for stat in statistics: - avg_col_idx = stat['columns'].index('avg') - values = [r[avg_col_idx] for r in stat['statistics']] - value = float(sum(values)) / len(values) - cpu_usage = value - - return cpu_usage - - raise exception.UnsupportedDataSource( - strategy=self.name, datasource=self.config.datasource) - - def get_instance_cpu_usage(self, instance): - metric_name = self.METRIC_NAMES[ - self.config.datasource]['instance_cpu_usage'] - if self.config.datasource == "ceilometer": - return self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=metric_name, - period=self.period, - aggregate='avg' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - 
datetime.timedelta( - seconds=int(self.period)) - return self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=metric_name, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean', - ) - elif self.config.datasource == "monasca": - statistics = self.monasca.statistic_aggregation( - meter_name=metric_name, - dimensions=dict(resource_id=instance.uuid), - period=self.period, - aggregate='avg' - ) - cpu_usage = None - for stat in statistics: - avg_col_idx = stat['columns'].index('avg') - values = [r[avg_col_idx] for r in stat['statistics']] - value = float(sum(values)) / len(values) - cpu_usage = value - return cpu_usage - - raise exception.UnsupportedDataSource( - strategy=self.name, datasource=self.config.datasource) - - def calculate_score_node(self, node): - """Calculate the score that represent the utilization level - - :param node: :py:class:`~.ComputeNode` instance - :return: Score for the given compute node - :rtype: float - """ - host_avg_cpu_util = self.get_node_cpu_usage(node) - - if host_avg_cpu_util is None: - resource_id = "%s_%s" % (node.uuid, node.hostname) - LOG.error( - "No values returned by %(resource_id)s " - "for %(metric_name)s" % dict( - resource_id=resource_id, - metric_name=self.METRIC_NAMES[ - self.config.datasource]['host_cpu_usage'])) - host_avg_cpu_util = 100 - - total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0) - - return self.calculate_weight(node, total_cores_used, 0, 0) - - def calculate_score_instance(self, instance): - """Calculate Score of virtual machine - - :param instance: the virtual machine - :return: score - """ - instance_cpu_utilization = self.get_instance_cpu_usage(instance) - if instance_cpu_utilization is None: - LOG.error( - "No values returned by %(resource_id)s " - "for %(metric_name)s" % dict( - resource_id=instance.uuid, - metric_name=self.METRIC_NAMES[ - self.config.datasource]['instance_cpu_usage'])) - instance_cpu_utilization = 100 - - 
total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0) - - return self.calculate_weight(instance, total_cores_used, 0, 0) - - def add_change_service_state(self, resource_id, state): - parameters = {'state': state} - self.solution.add_action(action_type=self.CHANGE_NOVA_SERVICE_STATE, - resource_id=resource_id, - input_parameters=parameters) - - def add_migration(self, - resource_id, - migration_type, - source_node, - destination_node): - parameters = {'migration_type': migration_type, - 'source_node': source_node, - 'destination_node': destination_node} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=resource_id, - input_parameters=parameters) - - def compute_score_of_nodes(self): - """Calculate score of nodes based on load by VMs""" - score = [] - for node in self.compute_model.get_all_compute_nodes().values(): - if node.status == element.ServiceState.ENABLED.value: - self.number_of_enabled_nodes += 1 - - instances = self.compute_model.get_node_instances(node) - if len(instances) > 0: - result = self.calculate_score_node(node) - score.append((node.uuid, result)) - - return score - - def node_and_instance_score(self, sorted_scores): - """Get List of VMs from node""" - node_to_release = sorted_scores[len(sorted_scores) - 1][0] - instances = self.compute_model.get_node_instances( - self.compute_model.get_node_by_uuid(node_to_release)) - - instances_to_migrate = self.filter_instances_by_audit_tag(instances) - instance_score = [] - for instance in instances_to_migrate: - if instance.state == element.InstanceState.ACTIVE.value: - instance_score.append( - (instance, self.calculate_score_instance(instance))) - - return node_to_release, instance_score - - def create_migration_instance(self, mig_instance, mig_source_node, - mig_destination_node): - """Create migration VM""" - if self.compute_model.migrate_instance( - mig_instance, mig_source_node, mig_destination_node): - self.add_migration(mig_instance.uuid, 'live', - 
mig_source_node.uuid, - mig_destination_node.uuid) - - if len(self.compute_model.get_node_instances(mig_source_node)) == 0: - self.add_change_service_state(mig_source_node. - uuid, - element.ServiceState.DISABLED.value) - self.number_of_released_nodes += 1 - - def calculate_num_migrations(self, sorted_instances, node_to_release, - sorted_score): - number_migrations = 0 - for mig_instance, __ in sorted_instances: - for node_uuid, __ in sorted_score: - mig_source_node = self.compute_model.get_node_by_uuid( - node_to_release) - mig_destination_node = self.compute_model.get_node_by_uuid( - node_uuid) - - result = self.check_migration( - mig_source_node, mig_destination_node, mig_instance) - if result: - self.create_migration_instance( - mig_instance, mig_source_node, mig_destination_node) - number_migrations += 1 - break - return number_migrations - - def unsuccessful_migration_actualization(self, number_migrations, - unsuccessful_migration): - if number_migrations > 0: - self.number_of_migrations += number_migrations - return 0 - else: - return unsuccessful_migration + 1 - - def pre_execute(self): - LOG.info("Initializing Server Consolidation") - - if not self.compute_model: - raise exception.ClusterStateNotDefined() - - if len(self.compute_model.get_all_compute_nodes()) == 0: - raise exception.ClusterEmpty() - - if self.compute_model.stale: - raise exception.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - unsuccessful_migration = 0 - - scores = self.compute_score_of_nodes() - # Sort compute nodes by Score decreasing - sorted_scores = sorted(scores, reverse=True, key=lambda x: (x[1])) - LOG.debug("Compute node(s) BFD %s", sorted_scores) - # Get Node to be released - if len(scores) == 0: - LOG.warning( - "The workloads of the compute nodes" - " of the cluster is zero") - return - - while sorted_scores and ( - not self.migration_attempts or - self.migration_attempts >= unsuccessful_migration): - node_to_release, 
instance_score = self.node_and_instance_score( - sorted_scores) - - # Sort instances by Score - sorted_instances = sorted( - instance_score, reverse=True, key=lambda x: (x[1])) - # BFD: Best Fit Decrease - LOG.debug("Instance(s) BFD %s", sorted_instances) - - migrations = self.calculate_num_migrations( - sorted_instances, node_to_release, sorted_scores) - - unsuccessful_migration = self.unsuccessful_migration_actualization( - migrations, unsuccessful_migration) - - if not migrations: - # We don't have any possible migrations to perform on this node - # so we discard the node so we can try to migrate instances - # from the next one in the list - sorted_scores.pop() - - infos = { - "compute_nodes_count": self.number_of_enabled_nodes, - "released_compute_nodes_count": self.number_of_released_nodes, - "instance_migrations_count": self.number_of_migrations, - "efficacy": self.efficacy - } - LOG.debug(infos) - - def post_execute(self): - self.solution.set_efficacy_indicators( - compute_nodes_count=self.number_of_enabled_nodes, - released_compute_nodes_count=self.number_of_released_nodes, - instance_migrations_count=self.number_of_migrations, - ) - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/dummy_strategy.py b/watcher/decision_engine/strategy/strategies/dummy_strategy.py deleted file mode 100644 index 22d7f4d..0000000 --- a/watcher/decision_engine/strategy/strategies/dummy_strategy.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher._i18n import _ -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class DummyStrategy(base.DummyBaseStrategy): - """Dummy strategy used for integration testing via Tempest - - *Description* - - This strategy does not provide any useful optimization. Its only purpose - is to be used by Tempest tests. - - *Requirements* - - - - *Limitations* - - Do not use in production. - - *Spec URL* - - - """ - - NOP = "nop" - SLEEP = "sleep" - - def pre_execute(self): - pass - - def do_execute(self): - para1 = self.input_parameters.para1 - para2 = self.input_parameters.para2 - LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", - {'p1': para1, 'p2': para2}) - parameters = {'message': 'hello World'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - parameters = {'message': para2} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - self.solution.add_action(action_type=self.SLEEP, - input_parameters={'duration': para1}) - - def post_execute(self): - pass - - @classmethod - def get_name(cls): - return "dummy" - - @classmethod - def get_display_name(cls): - return _("Dummy strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Dummy strategy" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "para1": { - "description": "number parameter example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - 
"maximum": 10.2, - }, - "para2": { - "description": "string parameter example", - "type": "string", - "default": "hello" - }, - }, - } diff --git a/watcher/decision_engine/strategy/strategies/dummy_with_resize.py b/watcher/decision_engine/strategy/strategies/dummy_with_resize.py deleted file mode 100644 index 1c4c27c..0000000 --- a/watcher/decision_engine/strategy/strategies/dummy_with_resize.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher._i18n import _ -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class DummyWithResize(base.DummyBaseStrategy): - """Dummy strategy used for integration testing via Tempest - - *Description* - - This strategy does not provide any useful optimization. Its only purpose - is to be used by Tempest tests. - - *Requirements* - - - - *Limitations* - - Do not use in production. 
- - *Spec URL* - - - """ - - NOP = "nop" - SLEEP = "sleep" - - def pre_execute(self): - pass - - def do_execute(self): - para1 = self.input_parameters.para1 - para2 = self.input_parameters.para2 - LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", - {'p1': para1, 'p2': para2}) - parameters = {'message': 'hello World'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - parameters = {'message': 'Welcome'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - self.solution.add_action(action_type=self.SLEEP, - input_parameters={'duration': 5.0}) - self.solution.add_action( - action_type='migrate', - resource_id='b199db0c-1408-4d52-b5a5-5ca14de0ff36', - input_parameters={ - 'source_node': 'compute2', - 'destination_node': 'compute3', - 'migration_type': 'live'}) - - self.solution.add_action( - action_type='migrate', - resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', - input_parameters={ - 'source_node': 'compute2', - 'destination_node': 'compute3', - 'migration_type': 'live'} - ) - self.solution.add_action( - action_type='resize', - resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', - input_parameters={'flavor': 'x2'} - ) - - def post_execute(self): - pass - - @classmethod - def get_name(cls): - return "dummy_with_resize" - - @classmethod - def get_display_name(cls): - return _("Dummy strategy with resize") - - @classmethod - def get_translatable_display_name(cls): - return "Dummy strategy with resize" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "para1": { - "description": "number parameter example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - "maximum": 10.2, - }, - "para2": { - "description": "string parameter example", - "type": "string", - "default": "hello" - }, - }, - } diff --git a/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py 
b/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py deleted file mode 100644 index d99db22..0000000 --- a/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import random - -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import units - -from watcher._i18n import _ -from watcher.decision_engine.scoring import scoring_factory -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class DummyWithScorer(base.DummyBaseStrategy): - """A dummy strategy using dummy scoring engines. - - This is a dummy strategy demonstrating how to work with scoring - engines. One scoring engine is predicting the workload type of a machine - based on the telemetry data, the other one is simply calculating the - average value for given elements in a list. Results are then passed to the - NOP action. 
- - The strategy is presenting the whole workflow: - - Get a reference to a scoring engine - - Prepare input data (features) for score calculation - - Perform score calculation - - Use scorer's metadata for results interpretation - """ - - DEFAULT_NAME = "dummy_with_scorer" - DEFAULT_DESCRIPTION = "Dummy Strategy with Scorer" - - NOP = "nop" - SLEEP = "sleep" - - def __init__(self, config, osc=None): - """Constructor: the signature should be identical within the subclasses - - :param config: Configuration related to this plugin - :type config: :py:class:`~.Struct` - :param osc: An OpenStackClients instance - :type osc: :py:class:`~.OpenStackClients` instance - """ - - super(DummyWithScorer, self).__init__(config, osc) - - # Setup Scoring Engines - self._workload_scorer = (scoring_factory - .get_scoring_engine('dummy_scorer')) - self._avg_scorer = (scoring_factory - .get_scoring_engine('dummy_avg_scorer')) - - # Get metainfo from Workload Scorer for result intepretation - metainfo = jsonutils.loads(self._workload_scorer.get_metainfo()) - self._workloads = {index: workload - for index, workload in enumerate( - metainfo['workloads'])} - - def pre_execute(self): - pass - - def do_execute(self): - # Simple "hello world" from strategy - param1 = self.input_parameters.param1 - param2 = self.input_parameters.param2 - LOG.debug('DummyWithScorer params: param1=%(p1)f, param2=%(p2)s', - {'p1': param1, 'p2': param2}) - parameters = {'message': 'Hello from Dummy Strategy with Scorer!'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - # Demonstrate workload scorer - features = self._generate_random_telemetry() - result_str = self._workload_scorer.calculate_score(features) - LOG.debug('Workload Scorer result: %s', result_str) - - # Parse the result using workloads from scorer's metainfo - result = self._workloads[jsonutils.loads(result_str)[0]] - LOG.debug('Detected Workload: %s', result) - parameters = {'message': 'Detected Workload: %s' % 
result} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - # Demonstrate AVG scorer - features = jsonutils.dumps(random.sample(range(1000), 20)) - result_str = self._avg_scorer.calculate_score(features) - LOG.debug('AVG Scorer result: %s', result_str) - result = jsonutils.loads(result_str)[0] - LOG.debug('AVG Scorer result (parsed): %d', result) - parameters = {'message': 'AVG Scorer result: %s' % result} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - # Sleep action - self.solution.add_action(action_type=self.SLEEP, - input_parameters={'duration': 5.0}) - - def post_execute(self): - pass - - @classmethod - def get_name(cls): - return 'dummy_with_scorer' - - @classmethod - def get_display_name(cls): - return _('Dummy Strategy using sample Scoring Engines') - - @classmethod - def get_translatable_display_name(cls): - return 'Dummy Strategy using sample Scoring Engines' - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - 'properties': { - 'param1': { - 'description': 'number parameter example', - 'type': 'number', - 'default': 3.2, - 'minimum': 1.0, - 'maximum': 10.2, - }, - 'param2': { - 'description': 'string parameter example', - 'type': "string", - 'default': "hello" - }, - }, - } - - def _generate_random_telemetry(self): - processor_time = random.randint(0, 100) - mem_total_bytes = 4*units.Gi - mem_avail_bytes = random.randint(1*units.Gi, 4*units.Gi) - mem_page_reads = random.randint(0, 2000) - mem_page_writes = random.randint(0, 2000) - disk_read_bytes = random.randint(0*units.Mi, 200*units.Mi) - disk_write_bytes = random.randint(0*units.Mi, 200*units.Mi) - net_bytes_received = random.randint(0*units.Mi, 20*units.Mi) - net_bytes_sent = random.randint(0*units.Mi, 10*units.Mi) - - return jsonutils.dumps([ - processor_time, mem_total_bytes, mem_avail_bytes, - mem_page_reads, mem_page_writes, disk_read_bytes, - disk_write_bytes, net_bytes_received, 
net_bytes_sent]) diff --git a/watcher/decision_engine/strategy/strategies/noisy_neighbor.py b/watcher/decision_engine/strategy/strategies/noisy_neighbor.py deleted file mode 100644 index d67b411..0000000 --- a/watcher/decision_engine/strategy/strategies/noisy_neighbor.py +++ /dev/null @@ -1,304 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class NoisyNeighbor(base.NoisyNeighborBaseStrategy): - - MIGRATION = "migrate" - # The meter to report L3 cache in ceilometer - METER_NAME_L3 = "cpu_l3_cache" - DEFAULT_WATCHER_PRIORITY = 5 - - def __init__(self, config, osc=None): - """Noisy Neighbor strategy using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: dict - :param osc: an OpenStackClients object, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - - super(NoisyNeighbor, self).__init__(config, osc) - - self.meter_name = self.METER_NAME_L3 - self._ceilometer = None - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - 
@ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - @classmethod - def get_name(cls): - return "noisy_neighbor" - - @classmethod - def get_display_name(cls): - return _("Noisy Neighbor") - - @classmethod - def get_translatable_display_name(cls): - return "Noisy Neighbor" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "cache_threshold": { - "description": "Performance drop in L3_cache threshold " - "for migration", - "type": "number", - "default": 35.0 - }, - "period": { - "description": "Aggregate time period of ceilometer", - "type": "number", - "default": 100.0 - }, - }, - } - - def get_current_and_previous_cache(self, instance): - - try: - current_cache = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=self.meter_name, period=self.period, - aggregate='avg') - - previous_cache = 2 * ( - self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=self.meter_name, - period=2*self.period, aggregate='avg')) - current_cache - - except Exception as exc: - LOG.exception(exc) - return None - - return current_cache, previous_cache - - def find_priority_instance(self, instance): - - current_cache, previous_cache = \ - self.get_current_and_previous_cache(instance) - - if None in (current_cache, previous_cache): - LOG.warning("Ceilometer unable to pick L3 Cache " - "values. Skipping the instance") - return None - - if (current_cache < (1 - (self.cache_threshold / 100.0)) * - previous_cache): - return instance - else: - return None - - def find_noisy_instance(self, instance): - - noisy_current_cache, noisy_previous_cache = \ - self.get_current_and_previous_cache(instance) - - if None in (noisy_current_cache, noisy_previous_cache): - LOG.warning("Ceilometer unable to pick " - "L3 Cache. 
Skipping the instance") - return None - - if (noisy_current_cache > (1 + (self.cache_threshold / 100.0)) * - noisy_previous_cache): - return instance - else: - return None - - def group_hosts(self): - - nodes = self.compute_model.get_all_compute_nodes() - size_cluster = len(nodes) - if size_cluster == 0: - raise wexc.ClusterEmpty() - - hosts_need_release = {} - hosts_target = [] - - for node in nodes.values(): - instances_of_node = self.compute_model.get_node_instances(node) - node_instance_count = len(instances_of_node) - - # Flag that tells us whether to skip the node or not. If True, - # the node is skipped. Will be true if we find a noisy instance or - # when potential priority instance will be same as potential noisy - # instance - loop_break_flag = False - - if node_instance_count > 1: - - instance_priority_list = [] - - for instance in instances_of_node: - instance_priority_list.append(instance) - - # If there is no metadata regarding watcher-priority, it takes - # DEFAULT_WATCHER_PRIORITY as priority. 
- instance_priority_list.sort(key=lambda a: ( - a.get('metadata').get('watcher-priority'), - self.DEFAULT_WATCHER_PRIORITY)) - - instance_priority_list_reverse = list(instance_priority_list) - instance_priority_list_reverse.reverse() - - for potential_priority_instance in instance_priority_list: - - priority_instance = self.find_priority_instance( - potential_priority_instance) - - if (priority_instance is not None): - - for potential_noisy_instance in ( - instance_priority_list_reverse): - if(potential_noisy_instance == - potential_priority_instance): - loop_break_flag = True - break - - noisy_instance = self.find_noisy_instance( - potential_noisy_instance) - - if noisy_instance is not None: - hosts_need_release[node.uuid] = { - 'priority_vm': potential_priority_instance, - 'noisy_vm': potential_noisy_instance} - LOG.debug("Priority VM found: %s" % ( - potential_priority_instance.uuid)) - LOG.debug("Noisy VM found: %s" % ( - potential_noisy_instance.uuid)) - loop_break_flag = True - break - - # No need to check other instances in the node - if loop_break_flag is True: - break - - if node.uuid not in hosts_need_release: - hosts_target.append(node) - - return hosts_need_release, hosts_target - - def calc_used_resource(self, node): - """Calculate the used vcpus, memory and disk based on VM flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def filter_dest_servers(self, hosts, instance_to_migrate): - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_memory = instance_to_migrate.memory - - dest_servers = [] - for host in hosts: - cores_used, mem_used, disk_used = self.calc_used_resource(host) - cores_available = host.vcpus - cores_used - disk_available = host.disk - 
disk_used - mem_available = host.memory - mem_used - if (cores_available >= required_cores and disk_available >= - required_disk and mem_available >= required_memory): - dest_servers.append(host) - - return dest_servers - - def pre_execute(self): - LOG.debug("Initializing Noisy Neighbor strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - self.cache_threshold = self.input_parameters.cache_threshold - self.period = self.input_parameters.period - - hosts_need_release, hosts_target = self.group_hosts() - - if len(hosts_need_release) == 0: - LOG.debug("No hosts require optimization") - return - - if len(hosts_target) == 0: - LOG.debug("No hosts available to migrate") - return - - mig_source_node_name = max(hosts_need_release.keys(), key=lambda a: - hosts_need_release[a]['priority_vm']) - instance_to_migrate = hosts_need_release[mig_source_node_name][ - 'noisy_vm'] - - if instance_to_migrate is None: - return - - dest_servers = self.filter_dest_servers(hosts_target, - instance_to_migrate) - - if len(dest_servers) == 0: - LOG.info("No proper target host could be found") - return - - # Destination node will be the first available node in the list. 
- mig_destination_node = dest_servers[0] - mig_source_node = self.compute_model.get_node_by_uuid( - mig_source_node_name) - - if self.compute_model.migrate_instance(instance_to_migrate, - mig_source_node, - mig_destination_node): - parameters = {'migration_type': 'live', - 'source_node': mig_source_node.uuid, - 'destination_node': mig_destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance_to_migrate.uuid, - input_parameters=parameters) - - def post_execute(self): - self.solution.model = self.compute_model - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/outlet_temp_control.py b/watcher/decision_engine/strategy/strategies/outlet_temp_control.py deleted file mode 100644 index bbafd02..0000000 --- a/watcher/decision_engine/strategy/strategies/outlet_temp_control.py +++ /dev/null @@ -1,333 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -*Good Thermal Strategy*: - -Towards to software defined infrastructure, the power and thermal -intelligences is being adopted to optimize workload, which can help -improve efficiency, reduce power, as well as to improve datacenter PUE -and lower down operation cost in data center. -Outlet (Exhaust Air) Temperature is one of the important thermal -telemetries to measure thermal/workload status of server. 
-""" - -import datetime - -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - - -LOG = log.getLogger(__name__) - - -class OutletTempControl(base.ThermalOptimizationBaseStrategy): - """[PoC] Outlet temperature control using live migration - - *Description* - - It is a migration strategy based on the outlet temperature of compute - hosts. It generates solutions to move a workload whenever a server's - outlet temperature is higher than the specified threshold. - - *Requirements* - - * Hardware: All computer hosts should support IPMI and PTAS technology - * Software: Ceilometer component ceilometer-agent-ipmi running - in each compute host, and Ceilometer API can report such telemetry - ``hardware.ipmi.node.outlet_temperature`` successfully. - * You must have at least 2 physical compute hosts to run this strategy. - - *Limitations* - - - This is a proof of concept that is not meant to be used in production - - We cannot forecast how many servers should be migrated. This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. 
- - It assume that live migrations are possible - - *Spec URL* - - https://github.com/openstack/watcher-specs/blob/master/specs/mitaka/approved/outlet-temperature-based-strategy.rst - """ # noqa - - # The meter to report outlet temperature in ceilometer - MIGRATION = "migrate" - - METRIC_NAMES = dict( - ceilometer=dict( - host_outlet_temp='hardware.ipmi.node.outlet_temperature'), - gnocchi=dict( - host_outlet_temp='hardware.ipmi.node.outlet_temperature'), - ) - - def __init__(self, config, osc=None): - """Outlet temperature control using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: dict - :param osc: an OpenStackClients object, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - super(OutletTempControl, self).__init__(config, osc) - self._ceilometer = None - self._gnocchi = None - - @classmethod - def get_name(cls): - return "outlet_temperature" - - @classmethod - def get_display_name(cls): - return _("Outlet temperature based strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Outlet temperature based strategy" - - @property - def period(self): - return self.input_parameters.get('period', 30) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "threshold": { - "description": "temperature threshold for migration", - "type": "number", - "default": 35.0 - }, - "period": { - "description": "The time interval in seconds for " - "getting statistic aggregation", - "type": "number", - "default": 30 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - }, - } - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = 
c - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, g): - self._gnocchi = g - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - def calc_used_resource(self, node): - """Calculate the used vcpus, memory and disk based on VM flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def group_hosts_by_outlet_temp(self): - """Group hosts based on outlet temp meters""" - nodes = self.compute_model.get_all_compute_nodes() - size_cluster = len(nodes) - if size_cluster == 0: - raise wexc.ClusterEmpty() - - hosts_need_release = [] - hosts_target = [] - metric_name = self.METRIC_NAMES[ - self.config.datasource]['host_outlet_temp'] - for node in nodes.values(): - resource_id = node.uuid - outlet_temp = None - - if self.config.datasource == "ceilometer": - outlet_temp = self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=metric_name, - period=self.period, - aggregate='avg' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.period)) - outlet_temp = self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=metric_name, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - # some hosts may not have outlet temp meters, remove from target - if outlet_temp is None: - LOG.warning("%s: no outlet temp data", resource_id) - continue - - LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp)) - instance_data = {'node': node, 'outlet_temp': outlet_temp} - if 
outlet_temp >= self.threshold: - # mark the node to release resources - hosts_need_release.append(instance_data) - else: - hosts_target.append(instance_data) - return hosts_need_release, hosts_target - - def choose_instance_to_migrate(self, hosts): - """Pick up an active instance to migrate from provided hosts""" - for instance_data in hosts: - mig_source_node = instance_data['node'] - instances_of_src = self.compute_model.get_node_instances( - mig_source_node) - for instance in instances_of_src: - try: - # select the first active instance to migrate - if (instance.state != - element.InstanceState.ACTIVE.value): - LOG.info("Instance not active, skipped: %s", - instance.uuid) - continue - return mig_source_node, instance - except wexc.InstanceNotFound as e: - LOG.exception(e) - LOG.info("Instance not found") - - return None - - def filter_dest_servers(self, hosts, instance_to_migrate): - """Only return hosts with sufficient available resources""" - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_memory = instance_to_migrate.memory - - # filter nodes without enough resource - dest_servers = [] - for instance_data in hosts: - host = instance_data['node'] - # available - cores_used, mem_used, disk_used = self.calc_used_resource(host) - cores_available = host.vcpus - cores_used - disk_available = host.disk - disk_used - mem_available = host.memory - mem_used - if cores_available >= required_cores \ - and disk_available >= required_disk \ - and mem_available >= required_memory: - dest_servers.append(instance_data) - - return dest_servers - - def pre_execute(self): - LOG.debug("Initializing Outlet temperature strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - # the migration plan will be triggered when the outlet temperature - # reaches threshold - 
self.threshold = self.input_parameters.threshold - LOG.debug("Initializing Outlet temperature strategy with threshold=%d", - self.threshold) - hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp() - - if len(hosts_need_release) == 0: - # TODO(zhenzanz): return something right if there's no hot servers - LOG.debug("No hosts require optimization") - return self.solution - - if len(hosts_target) == 0: - LOG.warning("No hosts under outlet temp threshold found") - return self.solution - - # choose the server with highest outlet t - hosts_need_release = sorted(hosts_need_release, - reverse=True, - key=lambda x: (x["outlet_temp"])) - - instance_to_migrate = self.choose_instance_to_migrate( - hosts_need_release) - # calculate the instance's cpu cores,memory,disk needs - if instance_to_migrate is None: - return self.solution - - mig_source_node, instance_src = instance_to_migrate - dest_servers = self.filter_dest_servers(hosts_target, instance_src) - # sort the filtered result by outlet temp - # pick up the lowest one as dest server - if len(dest_servers) == 0: - # TODO(zhenzanz): maybe to warn that there's no resource - # for instance. 
- LOG.info("No proper target host could be found") - return self.solution - - dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"])) - # always use the host with lowerest outlet temperature - mig_destination_node = dest_servers[0]['node'] - # generate solution to migrate the instance to the dest server, - if self.compute_model.migrate_instance( - instance_src, mig_source_node, mig_destination_node): - parameters = {'migration_type': 'live', - 'source_node': mig_source_node.uuid, - 'destination_node': mig_destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance_src.uuid, - input_parameters=parameters) - - def post_execute(self): - self.solution.model = self.compute_model - # TODO(v-francoise): Add the indicators to the solution - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/uniform_airflow.py b/watcher/decision_engine/strategy/strategies/uniform_airflow.py deleted file mode 100644 index e58b733..0000000 --- a/watcher/decision_engine/strategy/strategies/uniform_airflow.py +++ /dev/null @@ -1,442 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -""" -[PoC]Uniform Airflow using live migration - -*Description* - -It is a migration strategy based on the airflow of physical -servers. 
It generates solutions to move VM whenever a server's -airflow is higher than the specified threshold. - -*Requirements* - -* Hardware: compute node with NodeManager 3.0 support -* Software: Ceilometer component ceilometer-agent-compute running - in each compute node, and Ceilometer API can report such telemetry - "airflow, system power, inlet temperature" successfully. -* You must have at least 2 physical compute nodes to run this strategy - -*Limitations* - -- This is a proof of concept that is not meant to be used in production. -- We cannot forecast how many servers should be migrated. This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. -- It assumes that live migrations are possible. -""" - -import datetime - -from oslo_config import cfg -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class UniformAirflow(base.BaseStrategy): - """[PoC]Uniform Airflow using live migration - - *Description* - - It is a migration strategy based on the airflow of physical - servers. It generates solutions to move VM whenever a server's - airflow is higher than the specified threshold. - - *Requirements* - - * Hardware: compute node with NodeManager 3.0 support - * Software: Ceilometer component ceilometer-agent-compute running - in each compute node, and Ceilometer API can report such telemetry - "airflow, system power, inlet temperature" successfully. - * You must have at least 2 physical compute nodes to run this strategy - - *Limitations* - - - This is a proof of concept that is not meant to be used in production. - - We cannot forecast how many servers should be migrated. 
This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. - - It assumes that live migrations are possible. - """ - - # choose 300 seconds as the default duration of meter aggregation - PERIOD = 300 - - METRIC_NAMES = dict( - ceilometer=dict( - # The meter to report Airflow of physical server in ceilometer - host_airflow='hardware.ipmi.node.airflow', - # The meter to report inlet temperature of physical server - # in ceilometer - host_inlet_temp='hardware.ipmi.node.temperature', - # The meter to report system power of physical server in ceilometer - host_power='hardware.ipmi.node.power'), - gnocchi=dict( - # The meter to report Airflow of physical server in gnocchi - host_airflow='hardware.ipmi.node.airflow', - # The meter to report inlet temperature of physical server - # in gnocchi - host_inlet_temp='hardware.ipmi.node.temperature', - # The meter to report system power of physical server in gnocchi - host_power='hardware.ipmi.node.power'), - ) - - MIGRATION = "migrate" - - def __init__(self, config, osc=None): - """Using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: dict - :param osc: an OpenStackClients object - """ - super(UniformAirflow, self).__init__(config, osc) - # The migration plan will be triggered when the airflow reaches - # threshold - self.meter_name_airflow = self.METRIC_NAMES[ - self.config.datasource]['host_airflow'] - self.meter_name_inlet_t = self.METRIC_NAMES[ - self.config.datasource]['host_inlet_temp'] - self.meter_name_power = self.METRIC_NAMES[ - self.config.datasource]['host_power'] - self._ceilometer = None - self._gnocchi = None - self._period = self.PERIOD - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - 
@property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, g): - self._gnocchi = g - - @classmethod - def get_name(cls): - return "uniform_airflow" - - @classmethod - def get_display_name(cls): - return _("Uniform airflow migration strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Uniform airflow migration strategy" - - @classmethod - def get_goal_name(cls): - return "airflow_optimization" - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "threshold_airflow": { - "description": ("airflow threshold for migration, Unit is " - "0.1CFM"), - "type": "number", - "default": 400.0 - }, - "threshold_inlet_t": { - "description": ("inlet temperature threshold for " - "migration decision"), - "type": "number", - "default": 28.0 - }, - "threshold_power": { - "description": ("system power threshold for migration " - "decision"), - "type": "number", - "default": 350.0 - }, - "period": { - "description": "aggregate time period of ceilometer", - "type": "number", - "default": 300 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - }, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - def calculate_used_resource(self, node): - """Compute the used vcpus, memory and disk based on instance flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += 
instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def choose_instance_to_migrate(self, hosts): - """Pick up an active instance instance to migrate from provided hosts - - :param hosts: the array of dict which contains node object - """ - instances_tobe_migrate = [] - for nodemap in hosts: - source_node = nodemap['node'] - source_instances = self.compute_model.get_node_instances( - source_node) - if source_instances: - if self.config.datasource == "ceilometer": - inlet_t = self.ceilometer.statistic_aggregation( - resource_id=source_node.uuid, - meter_name=self.meter_name_inlet_t, - period=self._period, - aggregate='avg') - power = self.ceilometer.statistic_aggregation( - resource_id=source_node.uuid, - meter_name=self.meter_name_power, - period=self._period, - aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self._period)) - inlet_t = self.gnocchi.statistic_aggregation( - resource_id=source_node.uuid, - metric=self.meter_name_inlet_t, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean') - power = self.gnocchi.statistic_aggregation( - resource_id=source_node.uuid, - metric=self.meter_name_power, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean') - if (power < self.threshold_power and - inlet_t < self.threshold_inlet_t): - # hardware issue, migrate all instances from this node - for instance in source_instances: - instances_tobe_migrate.append(instance) - return source_node, instances_tobe_migrate - else: - # migrate the first active instance - for instance in source_instances: - if (instance.state != - element.InstanceState.ACTIVE.value): - LOG.info( - "Instance not active, skipped: %s", - instance.uuid) - continue - instances_tobe_migrate.append(instance) - return source_node, instances_tobe_migrate - 
else: - LOG.info("Instance not found on node: %s", - source_node.uuid) - - def filter_destination_hosts(self, hosts, instances_to_migrate): - """Find instance and host with sufficient available resources""" - # large instances go first - instances_to_migrate = sorted( - instances_to_migrate, reverse=True, - key=lambda x: (x.vcpus)) - # find hosts for instances - destination_hosts = [] - for instance_to_migrate in instances_to_migrate: - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_mem = instance_to_migrate.memory - dest_migrate_info = {} - for nodemap in hosts: - host = nodemap['node'] - if 'cores_used' not in nodemap: - # calculate the available resources - nodemap['cores_used'], nodemap['mem_used'],\ - nodemap['disk_used'] = self.calculate_used_resource( - host) - cores_available = (host.vcpus - - nodemap['cores_used']) - disk_available = (host.disk - - nodemap['disk_used']) - mem_available = ( - host.memory - nodemap['mem_used']) - if (cores_available >= required_cores and - disk_available >= required_disk and - mem_available >= required_mem): - dest_migrate_info['instance'] = instance_to_migrate - dest_migrate_info['node'] = host - nodemap['cores_used'] += required_cores - nodemap['mem_used'] += required_mem - nodemap['disk_used'] += required_disk - destination_hosts.append(dest_migrate_info) - break - # check if all instances have target hosts - if len(destination_hosts) != len(instances_to_migrate): - LOG.warning("Not all target hosts could be found; it might " - "be because there is not enough resource") - return None - return destination_hosts - - def group_hosts_by_airflow(self): - """Group hosts based on airflow meters""" - - nodes = self.compute_model.get_all_compute_nodes() - if not nodes: - raise wexc.ClusterEmpty() - overload_hosts = [] - nonoverload_hosts = [] - for node_id in nodes: - airflow = None - node = self.compute_model.get_node_by_uuid( - node_id) - resource_id = node.uuid - if 
self.config.datasource == "ceilometer": - airflow = self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=self.meter_name_airflow, - period=self._period, - aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self._period)) - airflow = self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=self.meter_name_airflow, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean') - # some hosts may not have airflow meter, remove from target - if airflow is None: - LOG.warning("%s: no airflow data", resource_id) - continue - - LOG.debug("%s: airflow %f" % (resource_id, airflow)) - nodemap = {'node': node, 'airflow': airflow} - if airflow >= self.threshold_airflow: - # mark the node to release resources - overload_hosts.append(nodemap) - else: - nonoverload_hosts.append(nodemap) - return overload_hosts, nonoverload_hosts - - def pre_execute(self): - LOG.debug("Initializing Uniform Airflow Strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - self.threshold_airflow = self.input_parameters.threshold_airflow - self.threshold_inlet_t = self.input_parameters.threshold_inlet_t - self.threshold_power = self.input_parameters.threshold_power - self._period = self.input_parameters.period - source_nodes, target_nodes = self.group_hosts_by_airflow() - - if not source_nodes: - LOG.debug("No hosts require optimization") - return self.solution - - if not target_nodes: - LOG.warning("No hosts currently have airflow under %s, " - "therefore there are no possible target " - "hosts for any migration", - self.threshold_airflow) - return self.solution - - # migrate the instance from server with largest airflow first - source_nodes = 
sorted(source_nodes, - reverse=True, - key=lambda x: (x["airflow"])) - instances_to_migrate = self.choose_instance_to_migrate(source_nodes) - if not instances_to_migrate: - return self.solution - source_node, instances_src = instances_to_migrate - # sort host with airflow - target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"])) - # find the hosts that have enough resource - # for the instance to be migrated - destination_hosts = self.filter_destination_hosts( - target_nodes, instances_src) - if not destination_hosts: - LOG.warning("No target host could be found; it might " - "be because there is not enough resources") - return self.solution - # generate solution to migrate the instance to the dest server, - for info in destination_hosts: - instance = info['instance'] - destination_node = info['node'] - if self.compute_model.migrate_instance( - instance, source_node, destination_node): - parameters = {'migration_type': 'live', - 'source_node': source_node.uuid, - 'destination_node': destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance.uuid, - input_parameters=parameters) - - def post_execute(self): - self.solution.model = self.compute_model - # TODO(v-francoise): Add the indicators to the solution - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py b/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py deleted file mode 100755 index 681f34d..0000000 --- a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py +++ /dev/null @@ -1,651 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vojtech CIMA -# Bruno GRAZIOLI -# Sean MURPHY -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -""" -*VM Workload Consolidation Strategy* - -A load consolidation strategy based on heuristic first-fit -algorithm which focuses on measured CPU utilization and tries to -minimize hosts which have too much or too little load respecting -resource capacity constraints. - -This strategy produces a solution resulting in more efficient -utilization of cluster resources using following four phases: - -* Offload phase - handling over-utilized resources -* Consolidation phase - handling under-utilized resources -* Solution optimization - reducing number of migrations -* Disability of unused compute nodes - -A capacity coefficients (cc) might be used to adjust optimization -thresholds. Different resources may require different coefficient -values as well as setting up different coefficient values in both -phases may lead to to more efficient consolidation in the end. -If the cc equals 1 the full resource capacity may be used, cc -values lower than 1 will lead to resource under utilization and -values higher than 1 will lead to resource overbooking. -e.g. If targeted utilization is 80 percent of a compute node capacity, -the coefficient in the consolidation phase will be 0.8, but -may any lower value in the offloading phase. The lower it gets -the cluster will appear more released (distributed) for the -following consolidation phase. - -As this strategy leverages VM live migration to move the load -from one compute node to another, this feature needs to be set up -correctly on all compute nodes within the cluster. 
-This strategy assumes it is possible to live migrate any VM from -an active compute node to any other active compute node. -""" -import datetime - -from oslo_config import cfg -from oslo_log import log -import six - -from watcher._i18n import _ -from watcher.common import exception -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy): - """VM Workload Consolidation Strategy""" - - HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent' - INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util' - - METRIC_NAMES = dict( - ceilometer=dict( - cpu_util_metric='cpu_util', - ram_util_metric='memory.usage', - ram_alloc_metric='memory', - disk_alloc_metric='disk.root.size'), - gnocchi=dict( - cpu_util_metric='cpu_util', - ram_util_metric='memory.usage', - ram_alloc_metric='memory', - disk_alloc_metric='disk.root.size'), - ) - - MIGRATION = "migrate" - CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" - - def __init__(self, config, osc=None): - super(VMWorkloadConsolidation, self).__init__(config, osc) - self._ceilometer = None - self._gnocchi = None - self.number_of_migrations = 0 - self.number_of_released_nodes = 0 - # self.ceilometer_instance_data_cache = dict() - self.datasource_instance_data_cache = dict() - - @classmethod - def get_name(cls): - return "vm_workload_consolidation" - - @classmethod - def get_display_name(cls): - return _("VM Workload Consolidation Strategy") - - @classmethod - def get_translatable_display_name(cls): - return "VM Workload Consolidation Strategy" - - @property - def period(self): - return self.input_parameters.get('period', 3600) - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - 
@ceilometer.setter - def ceilometer(self, ceilometer): - self._ceilometer = ceilometer - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "period": { - "description": "The time interval in seconds for " - "getting statistic aggregation", - "type": "number", - "default": 3600 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - } - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - def get_instance_state_str(self, instance): - """Get instance state in string format. - - :param instance: - """ - if isinstance(instance.state, six.string_types): - return instance.state - elif isinstance(instance.state, element.InstanceState): - return instance.state.value - else: - LOG.error('Unexpected instance state type, ' - 'state=%(state)s, state_type=%(st)s.' % - dict(state=instance.state, - st=type(instance.state))) - raise exception.WatcherException - - def get_node_status_str(self, node): - """Get node status in string format. - - :param node: - """ - if isinstance(node.status, six.string_types): - return node.status - elif isinstance(node.status, element.ServiceState): - return node.status.value - else: - LOG.error('Unexpected node status type, ' - 'status=%(status)s, status_type=%(st)s.' 
% - dict(status=node.status, - st=type(node.status))) - raise exception.WatcherException - - def add_action_enable_compute_node(self, node): - """Add an action for node enabler into the solution. - - :param node: node object - :return: None - """ - params = {'state': element.ServiceState.ENABLED.value} - self.solution.add_action( - action_type=self.CHANGE_NOVA_SERVICE_STATE, - resource_id=node.uuid, - input_parameters=params) - self.number_of_released_nodes -= 1 - - def add_action_disable_node(self, node): - """Add an action for node disability into the solution. - - :param node: node object - :return: None - """ - params = {'state': element.ServiceState.DISABLED.value} - self.solution.add_action( - action_type=self.CHANGE_NOVA_SERVICE_STATE, - resource_id=node.uuid, - input_parameters=params) - self.number_of_released_nodes += 1 - - def add_migration(self, instance, source_node, destination_node): - """Add an action for VM migration into the solution. - - :param instance: instance object - :param source_node: node object - :param destination_node: node object - :return: None - """ - instance_state_str = self.get_instance_state_str(instance) - if instance_state_str != element.InstanceState.ACTIVE.value: - # Watcher currently only supports live VM migration and block live - # VM migration which both requires migrated VM to be active. - # When supported, the cold migration may be used as a fallback - # migration mechanism to move non active VMs. - LOG.error( - 'Cannot live migrate: instance_uuid=%(instance_uuid)s, ' - 'state=%(instance_state)s.' % dict( - instance_uuid=instance.uuid, - instance_state=instance_state_str)) - return - - migration_type = 'live' - - # Here will makes repeated actions to enable the same compute node, - # when migrating VMs to the destination node which is disabled. - # Whether should we remove the same actions in the solution??? 
- destination_node_status_str = self.get_node_status_str( - destination_node) - if destination_node_status_str == element.ServiceState.DISABLED.value: - self.add_action_enable_compute_node(destination_node) - - if self.compute_model.migrate_instance( - instance, source_node, destination_node): - params = {'migration_type': migration_type, - 'source_node': source_node.uuid, - 'destination_node': destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance.uuid, - input_parameters=params) - self.number_of_migrations += 1 - - def disable_unused_nodes(self): - """Generate actions for disabling unused nodes. - - :return: None - """ - for node in self.compute_model.get_all_compute_nodes().values(): - if (len(self.compute_model.get_node_instances(node)) == 0 and - node.status != - element.ServiceState.DISABLED.value): - self.add_action_disable_node(node) - - def get_instance_utilization(self, instance): - """Collect cpu, ram and disk utilization statistics of a VM. 
- - :param instance: instance object - :param aggr: string - :return: dict(cpu(number of vcpus used), ram(MB used), disk(B used)) - """ - instance_cpu_util = None - instance_ram_util = None - instance_disk_util = None - - if instance.uuid in self.datasource_instance_data_cache.keys(): - return self.datasource_instance_data_cache.get(instance.uuid) - - cpu_util_metric = self.METRIC_NAMES[ - self.config.datasource]['cpu_util_metric'] - ram_util_metric = self.METRIC_NAMES[ - self.config.datasource]['ram_util_metric'] - ram_alloc_metric = self.METRIC_NAMES[ - self.config.datasource]['ram_alloc_metric'] - disk_alloc_metric = self.METRIC_NAMES[ - self.config.datasource]['disk_alloc_metric'] - - if self.config.datasource == "ceilometer": - instance_cpu_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=cpu_util_metric, - period=self.period, aggregate='avg') - instance_ram_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=ram_util_metric, - period=self.period, aggregate='avg') - if not instance_ram_util: - instance_ram_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=ram_alloc_metric, - period=self.period, aggregate='avg') - instance_disk_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=disk_alloc_metric, - period=self.period, aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.period)) - instance_cpu_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=cpu_util_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - instance_ram_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=ram_util_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - 
if not instance_ram_util: - instance_ram_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=ram_alloc_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - instance_disk_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=disk_alloc_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - if instance_cpu_util: - total_cpu_utilization = ( - instance.vcpus * (instance_cpu_util / 100.0)) - else: - total_cpu_utilization = instance.vcpus - - if not instance_ram_util: - instance_ram_util = instance.memory - LOG.warning('No values returned by %s for memory.usage, ' - 'use instance flavor ram value', instance.uuid) - - if not instance_disk_util: - instance_disk_util = instance.disk - LOG.warning('No values returned by %s for disk.root.size, ' - 'use instance flavor disk value', instance.uuid) - - self.datasource_instance_data_cache[instance.uuid] = dict( - cpu=total_cpu_utilization, ram=instance_ram_util, - disk=instance_disk_util) - return self.datasource_instance_data_cache.get(instance.uuid) - - def get_node_utilization(self, node): - """Collect cpu, ram and disk utilization statistics of a node. - - :param node: node object - :param aggr: string - :return: dict(cpu(number of cores used), ram(MB used), disk(B used)) - """ - node_instances = self.compute_model.get_node_instances(node) - node_ram_util = 0 - node_disk_util = 0 - node_cpu_util = 0 - for instance in node_instances: - instance_util = self.get_instance_utilization( - instance) - node_cpu_util += instance_util['cpu'] - node_ram_util += instance_util['ram'] - node_disk_util += instance_util['disk'] - - return dict(cpu=node_cpu_util, ram=node_ram_util, - disk=node_disk_util) - - def get_node_capacity(self, node): - """Collect cpu, ram and disk capacity of a node. 
- - :param node: node object - :return: dict(cpu(cores), ram(MB), disk(B)) - """ - return dict(cpu=node.vcpus, ram=node.memory, disk=node.disk_capacity) - - def get_relative_node_utilization(self, node): - """Return relative node utilization. - - :param node: node object - :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} - """ - relative_node_utilization = {} - util = self.get_node_utilization(node) - cap = self.get_node_capacity(node) - for k in util.keys(): - relative_node_utilization[k] = float(util[k]) / float(cap[k]) - return relative_node_utilization - - def get_relative_cluster_utilization(self): - """Calculate relative cluster utilization (rcu). - - RCU is an average of relative utilizations (rhu) of active nodes. - :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} - """ - nodes = self.compute_model.get_all_compute_nodes().values() - rcu = {} - counters = {} - for node in nodes: - node_status_str = self.get_node_status_str(node) - if node_status_str == element.ServiceState.ENABLED.value: - rhu = self.get_relative_node_utilization(node) - for k in rhu.keys(): - if k not in rcu: - rcu[k] = 0 - if k not in counters: - counters[k] = 0 - rcu[k] += rhu[k] - counters[k] += 1 - for k in rcu.keys(): - rcu[k] /= counters[k] - return rcu - - def is_overloaded(self, node, cc): - """Indicate whether a node is overloaded. - - This considers provided resource capacity coefficients (cc). - :param node: node object - :param cc: dictionary containing resource capacity coefficients - :return: [True, False] - """ - node_capacity = self.get_node_capacity(node) - node_utilization = self.get_node_utilization( - node) - metrics = ['cpu'] - for m in metrics: - if node_utilization[m] > node_capacity[m] * cc[m]: - return True - return False - - def instance_fits(self, instance, node, cc): - """Indicate whether is a node able to accommodate a VM. - - This considers provided resource capacity coefficients (cc). 
- :param instance: :py:class:`~.element.Instance` - :param node: node object - :param cc: dictionary containing resource capacity coefficients - :return: [True, False] - """ - node_capacity = self.get_node_capacity(node) - node_utilization = self.get_node_utilization(node) - instance_utilization = self.get_instance_utilization(instance) - metrics = ['cpu', 'ram', 'disk'] - for m in metrics: - if (instance_utilization[m] + node_utilization[m] > - node_capacity[m] * cc[m]): - return False - return True - - def optimize_solution(self): - """Optimize solution. - - This is done by eliminating unnecessary or circular set of migrations - which can be replaced by a more efficient solution. - e.g.: - - * A->B, B->C => replace migrations A->B, B->C with - a single migration A->C as both solution result in - VM running on node C which can be achieved with - one migration instead of two. - * A->B, B->A => remove A->B and B->A as they do not result - in a new VM placement. - """ - migrate_actions = ( - a for a in self.solution.actions if a[ - 'action_type'] == self.MIGRATION) - instance_to_be_migrated = ( - a['input_parameters']['resource_id'] for a in migrate_actions) - instance_uuids = list(set(instance_to_be_migrated)) - for instance_uuid in instance_uuids: - actions = list( - a for a in self.solution.actions if a[ - 'input_parameters'][ - 'resource_id'] == instance_uuid) - if len(actions) > 1: - src_uuid = actions[0]['input_parameters']['source_node'] - dst_uuid = actions[-1]['input_parameters']['destination_node'] - for a in actions: - self.solution.actions.remove(a) - self.number_of_migrations -= 1 - src_node = self.compute_model.get_node_by_uuid(src_uuid) - dst_node = self.compute_model.get_node_by_uuid(dst_uuid) - instance = self.compute_model.get_instance_by_uuid( - instance_uuid) - if self.compute_model.migrate_instance( - instance, dst_node, src_node): - self.add_migration(instance, src_node, dst_node) - - def offload_phase(self, cc): - """Perform offloading phase. 
- - This considers provided resource capacity coefficients. - Offload phase performing first-fit based bin packing to offload - overloaded nodes. This is done in a fashion of moving - the least CPU utilized VM first as live migration these - generally causes less troubles. This phase results in a cluster - with no overloaded nodes. - * This phase is be able to enable disabled nodes (if needed - and any available) in the case of the resource capacity provided by - active nodes is not able to accommodate all the load. - As the offload phase is later followed by the consolidation phase, - the node enabler in this phase doesn't necessarily results - in more enabled nodes in the final solution. - - :param cc: dictionary containing resource capacity coefficients - """ - sorted_nodes = sorted( - self.compute_model.get_all_compute_nodes().values(), - key=lambda x: self.get_node_utilization(x)['cpu']) - for node in reversed(sorted_nodes): - if self.is_overloaded(node, cc): - for instance in sorted( - self.compute_model.get_node_instances(node), - key=lambda x: self.get_instance_utilization( - x)['cpu'] - ): - for destination_node in reversed(sorted_nodes): - if self.instance_fits( - instance, destination_node, cc): - self.add_migration(instance, node, - destination_node) - break - if not self.is_overloaded(node, cc): - break - - def consolidation_phase(self, cc): - """Perform consolidation phase. - - This considers provided resource capacity coefficients. - Consolidation phase performing first-fit based bin packing. - First, nodes with the lowest cpu utilization are consolidated - by moving their load to nodes with the highest cpu utilization - which can accommodate the load. In this phase the most cpu utilized - VMs are prioritized as their load is more difficult to accommodate - in the system than less cpu utilized VMs which can be later used - to fill smaller CPU capacity gaps. 
- - :param cc: dictionary containing resource capacity coefficients - """ - sorted_nodes = sorted( - self.compute_model.get_all_compute_nodes().values(), - key=lambda x: self.get_node_utilization(x)['cpu']) - asc = 0 - for node in sorted_nodes: - instances = sorted( - self.compute_model.get_node_instances(node), - key=lambda x: self.get_instance_utilization(x)['cpu']) - for instance in reversed(instances): - dsc = len(sorted_nodes) - 1 - for destination_node in reversed(sorted_nodes): - if asc >= dsc: - break - if self.instance_fits( - instance, destination_node, cc): - self.add_migration(instance, node, - destination_node) - break - dsc -= 1 - asc += 1 - - def pre_execute(self): - if not self.compute_model: - raise exception.ClusterStateNotDefined() - - if self.compute_model.stale: - raise exception.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - """Execute strategy. - - This strategy produces a solution resulting in more - efficient utilization of cluster resources using following - four phases: - - * Offload phase - handling over-utilized resources - * Consolidation phase - handling under-utilized resources - * Solution optimization - reducing number of migrations - * Disability of unused nodes - - :param original_model: root_model object - """ - LOG.info('Executing Smart Strategy') - rcu = self.get_relative_cluster_utilization() - - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - - # Offloading phase - self.offload_phase(cc) - - # Consolidation phase - self.consolidation_phase(cc) - - # Optimize solution - self.optimize_solution() - - # disable unused nodes - self.disable_unused_nodes() - - rcu_after = self.get_relative_cluster_utilization() - info = { - "compute_nodes_count": len( - self.compute_model.get_all_compute_nodes()), - 'number_of_migrations': self.number_of_migrations, - 'number_of_released_nodes': - self.number_of_released_nodes, - 'relative_cluster_utilization_before': str(rcu), - 
'relative_cluster_utilization_after': str(rcu_after) - } - - LOG.debug(info) - - def post_execute(self): - self.solution.set_efficacy_indicators( - compute_nodes_count=len( - self.compute_model.get_all_compute_nodes()), - released_compute_nodes_count=self.number_of_released_nodes, - instance_migrations_count=self.number_of_migrations, - ) - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/workload_balance.py b/watcher/decision_engine/strategy/strategies/workload_balance.py deleted file mode 100644 index 63e638c..0000000 --- a/watcher/decision_engine/strategy/strategies/workload_balance.py +++ /dev/null @@ -1,414 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -""" -*[PoC]Workload balance using live migration* - -*Description* - -This strategy migrates a VM based on the VM workload of the hosts. -It makes decision to migrate a workload whenever a host's CPU -utilization % is higher than the specified threshold. The VM to -be moved should make the host close to average workload of all -hosts nodes. - -*Requirements* - -* Hardware: compute node should use the same physical CPUs -* Software: Ceilometer component ceilometer-agent-compute - running in each compute node, and Ceilometer API can - report such telemetry "cpu_util" successfully. -* You must have at least 2 physical compute nodes to run - this strategy. 
- -*Limitations* - -- This is a proof of concept that is not meant to be used in - production. -- We cannot forecast how many servers should be migrated. - This is the reason why we only plan a single virtual - machine migration at a time. So it's better to use this - algorithm with `CONTINUOUS` audits. -""" - -from __future__ import division -import datetime - -from oslo_config import cfg -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): - """[PoC]Workload balance using live migration - - *Description* - - It is a migration strategy based on the VM workload of physical - servers. It generates solutions to move a workload whenever a server's - CPU utilization % is higher than the specified threshold. - The VM to be moved should make the host close to average workload - of all compute nodes. - - *Requirements* - - * Hardware: compute node should use the same physical CPUs - * Software: Ceilometer component ceilometer-agent-compute running - in each compute node, and Ceilometer API can report such telemetry - "cpu_util" successfully. - * You must have at least 2 physical compute nodes to run this strategy - - *Limitations* - - - This is a proof of concept that is not meant to be used in production - - We cannot forecast how many servers should be migrated. This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. 
- - It assume that live migrations are possible - """ - - # The meter to report CPU utilization % of VM in ceilometer - METER_NAME = "cpu_util" - # Unit: %, value range is [0 , 100] - - MIGRATION = "migrate" - - def __init__(self, config, osc=None): - """Workload balance using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: :py:class:`~.Struct` instance - :param osc: :py:class:`~.OpenStackClients` instance - """ - super(WorkloadBalance, self).__init__(config, osc) - # the migration plan will be triggered when the CPU utilization % - # reaches threshold - self._meter = self.METER_NAME - self._ceilometer = None - self._gnocchi = None - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - @classmethod - def get_name(cls): - return "workload_balance" - - @classmethod - def get_display_name(cls): - return _("Workload Balance Migration Strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Workload Balance Migration Strategy" - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "threshold": { - "description": "workload threshold for migration", - "type": "number", - "default": 25.0 - }, - "period": { - "description": "aggregate time period of ceilometer", - "type": "number", - "default": 300 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - 
}, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - def calculate_used_resource(self, node): - """Calculate the used vcpus, memory and disk based on VM flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache): - """Pick up an active instance instance to migrate from provided hosts - - :param hosts: the array of dict which contains node object - :param avg_workload: the average workload value of all nodes - :param workload_cache: the map contains instance to workload mapping - """ - for instance_data in hosts: - source_node = instance_data['node'] - source_instances = self.compute_model.get_node_instances( - source_node) - if source_instances: - delta_workload = instance_data['workload'] - avg_workload - min_delta = 1000000 - instance_id = None - for instance in source_instances: - try: - # select the first active VM to migrate - if (instance.state != - element.InstanceState.ACTIVE.value): - LOG.debug("Instance not active, skipped: %s", - instance.uuid) - continue - current_delta = ( - delta_workload - workload_cache[instance.uuid]) - if 0 <= current_delta < min_delta: - min_delta = current_delta - instance_id = instance.uuid - except wexc.InstanceNotFound: - LOG.error("Instance not found; error: %s", - instance_id) - if instance_id: - return (source_node, - self.compute_model.get_instance_by_uuid( - instance_id)) - else: - LOG.info("VM not found from node: %s", - source_node.uuid) - - def filter_destination_hosts(self, hosts, instance_to_migrate, - avg_workload, 
workload_cache): - """Only return hosts with sufficient available resources""" - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_mem = instance_to_migrate.memory - - # filter nodes without enough resource - destination_hosts = [] - src_instance_workload = workload_cache[instance_to_migrate.uuid] - for instance_data in hosts: - host = instance_data['node'] - workload = instance_data['workload'] - # calculate the available resources - cores_used, mem_used, disk_used = self.calculate_used_resource( - host) - cores_available = host.vcpus - cores_used - disk_available = host.disk - disk_used - mem_available = host.memory - mem_used - if ( - cores_available >= required_cores and - disk_available >= required_disk and - mem_available >= required_mem and - ((src_instance_workload + workload) < - self.threshold / 100 * host.vcpus) - ): - destination_hosts.append(instance_data) - - return destination_hosts - - def group_hosts_by_cpu_util(self): - """Calculate the workloads of each node - - try to find out the nodes which have reached threshold - and the nodes which are under threshold. - and also calculate the average workload value of all nodes. - and also generate the instance workload map. 
- """ - - nodes = self.compute_model.get_all_compute_nodes() - cluster_size = len(nodes) - if not nodes: - raise wexc.ClusterEmpty() - overload_hosts = [] - nonoverload_hosts = [] - # total workload of cluster - cluster_workload = 0.0 - # use workload_cache to store the workload of VMs for reuse purpose - workload_cache = {} - for node_id in nodes: - node = self.compute_model.get_node_by_uuid(node_id) - instances = self.compute_model.get_node_instances(node) - node_workload = 0.0 - for instance in instances: - cpu_util = None - try: - if self.config.datasource == "ceilometer": - cpu_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=self._meter, - period=self._period, - aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self._period)) - cpu_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=self._meter, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - except Exception as exc: - LOG.exception(exc) - LOG.error("Can not get cpu_util from %s", - self.config.datasource) - continue - if cpu_util is None: - LOG.debug("Instance (%s): cpu_util is None", instance.uuid) - continue - workload_cache[instance.uuid] = cpu_util * instance.vcpus / 100 - node_workload += workload_cache[instance.uuid] - LOG.debug("VM (%s): cpu_util %f", instance.uuid, cpu_util) - node_cpu_util = node_workload / node.vcpus * 100 - - cluster_workload += node_workload - - instance_data = { - 'node': node, "cpu_util": node_cpu_util, - 'workload': node_workload} - if node_cpu_util >= self.threshold: - # mark the node to release resources - overload_hosts.append(instance_data) - else: - nonoverload_hosts.append(instance_data) - - avg_workload = cluster_workload / cluster_size - - return overload_hosts, nonoverload_hosts, avg_workload, workload_cache - - def 
pre_execute(self): - """Pre-execution phase - - This can be used to fetch some pre-requisites or data. - """ - LOG.info("Initializing Workload Balance Strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - """Strategy execution phase - - This phase is where you should put the main logic of your strategy. - """ - self.threshold = self.input_parameters.threshold - self._period = self.input_parameters.period - source_nodes, target_nodes, avg_workload, workload_cache = ( - self.group_hosts_by_cpu_util()) - - if not source_nodes: - LOG.debug("No hosts require optimization") - return self.solution - - if not target_nodes: - LOG.warning("No hosts current have CPU utilization under %s " - "percent, therefore there are no possible target " - "hosts for any migration", - self.threshold) - return self.solution - - # choose the server with largest cpu_util - source_nodes = sorted(source_nodes, - reverse=True, - key=lambda x: (x[self.METER_NAME])) - - instance_to_migrate = self.choose_instance_to_migrate( - source_nodes, avg_workload, workload_cache) - if not instance_to_migrate: - return self.solution - source_node, instance_src = instance_to_migrate - # find the hosts that have enough resource for the VM to be migrated - destination_hosts = self.filter_destination_hosts( - target_nodes, instance_src, avg_workload, workload_cache) - # sort the filtered result by workload - # pick up the lowest one as dest server - if not destination_hosts: - # for instance. 
- LOG.warning("No proper target host could be found, it might " - "be because of there's no enough CPU/Memory/DISK") - return self.solution - destination_hosts = sorted(destination_hosts, - key=lambda x: (x["cpu_util"])) - # always use the host with lowerest CPU utilization - mig_destination_node = destination_hosts[0]['node'] - # generate solution to migrate the instance to the dest server, - if self.compute_model.migrate_instance( - instance_src, source_node, mig_destination_node): - parameters = {'migration_type': 'live', - 'source_node': source_node.uuid, - 'destination_node': mig_destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance_src.uuid, - input_parameters=parameters) - - def post_execute(self): - """Post-execution phase - - This can be used to compute the global efficacy - """ - self.solution.model = self.compute_model - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/workload_stabilization.py b/watcher/decision_engine/strategy/strategies/workload_stabilization.py deleted file mode 100644 index 7e3e96f..0000000 --- a/watcher/decision_engine/strategy/strategies/workload_stabilization.py +++ /dev/null @@ -1,520 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica LLC -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -""" -*Workload Stabilization control using live migration* - -This is workload stabilization strategy based on standard deviation -algorithm. The goal is to determine if there is an overload in a cluster -and respond to it by migrating VMs to stabilize the cluster. - -It assumes that live migrations are possible in your cluster. - -""" - -import copy -import datetime -import itertools -import math -import random -import re - -import oslo_cache -from oslo_config import cfg -from oslo_log import log -import oslo_utils - -from watcher._i18n import _ -from watcher.common import exception -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -def _set_memoize(conf): - oslo_cache.configure(conf) - region = oslo_cache.create_region() - configured_region = oslo_cache.configure_cache_region(conf, region) - return oslo_cache.core.get_memoization_decorator(conf, - configured_region, - 'cache') - - -class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy): - """Workload Stabilization control using live migration""" - - MIGRATION = "migrate" - MEMOIZE = _set_memoize(CONF) - - def __init__(self, config, osc=None): - """Workload Stabilization control using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: :py:class:`~.Struct` instance - :param osc: :py:class:`~.OpenStackClients` instance - """ - super(WorkloadStabilization, self).__init__(config, osc) - self._ceilometer = None - self._gnocchi = None - self._nova = None - self.weights = None - self.metrics = None - self.thresholds = None - self.host_choice = None - self.instance_metrics = None - self.retry_count = None - self.periods = None - - @classmethod - def get_name(cls): - return "workload_stabilization" - - @classmethod - def 
get_display_name(cls): - return _("Workload stabilization") - - @classmethod - def get_translatable_display_name(cls): - return "Workload stabilization" - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - return { - "properties": { - "metrics": { - "description": "Metrics used as rates of cluster loads.", - "type": "array", - "default": ["cpu_util", "memory.resident"] - }, - "thresholds": { - "description": "Dict where key is a metric and value " - "is a trigger value.", - "type": "object", - "default": {"cpu_util": 0.2, "memory.resident": 0.2} - }, - "weights": { - "description": "These weights used to calculate " - "common standard deviation. Name of weight" - " contains meter name and _weight suffix.", - "type": "object", - "default": {"cpu_util_weight": 1.0, - "memory.resident_weight": 1.0} - }, - "instance_metrics": { - "description": "Mapping to get hardware statistics using" - " instance metrics", - "type": "object", - "default": {"cpu_util": "compute.node.cpu.percent", - "memory.resident": "hardware.memory.used"} - }, - "host_choice": { - "description": "Method of host's choice. There are cycle," - " retry and fullsearch methods. " - "Cycle will iterate hosts in cycle. " - "Retry will get some hosts random " - "(count defined in retry_count option). " - "Fullsearch will return each host " - "from list.", - "type": "string", - "default": "retry" - }, - "retry_count": { - "description": "Count of random returned hosts", - "type": "number", - "default": 1 - }, - "periods": { - "description": "These periods are used to get statistic " - "aggregation for instance and host " - "metrics. The period is simply a repeating" - " interval of time into which the samples" - " are grouped for aggregation. 
Watcher " - "uses only the last period of all received" - " ones.", - "type": "object", - "default": {"instance": 720, "node": 600} - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - } - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @property - def nova(self): - if self._nova is None: - self._nova = self.osc.nova() - return self._nova - - @nova.setter - def nova(self, n): - self._nova = n - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - def transform_instance_cpu(self, instance_load, host_vcpus): - """Transform instance cpu utilization to overall host cpu utilization. - - :param instance_load: dict that contains instance uuid and - utilization info. - :param host_vcpus: int - :return: float value - """ - return (instance_load['cpu_util'] * - (instance_load['vcpus'] / float(host_vcpus))) - - @MEMOIZE - def get_instance_load(self, instance): - """Gathering instance load through ceilometer/gnocchi statistic. - - :param instance: instance for which statistic is gathered. 
- :return: dict - """ - LOG.debug('get_instance_load started') - instance_load = {'uuid': instance.uuid, 'vcpus': instance.vcpus} - for meter in self.metrics: - avg_meter = None - if self.config.datasource == "ceilometer": - avg_meter = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=meter, - period=self.periods['instance'], - aggregate='min' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.periods['instance'])) - avg_meter = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=meter, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - if avg_meter is None: - LOG.warning( - "No values returned by %(resource_id)s " - "for %(metric_name)s" % dict( - resource_id=instance.uuid, metric_name=meter)) - avg_meter = 0 - if meter == 'cpu_util': - avg_meter /= float(100) - instance_load[meter] = avg_meter - return instance_load - - def normalize_hosts_load(self, hosts): - normalized_hosts = copy.deepcopy(hosts) - for host in normalized_hosts: - if 'memory.resident' in normalized_hosts[host]: - node = self.compute_model.get_node_by_uuid(host) - normalized_hosts[host]['memory.resident'] /= float(node.memory) - - return normalized_hosts - - def get_available_nodes(self): - return {node_uuid: node for node_uuid, node in - self.compute_model.get_all_compute_nodes().items() - if node.state == element.ServiceState.ONLINE.value and - node.status == element.ServiceState.ENABLED.value} - - def get_hosts_load(self): - """Get load of every available host by gathering instances load""" - hosts_load = {} - for node_id, node in self.get_available_nodes().items(): - hosts_load[node_id] = {} - hosts_load[node_id]['vcpus'] = node.vcpus - for metric in self.metrics: - resource_id = '' - avg_meter = None - meter_name = self.instance_metrics[metric] - if re.match('^compute.node', 
meter_name) is not None: - resource_id = "%s_%s" % (node.uuid, node.hostname) - else: - resource_id = node_id - if self.config.datasource == "ceilometer": - avg_meter = self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=self.instance_metrics[metric], - period=self.periods['node'], - aggregate='avg' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.periods['node'])) - avg_meter = self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=self.instance_metrics[metric], - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - - if avg_meter is None: - if meter_name == 'hardware.memory.used': - avg_meter = node.memory - if meter_name == 'compute.node.cpu.percent': - avg_meter = 1 - LOG.warning('No values returned by node %s for %s', - node_id, meter_name) - else: - if meter_name == 'hardware.memory.used': - avg_meter /= oslo_utils.units.Ki - if meter_name == 'compute.node.cpu.percent': - avg_meter /= 100 - hosts_load[node_id][metric] = avg_meter - return hosts_load - - def get_sd(self, hosts, meter_name): - """Get standard deviation among hosts by specified meter""" - mean = 0 - variaton = 0 - for host_id in hosts: - mean += hosts[host_id][meter_name] - mean /= len(hosts) - for host_id in hosts: - variaton += (hosts[host_id][meter_name] - mean) ** 2 - variaton /= len(hosts) - sd = math.sqrt(variaton) - return sd - - def calculate_weighted_sd(self, sd_case): - """Calculate common standard deviation among meters on host""" - weighted_sd = 0 - for metric, value in zip(self.metrics, sd_case): - try: - weighted_sd += value * float(self.weights[metric + '_weight']) - except KeyError as exc: - LOG.exception(exc) - raise exception.WatcherException( - _("Incorrect mapping: could not find associated weight" - " for %s in weight dict.") % metric) - return weighted_sd - - def 
calculate_migration_case(self, hosts, instance, src_node, dst_node): - """Calculate migration case - - Return list of standard deviation values, that appearing in case of - migration of instance from source host to destination host - :param hosts: hosts with their workload - :param instance: the virtual machine - :param src_node: the source node - :param dst_node: the destination node - :return: list of standard deviation values - """ - migration_case = [] - new_hosts = copy.deepcopy(hosts) - instance_load = self.get_instance_load(instance) - s_host_vcpus = new_hosts[src_node.uuid]['vcpus'] - d_host_vcpus = new_hosts[dst_node.uuid]['vcpus'] - for metric in self.metrics: - if metric is 'cpu_util': - new_hosts[src_node.uuid][metric] -= ( - self.transform_instance_cpu(instance_load, s_host_vcpus)) - new_hosts[dst_node.uuid][metric] += ( - self.transform_instance_cpu(instance_load, d_host_vcpus)) - else: - new_hosts[src_node.uuid][metric] -= instance_load[metric] - new_hosts[dst_node.uuid][metric] += instance_load[metric] - normalized_hosts = self.normalize_hosts_load(new_hosts) - for metric in self.metrics: - migration_case.append(self.get_sd(normalized_hosts, metric)) - migration_case.append(new_hosts) - return migration_case - - def simulate_migrations(self, hosts): - """Make sorted list of pairs instance:dst_host""" - def yield_nodes(nodes): - if self.host_choice == 'cycle': - for i in itertools.cycle(nodes): - yield [i] - if self.host_choice == 'retry': - while True: - yield random.sample(nodes, self.retry_count) - if self.host_choice == 'fullsearch': - while True: - yield nodes - - instance_host_map = [] - nodes = list(self.get_available_nodes()) - for src_host in nodes: - src_node = self.compute_model.get_node_by_uuid(src_host) - c_nodes = copy.copy(nodes) - c_nodes.remove(src_host) - node_list = yield_nodes(c_nodes) - for instance in self.compute_model.get_node_instances(src_node): - min_sd_case = {'value': len(self.metrics)} - if instance.state not in 
[element.InstanceState.ACTIVE.value, - element.InstanceState.PAUSED.value]: - continue - for dst_host in next(node_list): - dst_node = self.compute_model.get_node_by_uuid(dst_host) - sd_case = self.calculate_migration_case( - hosts, instance, src_node, dst_node) - - weighted_sd = self.calculate_weighted_sd(sd_case[:-1]) - - if weighted_sd < min_sd_case['value']: - min_sd_case = { - 'host': dst_node.uuid, 'value': weighted_sd, - 's_host': src_node.uuid, 'instance': instance.uuid} - instance_host_map.append(min_sd_case) - return sorted(instance_host_map, key=lambda x: x['value']) - - def check_threshold(self): - """Check if cluster is needed in balancing""" - hosts_load = self.get_hosts_load() - normalized_load = self.normalize_hosts_load(hosts_load) - for metric in self.metrics: - metric_sd = self.get_sd(normalized_load, metric) - if metric_sd > float(self.thresholds[metric]): - return self.simulate_migrations(hosts_load) - - def add_migration(self, - resource_id, - migration_type, - source_node, - destination_node): - parameters = {'migration_type': migration_type, - 'source_node': source_node, - 'destination_node': destination_node} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=resource_id, - input_parameters=parameters) - - def create_migration_instance(self, mig_instance, mig_source_node, - mig_destination_node): - """Create migration VM""" - if self.compute_model.migrate_instance( - mig_instance, mig_source_node, mig_destination_node): - self.add_migration(mig_instance.uuid, 'live', - mig_source_node.uuid, - mig_destination_node.uuid) - - def migrate(self, instance_uuid, src_host, dst_host): - mig_instance = self.compute_model.get_instance_by_uuid(instance_uuid) - mig_source_node = self.compute_model.get_node_by_uuid( - src_host) - mig_destination_node = self.compute_model.get_node_by_uuid( - dst_host) - self.create_migration_instance(mig_instance, mig_source_node, - mig_destination_node) - - def fill_solution(self): - self.solution.model 
= self.compute_model - return self.solution - - def pre_execute(self): - LOG.info("Initializing Workload Stabilization") - - if not self.compute_model: - raise exception.ClusterStateNotDefined() - - if self.compute_model.stale: - raise exception.ClusterStateStale() - - self.weights = self.input_parameters.weights - self.metrics = self.input_parameters.metrics - self.thresholds = self.input_parameters.thresholds - self.host_choice = self.input_parameters.host_choice - self.instance_metrics = self.input_parameters.instance_metrics - self.retry_count = self.input_parameters.retry_count - self.periods = self.input_parameters.periods - - def do_execute(self): - migration = self.check_threshold() - if migration: - hosts_load = self.get_hosts_load() - min_sd = 1 - balanced = False - for instance_host in migration: - instance = self.compute_model.get_instance_by_uuid( - instance_host['instance']) - src_node = self.compute_model.get_node_by_uuid( - instance_host['s_host']) - dst_node = self.compute_model.get_node_by_uuid( - instance_host['host']) - if instance.disk > dst_node.disk: - continue - instance_load = self.calculate_migration_case( - hosts_load, instance, src_node, dst_node) - weighted_sd = self.calculate_weighted_sd(instance_load[:-1]) - if weighted_sd < min_sd: - min_sd = weighted_sd - hosts_load = instance_load[-1] - self.migrate(instance_host['instance'], - instance_host['s_host'], - instance_host['host']) - - for metric, value in zip(self.metrics, instance_load[:-1]): - if value < float(self.thresholds[metric]): - balanced = True - break - if balanced: - break - - def post_execute(self): - """Post-execution phase - - This can be used to compute the global efficacy - """ - self.fill_solution() - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/sync.py b/watcher/decision_engine/sync.py deleted file mode 100644 index 17c3318..0000000 --- a/watcher/decision_engine/sync.py +++ /dev/null @@ -1,571 +0,0 @@ -# -*- encoding: utf-8 -*- -# 
Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import ast -import collections - -from oslo_log import log - -from watcher.common import context -from watcher.decision_engine.loading import default -from watcher.decision_engine.scoring import scoring_factory -from watcher import objects - -LOG = log.getLogger(__name__) - -GoalMapping = collections.namedtuple( - 'GoalMapping', ['name', 'display_name', 'efficacy_specification']) -StrategyMapping = collections.namedtuple( - 'StrategyMapping', - ['name', 'goal_name', 'display_name', 'parameters_spec']) -ScoringEngineMapping = collections.namedtuple( - 'ScoringEngineMapping', - ['name', 'description', 'metainfo']) - -IndicatorSpec = collections.namedtuple( - 'IndicatorSpec', ['name', 'description', 'unit', 'schema']) - - -class Syncer(object): - """Syncs all available goals and strategies with the Watcher DB""" - - def __init__(self): - self.ctx = context.make_context() - self.discovered_map = None - - self._available_goals = None - self._available_goals_map = None - - self._available_strategies = None - self._available_strategies_map = None - - self._available_scoringengines = None - self._available_scoringengines_map = None - - # This goal mapping maps stale goal IDs to the synced goal - self.goal_mapping = dict() - # This strategy mapping maps stale strategy IDs to the synced goal - self.strategy_mapping = dict() - # Maps stale scoring engine IDs to the synced scoring engines - 
self.se_mapping = dict() - - self.stale_audit_templates_map = {} - self.stale_audits_map = {} - self.stale_action_plans_map = {} - - @property - def available_goals(self): - """Goals loaded from DB""" - if self._available_goals is None: - self._available_goals = objects.Goal.list(self.ctx) - return self._available_goals - - @property - def available_strategies(self): - """Strategies loaded from DB""" - if self._available_strategies is None: - self._available_strategies = objects.Strategy.list(self.ctx) - return self._available_strategies - - @property - def available_scoringengines(self): - """Scoring Engines loaded from DB""" - if self._available_scoringengines is None: - self._available_scoringengines = (objects.ScoringEngine - .list(self.ctx)) - return self._available_scoringengines - - @property - def available_goals_map(self): - """Mapping of goals loaded from DB""" - if self._available_goals_map is None: - self._available_goals_map = { - GoalMapping( - name=g.name, - display_name=g.display_name, - efficacy_specification=tuple( - IndicatorSpec(**item) - for item in g.efficacy_specification)): g - for g in self.available_goals - } - return self._available_goals_map - - @property - def available_strategies_map(self): - if self._available_strategies_map is None: - goals_map = {g.id: g.name for g in self.available_goals} - self._available_strategies_map = { - StrategyMapping( - name=s.name, goal_name=goals_map[s.goal_id], - display_name=s.display_name, - parameters_spec=str(s.parameters_spec)): s - for s in self.available_strategies - } - return self._available_strategies_map - - @property - def available_scoringengines_map(self): - if self._available_scoringengines_map is None: - self._available_scoringengines_map = { - ScoringEngineMapping( - name=s.id, description=s.description, - metainfo=s.metainfo): s - for s in self.available_scoringengines - } - return self._available_scoringengines_map - - def sync(self): - self.discovered_map = self._discover() - 
goals_map = self.discovered_map["goals"] - strategies_map = self.discovered_map["strategies"] - scoringengines_map = self.discovered_map["scoringengines"] - - for goal_name, goal_map in goals_map.items(): - if goal_map in self.available_goals_map: - LOG.info("Goal %s already exists", goal_name) - continue - - self.goal_mapping.update(self._sync_goal(goal_map)) - - for strategy_name, strategy_map in strategies_map.items(): - if (strategy_map in self.available_strategies_map and - strategy_map.goal_name not in - [g.name for g in self.goal_mapping.values()]): - LOG.info("Strategy %s already exists", strategy_name) - continue - - self.strategy_mapping.update(self._sync_strategy(strategy_map)) - - for se_name, se_map in scoringengines_map.items(): - if se_map in self.available_scoringengines_map: - LOG.info("Scoring Engine %s already exists", - se_name) - continue - - self.se_mapping.update(self._sync_scoringengine(se_map)) - - self._sync_objects() - self._soft_delete_removed_scoringengines() - - def _sync_goal(self, goal_map): - goal_name = goal_map.name - goal_mapping = dict() - # Goals that are matching by name with the given discovered goal name - matching_goals = [g for g in self.available_goals - if g.name == goal_name] - stale_goals = self._soft_delete_stale_goals(goal_map, matching_goals) - - if stale_goals or not matching_goals: - goal = objects.Goal(self.ctx) - goal.name = goal_name - goal.display_name = goal_map.display_name - goal.efficacy_specification = [ - indicator._asdict() - for indicator in goal_map.efficacy_specification] - goal.create() - LOG.info("Goal %s created", goal_name) - - # Updating the internal states - self.available_goals_map[goal] = goal_map - # Map the old goal IDs to the new (equivalent) goal - for matching_goal in matching_goals: - goal_mapping[matching_goal.id] = goal - - return goal_mapping - - def _sync_strategy(self, strategy_map): - strategy_name = strategy_map.name - strategy_display_name = strategy_map.display_name - goal_name 
= strategy_map.goal_name - parameters_spec = strategy_map.parameters_spec - strategy_mapping = dict() - - # Strategies that are matching by name with the given - # discovered strategy name - matching_strategies = [s for s in self.available_strategies - if s.name == strategy_name] - stale_strategies = self._soft_delete_stale_strategies( - strategy_map, matching_strategies) - - if stale_strategies or not matching_strategies: - strategy = objects.Strategy(self.ctx) - strategy.name = strategy_name - strategy.display_name = strategy_display_name - strategy.goal_id = objects.Goal.get_by_name(self.ctx, goal_name).id - strategy.parameters_spec = parameters_spec - strategy.create() - LOG.info("Strategy %s created", strategy_name) - - # Updating the internal states - self.available_strategies_map[strategy] = strategy_map - # Map the old strategy IDs to the new (equivalent) strategy - for matching_strategy in matching_strategies: - strategy_mapping[matching_strategy.id] = strategy - - return strategy_mapping - - def _sync_scoringengine(self, scoringengine_map): - scoringengine_name = scoringengine_map.name - se_mapping = dict() - # Scoring Engines matching by id with discovered Scoring engine - matching_scoringengines = [se for se in self.available_scoringengines - if se.name == scoringengine_name] - stale_scoringengines = self._soft_delete_stale_scoringengines( - scoringengine_map, matching_scoringengines) - - if stale_scoringengines or not matching_scoringengines: - scoringengine = objects.ScoringEngine(self.ctx) - scoringengine.name = scoringengine_name - scoringengine.description = scoringengine_map.description - scoringengine.metainfo = scoringengine_map.metainfo - scoringengine.create() - LOG.info("Scoring Engine %s created", scoringengine_name) - - # Updating the internal states - self.available_scoringengines_map[scoringengine] = \ - scoringengine_map - # Map the old scoring engine names to the new (equivalent) SE - for matching_scoringengine in 
matching_scoringengines: - se_mapping[matching_scoringengine.name] = scoringengine - - return se_mapping - - def _sync_objects(self): - # First we find audit templates, audits and action plans that are stale - # because their associated goal or strategy has been modified and we - # update them in-memory - self._find_stale_audit_templates_due_to_goal() - self._find_stale_audit_templates_due_to_strategy() - - self._find_stale_audits_due_to_goal() - self._find_stale_audits_due_to_strategy() - - self._find_stale_action_plans_due_to_strategy() - self._find_stale_action_plans_due_to_audit() - - # Then we handle the case where an audit template, an audit or an - # action plan becomes stale because its related goal does not - # exist anymore. - self._soft_delete_removed_goals() - # Then we handle the case where an audit template, an audit or an - # action plan becomes stale because its related strategy does not - # exist anymore. - self._soft_delete_removed_strategies() - - # Finally, we save into the DB the updated stale audit templates - # and soft delete stale audits and action plans - for stale_audit_template in self.stale_audit_templates_map.values(): - stale_audit_template.save() - LOG.info("Audit Template '%s' synced", - stale_audit_template.name) - - for stale_audit in self.stale_audits_map.values(): - stale_audit.save() - LOG.info("Stale audit '%s' synced and cancelled", - stale_audit.uuid) - - for stale_action_plan in self.stale_action_plans_map.values(): - stale_action_plan.save() - LOG.info("Stale action plan '%s' synced and cancelled", - stale_action_plan.uuid) - - def _find_stale_audit_templates_due_to_goal(self): - for goal_id, synced_goal in self.goal_mapping.items(): - filters = {"goal_id": goal_id} - stale_audit_templates = objects.AuditTemplate.list( - self.ctx, filters=filters) - - # Update the goal ID for the stale audit templates (w/o saving) - for audit_template in stale_audit_templates: - if audit_template.id not in self.stale_audit_templates_map: - 
audit_template.goal_id = synced_goal.id - self.stale_audit_templates_map[audit_template.id] = ( - audit_template) - else: - self.stale_audit_templates_map[ - audit_template.id].goal_id = synced_goal.id - - def _find_stale_audit_templates_due_to_strategy(self): - for strategy_id, synced_strategy in self.strategy_mapping.items(): - filters = {"strategy_id": strategy_id} - stale_audit_templates = objects.AuditTemplate.list( - self.ctx, filters=filters) - - # Update strategy IDs for all stale audit templates (w/o saving) - for audit_template in stale_audit_templates: - if audit_template.id not in self.stale_audit_templates_map: - audit_template.strategy_id = synced_strategy.id - self.stale_audit_templates_map[audit_template.id] = ( - audit_template) - else: - self.stale_audit_templates_map[ - audit_template.id].strategy_id = synced_strategy.id - - def _find_stale_audits_due_to_goal(self): - for goal_id, synced_goal in self.goal_mapping.items(): - filters = {"goal_id": goal_id} - stale_audits = objects.Audit.list( - self.ctx, filters=filters, eager=True) - - # Update the goal ID for the stale audits (w/o saving) - for audit in stale_audits: - if audit.id not in self.stale_audits_map: - audit.goal_id = synced_goal.id - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[audit.id].goal_id = synced_goal.id - - def _find_stale_audits_due_to_strategy(self): - for strategy_id, synced_strategy in self.strategy_mapping.items(): - filters = {"strategy_id": strategy_id} - stale_audits = objects.Audit.list( - self.ctx, filters=filters, eager=True) - # Update strategy IDs for all stale audits (w/o saving) - for audit in stale_audits: - if audit.id not in self.stale_audits_map: - audit.strategy_id = synced_strategy.id - audit.state = objects.audit.State.CANCELLED - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[ - audit.id].strategy_id = synced_strategy.id - self.stale_audits_map[ - audit.id].state = objects.audit.State.CANCELLED - - def 
_find_stale_action_plans_due_to_strategy(self): - for strategy_id, synced_strategy in self.strategy_mapping.items(): - filters = {"strategy_id": strategy_id} - stale_action_plans = objects.ActionPlan.list( - self.ctx, filters=filters, eager=True) - - # Update strategy IDs for all stale action plans (w/o saving) - for action_plan in stale_action_plans: - if action_plan.id not in self.stale_action_plans_map: - action_plan.strategy_id = synced_strategy.id - action_plan.state = objects.action_plan.State.CANCELLED - self.stale_action_plans_map[action_plan.id] = action_plan - else: - self.stale_action_plans_map[ - action_plan.id].strategy_id = synced_strategy.id - self.stale_action_plans_map[ - action_plan.id].state = ( - objects.action_plan.State.CANCELLED) - - def _find_stale_action_plans_due_to_audit(self): - for audit_id, synced_audit in self.stale_audits_map.items(): - filters = {"audit_id": audit_id} - stale_action_plans = objects.ActionPlan.list( - self.ctx, filters=filters, eager=True) - - # Update audit IDs for all stale action plans (w/o saving) - for action_plan in stale_action_plans: - if action_plan.id not in self.stale_action_plans_map: - action_plan.audit_id = synced_audit.id - action_plan.state = objects.action_plan.State.CANCELLED - self.stale_action_plans_map[action_plan.id] = action_plan - else: - self.stale_action_plans_map[ - action_plan.id].audit_id = synced_audit.id - self.stale_action_plans_map[ - action_plan.id].state = ( - objects.action_plan.State.CANCELLED) - - def _soft_delete_removed_goals(self): - removed_goals = [ - g for g in self.available_goals - if g.name not in self.discovered_map['goals']] - for removed_goal in removed_goals: - removed_goal.soft_delete() - filters = {"goal_id": removed_goal.id} - - invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) - for at in invalid_ats: - LOG.warning( - "Audit Template '%(audit_template)s' references a " - "goal that does not exist", audit_template=at.uuid) - - stale_audits = 
objects.Audit.list( - self.ctx, filters=filters, eager=True) - for audit in stale_audits: - LOG.warning( - "Audit '%(audit)s' references a " - "goal that does not exist", audit=audit.uuid) - if audit.id not in self.stale_audits_map: - audit.state = objects.audit.State.CANCELLED - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[ - audit.id].state = objects.audit.State.CANCELLED - - def _soft_delete_removed_strategies(self): - removed_strategies = [ - s for s in self.available_strategies - if s.name not in self.discovered_map['strategies']] - - for removed_strategy in removed_strategies: - removed_strategy.soft_delete() - filters = {"strategy_id": removed_strategy.id} - invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) - for at in invalid_ats: - LOG.info( - "Audit Template '%(audit_template)s' references a " - "strategy that does not exist", - audit_template=at.uuid) - # In this case we can reset the strategy ID to None - # so the audit template can still achieve the same goal - # but with a different strategy - if at.id not in self.stale_audit_templates_map: - at.strategy_id = None - self.stale_audit_templates_map[at.id] = at - else: - self.stale_audit_templates_map[at.id].strategy_id = None - - stale_audits = objects.Audit.list( - self.ctx, filters=filters, eager=True) - for audit in stale_audits: - LOG.warning( - "Audit '%(audit)s' references a " - "strategy that does not exist", audit=audit.uuid) - if audit.id not in self.stale_audits_map: - audit.state = objects.audit.State.CANCELLED - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[ - audit.id].state = objects.audit.State.CANCELLED - - stale_action_plans = objects.ActionPlan.list( - self.ctx, filters=filters, eager=True) - for action_plan in stale_action_plans: - LOG.warning( - "Action Plan '%(action_plan)s' references a " - "strategy that does not exist", - action_plan=action_plan.uuid) - if action_plan.id not in self.stale_action_plans_map: - 
action_plan.state = objects.action_plan.State.CANCELLED - self.stale_action_plans_map[action_plan.id] = action_plan - else: - self.stale_action_plans_map[ - action_plan.id].state = ( - objects.action_plan.State.CANCELLED) - - def _soft_delete_removed_scoringengines(self): - removed_se = [ - se for se in self.available_scoringengines - if se.name not in self.discovered_map['scoringengines']] - for se in removed_se: - LOG.info("Scoring Engine %s removed", se.name) - se.soft_delete() - - def _discover(self): - strategies_map = {} - goals_map = {} - scoringengines_map = {} - discovered_map = { - "goals": goals_map, - "strategies": strategies_map, - "scoringengines": scoringengines_map} - goal_loader = default.DefaultGoalLoader() - implemented_goals = goal_loader.list_available() - - strategy_loader = default.DefaultStrategyLoader() - implemented_strategies = strategy_loader.list_available() - - for goal_cls in implemented_goals.values(): - goals_map[goal_cls.get_name()] = GoalMapping( - name=goal_cls.get_name(), - display_name=goal_cls.get_translatable_display_name(), - efficacy_specification=tuple( - IndicatorSpec(**indicator.to_dict()) - for indicator in goal_cls.get_efficacy_specification( - ).get_indicators_specifications())) - - for strategy_cls in implemented_strategies.values(): - strategies_map[strategy_cls.get_name()] = StrategyMapping( - name=strategy_cls.get_name(), - goal_name=strategy_cls.get_goal_name(), - display_name=strategy_cls.get_translatable_display_name(), - parameters_spec=str(strategy_cls.get_schema())) - - for se in scoring_factory.get_scoring_engine_list(): - scoringengines_map[se.get_name()] = ScoringEngineMapping( - name=se.get_name(), - description=se.get_description(), - metainfo=se.get_metainfo()) - - return discovered_map - - def _soft_delete_stale_goals(self, goal_map, matching_goals): - """Soft delete the stale goals - - :param goal_map: discovered goal map - :type goal_map: :py:class:`~.GoalMapping` instance - :param matching_goals: 
list of DB goals matching the goal_map - :type matching_goals: list of :py:class:`~.objects.Goal` instances - :returns: A list of soft deleted DB goals (subset of matching goals) - :rtype: list of :py:class:`~.objects.Goal` instances - """ - goal_display_name = goal_map.display_name - goal_name = goal_map.name - goal_efficacy_spec = goal_map.efficacy_specification - - stale_goals = [] - for matching_goal in matching_goals: - if (matching_goal.efficacy_specification == goal_efficacy_spec and - matching_goal.display_name == goal_display_name): - LOG.info("Goal %s unchanged", goal_name) - else: - LOG.info("Goal %s modified", goal_name) - matching_goal.soft_delete() - stale_goals.append(matching_goal) - - return stale_goals - - def _soft_delete_stale_strategies(self, strategy_map, matching_strategies): - strategy_name = strategy_map.name - strategy_display_name = strategy_map.display_name - parameters_spec = strategy_map.parameters_spec - - stale_strategies = [] - for matching_strategy in matching_strategies: - if (matching_strategy.display_name == strategy_display_name and - matching_strategy.goal_id not in self.goal_mapping and - matching_strategy.parameters_spec == - ast.literal_eval(parameters_spec)): - LOG.info("Strategy %s unchanged", strategy_name) - else: - LOG.info("Strategy %s modified", strategy_name) - matching_strategy.soft_delete() - stale_strategies.append(matching_strategy) - - return stale_strategies - - def _soft_delete_stale_scoringengines( - self, scoringengine_map, matching_scoringengines): - se_name = scoringengine_map.name - se_description = scoringengine_map.description - se_metainfo = scoringengine_map.metainfo - - stale_scoringengines = [] - for matching_scoringengine in matching_scoringengines: - if (matching_scoringengine.description == se_description and - matching_scoringengine.metainfo == se_metainfo): - LOG.info("Scoring Engine %s unchanged", se_name) - else: - LOG.info("Scoring Engine %s modified", se_name) - 
matching_scoringengine.soft_delete() - stale_scoringengines.append(matching_scoringengine) - - return stale_scoringengines diff --git a/watcher/hacking/__init__.py b/watcher/hacking/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/hacking/checks.py b/watcher/hacking/checks.py deleted file mode 100644 index 6d4fab1..0000000 --- a/watcher/hacking/checks.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re - - -def flake8ext(f): - """Decorator to indicate flake8 extension. - - This is borrowed from hacking.core.flake8ext(), but at now it is used - only for unit tests to know which are watcher flake8 extensions. - """ - f.name = __name__ - return f - - -# Guidelines for writing new hacking checks -# -# - Use only for Watcher specific tests. OpenStack general tests -# should be submitted to the common 'hacking' module. -# - Pick numbers in the range N3xx. Find the current test with -# the highest allocated number and then pick the next value. -# - Keep the test method code in the source file ordered based -# on the N3xx value. 
-# - List the new rule in the top level HACKING.rst file - -_all_log_levels = { - 'reserved': '_', # this should never be used with a log unless - # it is a variable used for a log message and - # a exception - 'error': '_LE', - 'info': '_LI', - 'warning': '_LW', - 'critical': '_LC', - 'exception': '_LE', -} -_all_hints = set(_all_log_levels.values()) - - -def _regex_for_level(level, hint): - return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % { - 'level': level, - 'wrong_hints': '|'.join(_all_hints - set([hint])), - } - - -log_warn = re.compile( - r"(.)*LOG\.(warn)\(\s*('|\"|_)") -unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b") -unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b") - - -@flake8ext -def use_jsonutils(logical_line, filename): - msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s" - - # Skip list is currently empty. - json_check_skipped_patterns = [] - - for pattern in json_check_skipped_patterns: - if pattern in filename: - return - - if "json." in logical_line: - json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] - for f in json_funcs: - pos = logical_line.find('json.%s' % f) - if pos != -1: - yield (pos, msg % {'fun': f[:-1]}) - - -@flake8ext -def no_translate_debug_logs(logical_line, filename): - """Check for 'LOG.debug(_(' and 'LOG.debug(_Lx(' - - As per our translation policy, - https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation - we shouldn't translate debug level logs. - - * This check assumes that 'LOG' is a logger. 
- N319 - """ - for hint in _all_hints: - if logical_line.startswith("LOG.debug(%s(" % hint): - yield(0, "N319 Don't translate debug level logs") - - -@flake8ext -def check_assert_called_once_with(logical_line, filename): - # Try to detect unintended calls of nonexistent mock methods like: - # assert_called_once - # assertCalledOnceWith - # assert_has_called - # called_once_with - if 'watcher/tests/' in filename: - if '.assert_called_once_with(' in logical_line: - return - uncased_line = logical_line.lower().replace('_', '') - - check_calls = ['.assertcalledonce', '.calledoncewith'] - if any(x for x in check_calls if x in uncased_line): - msg = ("N322: Possible use of no-op mock method. " - "please use assert_called_once_with.") - yield (0, msg) - - if '.asserthascalled' in uncased_line: - msg = ("N322: Possible use of no-op mock method. " - "please use assert_has_calls.") - yield (0, msg) - - -@flake8ext -def check_python3_xrange(logical_line): - if re.search(r"\bxrange\s*\(", logical_line): - yield(0, "N325: Do not use xrange. 
Use range, or six.moves.range for " - "large loops.") - - -@flake8ext -def check_no_basestring(logical_line): - if re.search(r"\bbasestring\b", logical_line): - msg = ("N326: basestring is not Python3-compatible, use " - "six.string_types instead.") - yield(0, msg) - - -@flake8ext -def check_python3_no_iteritems(logical_line): - if re.search(r".*\.iteritems\(\)", logical_line): - msg = ("N327: Use six.iteritems() instead of dict.iteritems().") - yield(0, msg) - - -@flake8ext -def check_asserttrue(logical_line, filename): - if 'watcher/tests/' in filename: - if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertTrue(observed) instead of " - "assertEqual(True, observed)") - yield (0, msg) - if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertTrue(observed) instead of " - "assertEqual(True, observed)") - yield (0, msg) - - -@flake8ext -def check_assertfalse(logical_line, filename): - if 'watcher/tests/' in filename: - if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertFalse(observed) instead of " - "assertEqual(False, observed)") - yield (0, msg) - if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertFalse(observed) instead of " - "assertEqual(False, observed)") - yield (0, msg) - - -@flake8ext -def check_assertempty(logical_line, filename): - if 'watcher/tests/' in filename: - msg = ("N330: Use assertEqual(*empty*, observed) instead of " - "assertEqual(observed, *empty*). 
*empty* contains " - "{}, [], (), set(), '', \"\"") - empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" - reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties - if re.search(reg, logical_line): - yield (0, msg) - - -@flake8ext -def check_assertisinstance(logical_line, filename): - if 'watcher/tests/' in filename: - if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", - logical_line): - msg = ("N331: Use assertIsInstance(observed, type) instead " - "of assertTrue(isinstance(observed, type))") - yield (0, msg) - - -@flake8ext -def check_assertequal_for_httpcode(logical_line, filename): - msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " - "instead of assertEqual(observed_http_code, expected_http_code)") - if 'watcher/tests/' in filename: - if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", - logical_line): - yield (0, msg) - - -@flake8ext -def check_log_warn_deprecated(logical_line, filename): - msg = "N333: Use LOG.warning due to compatibility with py3" - if log_warn.match(logical_line): - yield (0, msg) - - -@flake8ext -def check_oslo_i18n_wrapper(logical_line, filename, noqa): - """Check for watcher.i18n usage. - - N340(watcher/foo/bar.py): from watcher.i18n import _ - Okay(watcher/foo/bar.py): from watcher.i18n import _ # noqa - """ - - if noqa: - return - - split_line = logical_line.split() - modulename = os.path.normpath(filename).split('/')[0] - bad_i18n_module = '%s.i18n' % modulename - - if (len(split_line) > 1 and split_line[0] in ('import', 'from')): - if (split_line[1] == bad_i18n_module or - modulename != 'watcher' and split_line[1] in ('watcher.i18n', - 'watcher._i18n')): - msg = ("N340: %(found)s is found. Use %(module)s._i18n instead." - % {'found': split_line[1], 'module': modulename}) - yield (0, msg) - - -@flake8ext -def check_builtins_gettext(logical_line, tokens, filename, lines, noqa): - """Check usage of builtins gettext _(). 
- - N341(watcher/foo.py): _('foo') - Okay(watcher/i18n.py): _('foo') - Okay(watcher/_i18n.py): _('foo') - Okay(watcher/foo.py): _('foo') # noqa - """ - - if noqa: - return - - modulename = os.path.normpath(filename).split('/')[0] - - if '%s/tests' % modulename in filename: - return - - if os.path.basename(filename) in ('i18n.py', '_i18n.py'): - return - - token_values = [t[1] for t in tokens] - i18n_wrapper = '%s._i18n' % modulename - - if '_' in token_values: - i18n_import_line_found = False - for line in lines: - split_line = [elm.rstrip(',') for elm in line.split()] - if (len(split_line) > 1 and split_line[0] == 'from' and - split_line[1] == i18n_wrapper and - '_' in split_line): - i18n_import_line_found = True - break - if not i18n_import_line_found: - msg = ("N341: _ from python builtins module is used. " - "Use _ from %s instead." % i18n_wrapper) - yield (0, msg) - - -def factory(register): - register(use_jsonutils) - register(check_assert_called_once_with) - register(no_translate_debug_logs) - register(check_python3_xrange) - register(check_no_basestring) - register(check_python3_no_iteritems) - register(check_asserttrue) - register(check_assertfalse) - register(check_assertempty) - register(check_assertisinstance) - register(check_assertequal_for_httpcode) - register(check_log_warn_deprecated) - register(check_oslo_i18n_wrapper) - register(check_builtins_gettext) diff --git a/watcher/notifications/__init__.py b/watcher/notifications/__init__.py deleted file mode 100644 index cfed437..0000000 --- a/watcher/notifications/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Note(gibi): Importing publicly called functions so the caller code does not -# need to be changed after we moved these function inside the package -# Todo(gibi): remove these imports after legacy notifications using these are -# transformed to versioned notifications -from watcher.notifications import action # noqa -from watcher.notifications import action_plan # noqa -from watcher.notifications import audit # noqa -from watcher.notifications import exception # noqa -from watcher.notifications import goal # noqa -from watcher.notifications import service # noqa -from watcher.notifications import strategy # noqa diff --git a/watcher/notifications/action.py b/watcher/notifications/action.py deleted file mode 100644 index 449a012..0000000 --- a/watcher/notifications/action.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -from watcher.common import context as wcontext -from watcher.common import exception -from watcher.notifications import action_plan as ap_notifications -from watcher.notifications import base as notificationbase -from watcher.notifications import exception as exception_notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class ActionPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('action', 'uuid'), - - 'action_type': ('action', 'action_type'), - 'input_parameters': ('action', 'input_parameters'), - 'state': ('action', 'state'), - 'parents': ('action', 'parents'), - - 'created_at': ('action', 'created_at'), - 'updated_at': ('action', 'updated_at'), - 'deleted_at': ('action', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'action_type': wfields.StringField(nullable=False), - 'input_parameters': wfields.DictField(nullable=False, default={}), - 'state': wfields.StringField(nullable=False), - 'parents': wfields.ListOfUUIDsField(nullable=False, default=[]), - 'action_plan_uuid': wfields.UUIDField(), - 'action_plan': wfields.ObjectField('TerseActionPlanPayload'), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, action, **kwargs): - super(ActionPayload, self).__init__(**kwargs) - self.populate_schema(action=action) - - -@base.WatcherObjectRegistry.register_notification -class ActionStateUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - 
-@base.WatcherObjectRegistry.register_notification -class ActionCreatePayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action, action_plan): - super(ActionCreatePayload, self).__init__( - action=action, - action_plan=action_plan) - - -@base.WatcherObjectRegistry.register_notification -class ActionUpdatePayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'state_update': wfields.ObjectField('ActionStateUpdatePayload'), - } - - def __init__(self, action, state_update, action_plan): - super(ActionUpdatePayload, self).__init__( - action=action, - state_update=state_update, - action_plan=action_plan) - - -@base.WatcherObjectRegistry.register_notification -class ActionExecutionPayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, action, action_plan, **kwargs): - super(ActionExecutionPayload, self).__init__( - action=action, - action_plan=action_plan, - **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class ActionDeletePayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action, action_plan): - super(ActionDeletePayload, self).__init__( - action=action, - action_plan=action_plan) - - -@notificationbase.notification_sample('action-execution-error.json') -@notificationbase.notification_sample('action-execution-end.json') -@notificationbase.notification_sample('action-execution-start.json') -@base.WatcherObjectRegistry.register_notification -class ActionExecutionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionExecutionPayload') - } - - -@notificationbase.notification_sample('action-create.json') -@base.WatcherObjectRegistry.register_notification -class 
ActionCreateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionCreatePayload') - } - - -@notificationbase.notification_sample('action-update.json') -@base.WatcherObjectRegistry.register_notification -class ActionUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionUpdatePayload') - } - - -@notificationbase.notification_sample('action-delete.json') -@base.WatcherObjectRegistry.register_notification -class ActionDeleteNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionDeletePayload') - } - - -def _get_action_plan_payload(action): - action_plan = None - strategy_uuid = None - audit = None - try: - action_plan = action.action_plan - audit = objects.Audit.get(wcontext.make_context(show_deleted=True), - action_plan.audit_id) - if audit.strategy_id: - strategy_uuid = objects.Strategy.get( - wcontext.make_context(show_deleted=True), - audit.strategy_id).uuid - except NotImplementedError: - raise exception.EagerlyLoadedActionRequired(action=action.uuid) - - action_plan_payload = ap_notifications.TerseActionPlanPayload( - action_plan=action_plan, - audit_uuid=audit.uuid, strategy_uuid=strategy_uuid) - - return action_plan_payload - - -def send_create(context, action, service='infra-optim', host=None): - """Emit an action.create notification.""" - action_plan_payload = _get_action_plan_payload(action) - - versioned_payload = ActionCreatePayload( - action=action, - action_plan=action_plan_payload, - ) - - notification = ActionCreateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action', - action=wfields.NotificationAction.CREATE), - publisher=notificationbase.NotificationPublisher( - host=host or 
CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_update(context, action, service='infra-optim', - host=None, old_state=None): - """Emit an action.update notification.""" - action_plan_payload = _get_action_plan_payload(action) - - state_update = ActionStateUpdatePayload( - old_state=old_state, - state=action.state if old_state else None) - - versioned_payload = ActionUpdatePayload( - action=action, - state_update=state_update, - action_plan=action_plan_payload, - ) - - notification = ActionUpdateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_delete(context, action, service='infra-optim', host=None): - """Emit an action.delete notification.""" - action_plan_payload = _get_action_plan_payload(action) - - versioned_payload = ActionDeletePayload( - action=action, - action_plan=action_plan_payload, - ) - - notification = ActionDeleteNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action', - action=wfields.NotificationAction.DELETE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_execution_notification(context, action, notification_action, phase, - priority=wfields.NotificationPriority.INFO, - service='infra-optim', host=None): - """Emit an action execution notification.""" - action_plan_payload = _get_action_plan_payload(action) - - fault = None - if phase == wfields.NotificationPhase.ERROR: - fault = exception_notifications.ExceptionPayload.from_exception() - - versioned_payload = ActionExecutionPayload( - action=action, - 
action_plan=action_plan_payload, - fault=fault, - ) - - notification = ActionExecutionNotification( - priority=priority, - event_type=notificationbase.EventType( - object='action', - action=notification_action, - phase=phase), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/action_plan.py b/watcher/notifications/action_plan.py deleted file mode 100644 index 97b714b..0000000 --- a/watcher/notifications/action_plan.py +++ /dev/null @@ -1,340 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -from watcher.common import context as wcontext -from watcher.common import exception -from watcher.notifications import audit as audit_notifications -from watcher.notifications import base as notificationbase -from watcher.notifications import exception as exception_notifications -from watcher.notifications import strategy as strategy_notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class TerseActionPlanPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('action_plan', 'uuid'), - - 'state': ('action_plan', 'state'), - 'global_efficacy': ('action_plan', 'global_efficacy'), - - 'created_at': ('action_plan', 'created_at'), - 'updated_at': ('action_plan', 'updated_at'), - 'deleted_at': ('action_plan', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'state': wfields.StringField(), - 'global_efficacy': wfields.FlexibleDictField(nullable=True), - 'audit_uuid': wfields.UUIDField(), - 'strategy_uuid': wfields.UUIDField(nullable=True), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, action_plan, audit=None, strategy=None, **kwargs): - super(TerseActionPlanPayload, self).__init__(audit=audit, - strategy=strategy, - **kwargs) - self.populate_schema(action_plan=action_plan) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanPayload(TerseActionPlanPayload): - SCHEMA = { - 'uuid': ('action_plan', 'uuid'), - - 'state': ('action_plan', 'state'), - 'global_efficacy': ('action_plan', 'global_efficacy'), - - 'created_at': ('action_plan', 'created_at'), - 'updated_at': ('action_plan', 'updated_at'), - 'deleted_at': ('action_plan', 'deleted_at'), - } - 
- # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'audit': wfields.ObjectField('TerseAuditPayload'), - 'strategy': wfields.ObjectField('StrategyPayload'), - } - - def __init__(self, action_plan, audit, strategy, **kwargs): - if not kwargs.get('audit_uuid'): - kwargs['audit_uuid'] = audit.uuid - - if strategy and not kwargs.get('strategy_uuid'): - kwargs['strategy_uuid'] = strategy.uuid - - super(ActionPlanPayload, self).__init__( - action_plan, audit=audit, strategy=strategy, **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanStateUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanCreatePayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action_plan, audit, strategy): - super(ActionPlanCreatePayload, self).__init__( - action_plan=action_plan, - audit=audit, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanUpdatePayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'state_update': wfields.ObjectField('ActionPlanStateUpdatePayload'), - } - - def __init__(self, action_plan, state_update, audit, strategy): - super(ActionPlanUpdatePayload, self).__init__( - action_plan=action_plan, - state_update=state_update, - audit=audit, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanActionPayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, action_plan, audit, strategy, **kwargs): - super(ActionPlanActionPayload, self).__init__( - action_plan=action_plan, - audit=audit, - 
strategy=strategy, - **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanDeletePayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action_plan, audit, strategy): - super(ActionPlanDeletePayload, self).__init__( - action_plan=action_plan, - audit=audit, - strategy=strategy) - - -@notificationbase.notification_sample('action_plan-execution-error.json') -@notificationbase.notification_sample('action_plan-execution-end.json') -@notificationbase.notification_sample('action_plan-execution-start.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanActionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanActionPayload') - } - - -@notificationbase.notification_sample('action_plan-create.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanCreateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanCreatePayload') - } - - -@notificationbase.notification_sample('action_plan-update.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanUpdatePayload') - } - - -@notificationbase.notification_sample('action_plan-delete.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanDeleteNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanDeletePayload') - } - - -def _get_common_payload(action_plan): - audit = None - strategy = None - try: - audit = action_plan.audit - strategy = action_plan.strategy - except NotImplementedError: - raise 
exception.EagerlyLoadedActionPlanRequired( - action_plan=action_plan.uuid) - - goal = objects.Goal.get( - wcontext.make_context(show_deleted=True), audit.goal_id) - audit_payload = audit_notifications.TerseAuditPayload( - audit=audit, goal_uuid=goal.uuid) - - strategy_payload = strategy_notifications.StrategyPayload( - strategy=strategy) - - return audit_payload, strategy_payload - - -def send_create(context, action_plan, service='infra-optim', host=None): - """Emit an action_plan.create notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - versioned_payload = ActionPlanCreatePayload( - action_plan=action_plan, - audit=audit_payload, - strategy=strategy_payload, - ) - - notification = ActionPlanCreateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action_plan', - action=wfields.NotificationAction.CREATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_update(context, action_plan, service='infra-optim', - host=None, old_state=None): - """Emit an action_plan.update notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - state_update = ActionPlanStateUpdatePayload( - old_state=old_state, - state=action_plan.state if old_state else None) - - versioned_payload = ActionPlanUpdatePayload( - action_plan=action_plan, - state_update=state_update, - audit=audit_payload, - strategy=strategy_payload, - ) - - notification = ActionPlanUpdateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action_plan', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_delete(context, action_plan, service='infra-optim', 
host=None): - """Emit an action_plan.delete notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - versioned_payload = ActionPlanDeletePayload( - action_plan=action_plan, - audit=audit_payload, - strategy=strategy_payload, - ) - - notification = ActionPlanDeleteNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action_plan', - action=wfields.NotificationAction.DELETE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_action_notification(context, action_plan, action, phase=None, - priority=wfields.NotificationPriority.INFO, - service='infra-optim', host=None): - """Emit an action_plan action notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - fault = None - if phase == wfields.NotificationPhase.ERROR: - fault = exception_notifications.ExceptionPayload.from_exception() - - versioned_payload = ActionPlanActionPayload( - action_plan=action_plan, - audit=audit_payload, - strategy=strategy_payload, - fault=fault, - ) - - notification = ActionPlanActionNotification( - priority=priority, - event_type=notificationbase.EventType( - object='action_plan', - action=action, - phase=phase), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/audit.py b/watcher/notifications/audit.py deleted file mode 100644 index 83ec80d..0000000 --- a/watcher/notifications/audit.py +++ /dev/null @@ -1,368 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from watcher.common import exception -from watcher.notifications import base as notificationbase -from watcher.notifications import exception as exception_notifications -from watcher.notifications import goal as goal_notifications -from watcher.notifications import strategy as strategy_notifications -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class TerseAuditPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('audit', 'uuid'), - - 'audit_type': ('audit', 'audit_type'), - 'state': ('audit', 'state'), - 'parameters': ('audit', 'parameters'), - 'interval': ('audit', 'interval'), - 'scope': ('audit', 'scope'), - 'auto_trigger': ('audit', 'auto_trigger'), - 'next_run_time': ('audit', 'next_run_time'), - - 'created_at': ('audit', 'created_at'), - 'updated_at': ('audit', 'updated_at'), - 'deleted_at': ('audit', 'deleted_at'), - } - - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' boolean field, - # Added 'next_run_time' DateTime field, - # 'interval' type has been changed from Integer to String - VERSION = '1.1' - - fields = { - 'uuid': wfields.UUIDField(), - 'audit_type': wfields.StringField(), - 'state': wfields.StringField(), - 'parameters': wfields.FlexibleDictField(nullable=True), - 'interval': wfields.StringField(nullable=True), - 'scope': wfields.FlexibleListOfDictField(nullable=True), - 'goal_uuid': wfields.UUIDField(), - 'strategy_uuid': 
wfields.UUIDField(nullable=True), - 'auto_trigger': wfields.BooleanField(), - 'next_run_time': wfields.DateTimeField(nullable=True), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, audit, goal_uuid, strategy_uuid=None, **kwargs): - super(TerseAuditPayload, self).__init__( - goal_uuid=goal_uuid, strategy_uuid=strategy_uuid, **kwargs) - self.populate_schema(audit=audit) - - -@base.WatcherObjectRegistry.register_notification -class AuditPayload(TerseAuditPayload): - SCHEMA = { - 'uuid': ('audit', 'uuid'), - - 'audit_type': ('audit', 'audit_type'), - 'state': ('audit', 'state'), - 'parameters': ('audit', 'parameters'), - 'interval': ('audit', 'interval'), - 'scope': ('audit', 'scope'), - 'auto_trigger': ('audit', 'auto_trigger'), - 'next_run_time': ('audit', 'next_run_time'), - - 'created_at': ('audit', 'created_at'), - 'updated_at': ('audit', 'updated_at'), - 'deleted_at': ('audit', 'deleted_at'), - } - - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - - fields = { - 'goal': wfields.ObjectField('GoalPayload'), - 'strategy': wfields.ObjectField('StrategyPayload', nullable=True), - } - - def __init__(self, audit, goal, strategy=None, **kwargs): - if not kwargs.get('goal_uuid'): - kwargs['goal_uuid'] = goal.uuid - - if strategy and not kwargs.get('strategy_uuid'): - kwargs['strategy_uuid'] = strategy.uuid - - super(AuditPayload, self).__init__( - audit=audit, goal=goal, - strategy=strategy, **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class AuditStateUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - -@base.WatcherObjectRegistry.register_notification -class 
AuditCreatePayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = {} - - def __init__(self, audit, goal, strategy): - super(AuditCreatePayload, self).__init__( - audit=audit, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class AuditUpdatePayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = { - 'state_update': wfields.ObjectField('AuditStateUpdatePayload'), - } - - def __init__(self, audit, state_update, goal, strategy): - super(AuditUpdatePayload, self).__init__( - audit=audit, - state_update=state_update, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class AuditActionPayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = { - 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, audit, goal, strategy, **kwargs): - super(AuditActionPayload, self).__init__( - audit=audit, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy, - **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class AuditDeletePayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = {} - - def __init__(self, audit, goal, strategy): - super(AuditDeletePayload, self).__init__( - audit=audit, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy) - - -@notificationbase.notification_sample('audit-strategy-error.json') -@notificationbase.notification_sample('audit-strategy-end.json') -@notificationbase.notification_sample('audit-strategy-start.json') 
-@base.WatcherObjectRegistry.register_notification -class AuditActionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditActionPayload') - } - - -@notificationbase.notification_sample('audit-create.json') -@base.WatcherObjectRegistry.register_notification -class AuditCreateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditCreatePayload') - } - - -@notificationbase.notification_sample('audit-update.json') -@base.WatcherObjectRegistry.register_notification -class AuditUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditUpdatePayload') - } - - -@notificationbase.notification_sample('audit-delete.json') -@base.WatcherObjectRegistry.register_notification -class AuditDeleteNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditDeletePayload') - } - - -def _get_common_payload(audit): - goal = None - strategy = None - try: - goal = audit.goal - if audit.strategy_id: - strategy = audit.strategy - except NotImplementedError: - raise exception.EagerlyLoadedAuditRequired(audit=audit.uuid) - - goal_payload = goal_notifications.GoalPayload(goal=goal) - - strategy_payload = None - if strategy: - strategy_payload = strategy_notifications.StrategyPayload( - strategy=strategy) - - return goal_payload, strategy_payload - - -def send_create(context, audit, service='infra-optim', host=None): - """Emit an audit.create notification.""" - goal_payload, strategy_payload = _get_common_payload(audit) - - versioned_payload = AuditCreatePayload( - audit=audit, - goal=goal_payload, - strategy=strategy_payload, - ) - - notification = AuditCreateNotification( - 
priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='audit', - action=wfields.NotificationAction.CREATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_update(context, audit, service='infra-optim', - host=None, old_state=None): - """Emit an audit.update notification.""" - goal_payload, strategy_payload = _get_common_payload(audit) - - state_update = AuditStateUpdatePayload( - old_state=old_state, - state=audit.state if old_state else None) - - versioned_payload = AuditUpdatePayload( - audit=audit, - state_update=state_update, - goal=goal_payload, - strategy=strategy_payload, - ) - - notification = AuditUpdateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='audit', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_delete(context, audit, service='infra-optim', host=None): - goal_payload, strategy_payload = _get_common_payload(audit) - - versioned_payload = AuditDeletePayload( - audit=audit, - goal=goal_payload, - strategy=strategy_payload, - ) - - notification = AuditDeleteNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='audit', - action=wfields.NotificationAction.DELETE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_action_notification(context, audit, action, phase=None, - priority=wfields.NotificationPriority.INFO, - service='infra-optim', host=None): - """Emit an audit action notification.""" - goal_payload, strategy_payload = _get_common_payload(audit) - - fault = None - if phase == 
wfields.NotificationPhase.ERROR: - fault = exception_notifications.ExceptionPayload.from_exception() - - versioned_payload = AuditActionPayload( - audit=audit, - goal=goal_payload, - strategy=strategy_payload, - fault=fault, - ) - - notification = AuditActionNotification( - priority=priority, - event_type=notificationbase.EventType( - object='audit', - action=action, - phase=phase), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/base.py b/watcher/notifications/base.py deleted file mode 100644 index d1c2d0e..0000000 --- a/watcher/notifications/base.py +++ /dev/null @@ -1,216 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log - -from watcher.common import exception -from watcher.common import rpc -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - -# Definition of notification levels in increasing order of severity -NOTIFY_LEVELS = { - wfields.NotificationPriority.DEBUG: 0, - wfields.NotificationPriority.INFO: 1, - wfields.NotificationPriority.WARNING: 2, - wfields.NotificationPriority.ERROR: 3, - wfields.NotificationPriority.CRITICAL: 4 -} - - -@base.WatcherObjectRegistry.register_if(False) -class NotificationObject(base.WatcherObject): - """Base class for every notification related versioned object.""" - - # Version 1.0: Initial version - VERSION = '1.0' - - def __init__(self, **kwargs): - super(NotificationObject, self).__init__(**kwargs) - # The notification objects are created on the fly when watcher emits - # the notification. This causes that every object shows every field as - # changed. We don't want to send this meaningless information so we - # reset the object after creation. 
- self.obj_reset_changes(recursive=False) - - def save(self, context): - raise exception.UnsupportedError() - - def obj_load_attr(self, attrname): - raise exception.UnsupportedError() - - -@base.WatcherObjectRegistry.register_notification -class EventType(NotificationObject): - - # Version 1.0: Initial version - # Version 1.1: Added STRATEGY action in NotificationAction enum - # Version 1.2: Added PLANNER action in NotificationAction enum - # Version 1.3: Added EXECUTION action in NotificationAction enum - VERSION = '1.3' - - fields = { - 'object': wfields.StringField(), - 'action': wfields.NotificationActionField(), - 'phase': wfields.NotificationPhaseField(nullable=True), - } - - def to_notification_event_type_field(self): - """Serialize the object to the wire format.""" - s = '%s.%s' % (self.object, self.action) - if self.obj_attr_is_set('phase'): - s += '.%s' % self.phase - return s - - -@base.WatcherObjectRegistry.register_if(False) -class NotificationPayloadBase(NotificationObject): - """Base class for the payload of versioned notifications.""" - # SCHEMA defines how to populate the payload fields. It is a dictionary - # where every key value pair has the following format: - # : (, - # ) - # The is the name where the data will be stored in the - # payload object, this field has to be defined as a field of the payload. - # The shall refer to name of the parameter passed as - # kwarg to the payload's populate_schema() call and this object will be - # used as the source of the data. The shall be - # a valid field of the passed argument. - # The SCHEMA needs to be applied with the populate_schema() call before the - # notification can be emitted. - # The value of the payload. field will be set by the - # . field. The - # will not be part of the payload object internal or - # external representation. - # Payload fields that are not set by the SCHEMA can be filled in the same - # way as in any versioned object. 
- SCHEMA = {} - # Version 1.0: Initial version - VERSION = '1.0' - - def __init__(self, **kwargs): - super(NotificationPayloadBase, self).__init__(**kwargs) - self.populated = not self.SCHEMA - - def populate_schema(self, **kwargs): - """Populate the object based on the SCHEMA and the source objects - - :param kwargs: A dict contains the source object at the key defined in - the SCHEMA - """ - for key, (obj, field) in self.SCHEMA.items(): - source = kwargs[obj] - if source.obj_attr_is_set(field): - setattr(self, key, getattr(source, field)) - self.populated = True - - # the schema population will create changed fields but we don't need - # this information in the notification - self.obj_reset_changes(recursive=False) - - -@base.WatcherObjectRegistry.register_notification -class NotificationPublisher(NotificationObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'host': wfields.StringField(nullable=False), - 'binary': wfields.StringField(nullable=False), - } - - -@base.WatcherObjectRegistry.register_if(False) -class NotificationBase(NotificationObject): - """Base class for versioned notifications. - - Every subclass shall define a 'payload' field. - """ - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'priority': wfields.NotificationPriorityField(), - 'event_type': wfields.ObjectField('EventType'), - 'publisher': wfields.ObjectField('NotificationPublisher'), - } - - def save(self, context): - raise exception.UnsupportedError() - - def obj_load_attr(self, attrname): - raise exception.UnsupportedError() - - def _should_notify(self): - """Determine whether the notification should be sent. - - A notification is sent when the level of the notification is - greater than or equal to the level specified in the - configuration, in the increasing order of DEBUG, INFO, WARNING, - ERROR, CRITICAL. - :return: True if notification should be sent, False otherwise. 
- """ - if not CONF.notification_level: - return False - return (NOTIFY_LEVELS[self.priority] >= - NOTIFY_LEVELS[CONF.notification_level]) - - def _emit(self, context, event_type, publisher_id, payload): - notifier = rpc.get_notifier(publisher_id) - notify = getattr(notifier, self.priority) - LOG.debug("Emitting notification `%s`", event_type) - notify(context, event_type=event_type, payload=payload) - - def emit(self, context): - """Send the notification.""" - if not self._should_notify(): - return - if not self.payload.populated: - raise exception.NotificationPayloadError( - class_name=self.__class__.__name__) - # Note(gibi): notification payload will be a newly populated object - # therefore every field of it will look changed so this does not carry - # any extra information so we drop this from the payload. - self.payload.obj_reset_changes(recursive=False) - - self._emit( - context, - event_type=self.event_type.to_notification_event_type_field(), - publisher_id='%s:%s' % (self.publisher.binary, - self.publisher.host), - payload=self.payload.obj_to_primitive()) - - -def notification_sample(sample): - """Provide a notification sample of the decorated notification. - - Class decorator to attach the notification sample information - to the notification object for documentation generation purposes. - - :param sample: the path of the sample json file relative to the - doc/notification_samples/ directory in the watcher - repository root. - """ - def wrap(cls): - if not getattr(cls, 'samples', None): - cls.samples = [sample] - else: - cls.samples.append(sample) - return cls - return wrap diff --git a/watcher/notifications/exception.py b/watcher/notifications/exception.py deleted file mode 100644 index 68fc1eb..0000000 --- a/watcher/notifications/exception.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import sys - -import six - -from watcher.notifications import base as notificationbase -from watcher.objects import base as base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register_notification -class ExceptionPayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'module_name': wfields.StringField(), - 'function_name': wfields.StringField(), - 'exception': wfields.StringField(), - 'exception_message': wfields.StringField() - } - - @classmethod - def from_exception(cls, fault=None): - fault = fault or sys.exc_info()[1] - trace = inspect.trace()[-1] - # TODO(gibi): apply strutils.mask_password on exception_message and - # consider emitting the exception_message only if the safe flag is - # true in the exception like in the REST API - return cls( - function_name=trace[3], - module_name=inspect.getmodule(trace[0]).__name__, - exception=fault.__class__.__name__, - exception_message=six.text_type(fault)) - - -@notificationbase.notification_sample('infra-optim-exception.json') -@base.WatcherObjectRegistry.register_notification -class ExceptionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'payload': wfields.ObjectField('ExceptionPayload') - } diff --git a/watcher/notifications/goal.py b/watcher/notifications/goal.py deleted file mode 100644 index 8c76bad..0000000 --- a/watcher/notifications/goal.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 
b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register_notification -class GoalPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('goal', 'uuid'), - 'name': ('goal', 'name'), - 'display_name': ('goal', 'display_name'), - 'efficacy_specification': ('goal', 'efficacy_specification'), - - 'created_at': ('goal', 'created_at'), - 'updated_at': ('goal', 'updated_at'), - 'deleted_at': ('goal', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'efficacy_specification': wfields.FlexibleListOfDictField(), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, goal, **kwargs): - super(GoalPayload, self).__init__(**kwargs) - self.populate_schema(goal=goal) diff --git a/watcher/notifications/service.py b/watcher/notifications/service.py deleted file mode 100644 index 1d2ab8a..0000000 --- a/watcher/notifications/service.py +++ /dev/null @@ -1,113 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 
(the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo_config import cfg - -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields -from watcher.objects import service as o_service - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class ServicePayload(notificationbase.NotificationPayloadBase): - - SCHEMA = { - 'sevice_host': ('failed_service', 'host'), - 'name': ('failed_service', 'name'), - 'last_seen_up': ('failed_service', 'last_seen_up'), - } - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'sevice_host': wfields.StringField(), - 'name': wfields.StringField(), - 'last_seen_up': wfields.DateTimeField(nullable=True), - } - - def __init__(self, failed_service, status_update, **kwargs): - super(ServicePayload, self).__init__( - failed_service=failed_service, - status_update=status_update, **kwargs) - self.populate_schema(failed_service=failed_service) - - -@base.WatcherObjectRegistry.register_notification -class ServiceStatusUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - -@base.WatcherObjectRegistry.register_notification -class ServiceUpdatePayload(ServicePayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'status_update': wfields.ObjectField('ServiceStatusUpdatePayload'), - } - - def 
__init__(self, failed_service, status_update): - super(ServiceUpdatePayload, self).__init__( - failed_service=failed_service, - status_update=status_update) - - -@notificationbase.notification_sample('service-update.json') -@base.WatcherObjectRegistry.register_notification -class ServiceUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ServiceUpdatePayload') - } - - -def send_service_update(context, failed_service, state, - service='infra-optim', - host=None): - """Emit an service failed notification.""" - if state == o_service.ServiceStatus.FAILED: - priority = wfields.NotificationPriority.WARNING - status_update = ServiceStatusUpdatePayload( - old_state=o_service.ServiceStatus.ACTIVE, - state=o_service.ServiceStatus.FAILED) - else: - priority = wfields.NotificationPriority.INFO - status_update = ServiceStatusUpdatePayload( - old_state=o_service.ServiceStatus.FAILED, - state=o_service.ServiceStatus.ACTIVE) - versioned_payload = ServiceUpdatePayload( - failed_service=failed_service, - status_update=status_update - ) - - notification = ServiceUpdateNotification( - priority=priority, - event_type=notificationbase.EventType( - object='service', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/strategy.py b/watcher/notifications/strategy.py deleted file mode 100644 index f7da109..0000000 --- a/watcher/notifications/strategy.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register_notification -class StrategyPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('strategy', 'uuid'), - 'name': ('strategy', 'name'), - 'display_name': ('strategy', 'display_name'), - 'parameters_spec': ('strategy', 'parameters_spec'), - - 'created_at': ('strategy', 'created_at'), - 'updated_at': ('strategy', 'updated_at'), - 'deleted_at': ('strategy', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'parameters_spec': wfields.FlexibleDictField(nullable=True), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, strategy, **kwargs): - super(StrategyPayload, self).__init__(**kwargs) - self.populate_schema(strategy=strategy) diff --git a/watcher/objects/__init__.py b/watcher/objects/__init__.py deleted file mode 100644 index 11c8a86..0000000 --- a/watcher/objects/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(comstud): You may scratch your head as you see code that imports -# this module and then accesses attributes for objects such as Node, -# etc, yet you do not see these attributes in here. Never fear, there is -# a little bit of magic. When objects are registered, an attribute is set -# on this module automatically, pointing to the newest/latest version of -# the object. - - -def register_all(): - # NOTE(danms): You must make sure your object gets imported in this - # function in order for it to be registered by services that may - # need to receive it via RPC. - __import__('watcher.objects.goal') - __import__('watcher.objects.strategy') - __import__('watcher.objects.audit_template') - __import__('watcher.objects.audit') - __import__('watcher.objects.action_plan') - __import__('watcher.objects.action') - __import__('watcher.objects.efficacy_indicator') - __import__('watcher.objects.scoring_engine') - __import__('watcher.objects.service') diff --git a/watcher/objects/action.py b/watcher/objects/action.py deleted file mode 100644 index 95f923a..0000000 --- a/watcher/objects/action.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -class State(object): - PENDING = 'PENDING' - ONGOING = 'ONGOING' - FAILED = 'FAILED' - SUCCEEDED = 'SUCCEEDED' - DELETED = 'DELETED' - CANCELLED = 'CANCELLED' - CANCELLING = 'CANCELLING' - - -@base.WatcherObjectRegistry.register -class Action(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'action_plan' object field - # Version 2.0: Removed 'next' object field, Added 'parents' object field - VERSION = '2.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'action_plan_id': wfields.IntegerField(), - 'action_type': wfields.StringField(nullable=True), - 'input_parameters': wfields.DictField(nullable=True), - 'state': wfields.StringField(nullable=True), - 'parents': wfields.ListOfStringsField(nullable=True), - - 'action_plan': wfields.ObjectField('ActionPlan', nullable=True), - } - object_fields = { - 'action_plan': (objects.ActionPlan, 'action_plan_id'), - } - - @base.remotable_classmethod - def get(cls, context, action_id, eager=False): - """Find a action based on its id or uuid and return a Action object. - - :param action_id: the id *or* uuid of a action. 
- :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - if utils.is_int_like(action_id): - return cls.get_by_id(context, action_id, eager=eager) - elif utils.is_uuid_like(action_id): - return cls.get_by_uuid(context, action_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=action_id) - - @base.remotable_classmethod - def get_by_id(cls, context, action_id, eager=False): - """Find a action based on its integer id and return a Action object. - - :param action_id: the id of a action. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - db_action = cls.dbapi.get_action_by_id(context, action_id, eager=eager) - action = cls._from_db_object(cls(context), db_action, eager=eager) - return action - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a action based on uuid and return a :class:`Action` object. - - :param uuid: the uuid of a action. - :param context: Security context - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - db_action = cls.dbapi.get_action_by_uuid(context, uuid, eager=eager) - action = cls._from_db_object(cls(context), db_action, eager=eager) - return action - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of Action objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`Action` object. 
- """ - db_actions = cls.dbapi.get_action_list(context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_actions] - - @base.remotable - def create(self): - """Create an :class:`Action` record in the DB. - - :returns: An :class:`Action` object. - """ - values = self.obj_get_changes() - db_action = self.dbapi.create_action(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_action, eager=True) - - notifications.action.send_create(self.obj_context, self) - - def destroy(self): - """Delete the Action from the DB""" - self.dbapi.destroy_action(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this Action. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_action(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - notifications.action.send_update(self.obj_context, self) - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this Action. - - Loads a action with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded action column by column, if there are any updates. 
- :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the Audit from the DB""" - self.state = State.DELETED - self.save() - db_obj = self.dbapi.soft_delete_action(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - notifications.action.send_delete(self.obj_context, self) diff --git a/watcher/objects/action_plan.py b/watcher/objects/action_plan.py deleted file mode 100644 index 4618ec9..0000000 --- a/watcher/objects/action_plan.py +++ /dev/null @@ -1,340 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Action Plan ` is a flow of -:ref:`Actions ` that should be executed in order to satisfy -a given :ref:`Goal `. - -An :ref:`Action Plan ` is generated by Watcher when an -:ref:`Audit ` is successful which implies that the -:ref:`Strategy ` -which was used has found a :ref:`Solution ` to achieve the -:ref:`Goal ` of this :ref:`Audit `. - -In the default implementation of Watcher, an -:ref:`Action Plan ` -is only composed of successive :ref:`Actions ` -(i.e., a Workflow of :ref:`Actions ` belonging to a unique -branch). 
- -However, Watcher provides abstract interfaces for many of its components, -allowing other implementations to generate and handle more complex -:ref:`Action Plan(s) ` -composed of two types of Action Item(s): - -- simple :ref:`Actions `: atomic tasks, which means it - can not be split into smaller tasks or commands from an OpenStack point of - view. -- composite Actions: which are composed of several simple - :ref:`Actions ` - ordered in sequential and/or parallel flows. - -An :ref:`Action Plan ` may be described using -standard workflow model description formats such as -`Business Process Model and Notation 2.0 (BPMN 2.0) -`_ or `Unified Modeling Language (UML) -`_. - -An :ref:`Action Plan ` has a life-cycle and its current -state may be one of the following: - -- **RECOMMENDED** : the :ref:`Action Plan ` is waiting - for a validation from the :ref:`Administrator ` -- **ONGOING** : the :ref:`Action Plan ` is currently - being processed by the :ref:`Watcher Applier ` -- **SUCCEEDED** : the :ref:`Action Plan ` has been - executed successfully (i.e. all :ref:`Actions ` that it - contains have been executed successfully) -- **FAILED** : an error occurred while executing the - :ref:`Action Plan ` -- **DELETED** : the :ref:`Action Plan ` is still - stored in the :ref:`Watcher database ` but is - not returned any more through the Watcher APIs. 
-- **CANCELLED** : the :ref:`Action Plan ` was in - **PENDING** or **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUPERSEDED** : the :ref:`Action Plan ` was in - **RECOMMENDED** state and was superseded by the - :ref:`Administrator ` -""" -import datetime - -from watcher.common import exception -from watcher.common import utils -from watcher import conf -from watcher.db import api as db_api -from watcher import notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = conf.CONF - - -class State(object): - RECOMMENDED = 'RECOMMENDED' - PENDING = 'PENDING' - ONGOING = 'ONGOING' - FAILED = 'FAILED' - SUCCEEDED = 'SUCCEEDED' - DELETED = 'DELETED' - CANCELLED = 'CANCELLED' - SUPERSEDED = 'SUPERSEDED' - CANCELLING = 'CANCELLING' - - -@base.WatcherObjectRegistry.register -class ActionPlan(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'audit' and 'strategy' object field - # Version 1.2: audit_id is not nullable anymore - # Version 2.0: Removed 'first_action_id' object field - VERSION = '2.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'audit_id': wfields.IntegerField(), - 'strategy_id': wfields.IntegerField(), - 'state': wfields.StringField(nullable=True), - 'global_efficacy': wfields.FlexibleDictField(nullable=True), - - 'audit': wfields.ObjectField('Audit', nullable=True), - 'strategy': wfields.ObjectField('Strategy', nullable=True), - } - - object_fields = { - 'audit': (objects.Audit, 'audit_id'), - 'strategy': (objects.Strategy, 'strategy_id'), - } - - # Proxified field so we can keep the previous value after an update - _state = None - _old_state = None - - # NOTE(v-francoise): The way oslo.versionedobjects works is by using a - # __new__ that will automatically create the attributes referenced in - # 
fields. These attributes are properties that raise an exception if no - # value has been assigned, which means that they store the actual field - # value in an "_obj_%(field)s" attribute. So because we want to proxify a - # value that is already proxified, we have to do what you see below. - @property - def _obj_state(self): - return self._state - - @property - def _obj_old_state(self): - return self._old_state - - @property - def old_state(self): - return self._old_state - - @_obj_old_state.setter - def _obj_old_state(self, value): - self._old_state = value - - @_obj_state.setter - def _obj_state(self, value): - if self._old_state is None and self._state is None: - self._state = value - else: - self._old_state, self._state = self._state, value - - @base.remotable_classmethod - def get(cls, context, action_plan_id, eager=False): - """Find a action_plan based on its id or uuid and return a Action object. - - :param action_plan_id: the id *or* uuid of a action_plan. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - if utils.is_int_like(action_plan_id): - return cls.get_by_id(context, action_plan_id, eager=eager) - elif utils.is_uuid_like(action_plan_id): - return cls.get_by_uuid(context, action_plan_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=action_plan_id) - - @base.remotable_classmethod - def get_by_id(cls, context, action_plan_id, eager=False): - """Find a action_plan based on its integer id and return a ActionPlan object. - - :param action_plan_id: the id of a action_plan. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`ActionPlan` object. 
- """ - db_action_plan = cls.dbapi.get_action_plan_by_id( - context, action_plan_id, eager=eager) - action_plan = cls._from_db_object( - cls(context), db_action_plan, eager=eager) - return action_plan - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a action_plan based on uuid and return a :class:`ActionPlan` object. - - :param uuid: the uuid of a action_plan. - :param context: Security context - :param eager: Load object fields if True (Default: False) - :returns: a :class:`ActionPlan` object. - """ - db_action_plan = cls.dbapi.get_action_plan_by_uuid( - context, uuid, eager=eager) - action_plan = cls._from_db_object( - cls(context), db_action_plan, eager=eager) - return action_plan - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of ActionPlan objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`ActionPlan` object. - """ - db_action_plans = cls.dbapi.get_action_plan_list(context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_action_plans] - - @base.remotable - def create(self): - """Create an :class:`ActionPlan` record in the DB. - - :returns: An :class:`ActionPlan` object. 
- """ - values = self.obj_get_changes() - db_action_plan = self.dbapi.create_action_plan(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_action_plan, eager=True) - - def _notify(): - notifications.action_plan.send_create(self._context, self) - - _notify() - - @base.remotable - def destroy(self): - """Delete the action plan from the DB""" - related_efficacy_indicators = objects.EfficacyIndicator.list( - context=self._context, - filters={"action_plan_uuid": self.uuid}) - - # Cascade soft_delete of related efficacy indicators - for related_efficacy_indicator in related_efficacy_indicators: - related_efficacy_indicator.destroy() - - self.dbapi.destroy_action_plan(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this Action plan. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_action_plan(self.uuid, updates) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.action_plan.send_update( - self._context, self, old_state=self.old_state) - - _notify() - - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this Action plan. - - Loads a action_plan with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded action_plan column by column, if there are any updates. 
- :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the Action plan from the DB""" - related_actions = objects.Action.list( - context=self._context, - filters={"action_plan_uuid": self.uuid}, - eager=True) - - # Cascade soft_delete of related actions - for related_action in related_actions: - related_action.soft_delete() - - related_efficacy_indicators = objects.EfficacyIndicator.list( - context=self._context, - filters={"action_plan_uuid": self.uuid}) - - # Cascade soft_delete of related efficacy indicators - for related_efficacy_indicator in related_efficacy_indicators: - related_efficacy_indicator.soft_delete() - - self.state = State.DELETED - self.save() - db_obj = self.dbapi.soft_delete_action_plan(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.action_plan.send_delete(self._context, self) - - _notify() - - -class StateManager(object): - def check_expired(self, context): - action_plan_expiry = ( - CONF.watcher_decision_engine.action_plan_expiry) - date_created = datetime.datetime.utcnow() - datetime.timedelta( - hours=action_plan_expiry) - filters = {'state__eq': State.RECOMMENDED, - 'created_at__lt': date_created} - action_plans = objects.ActionPlan.list( - context, filters=filters, eager=True) - for action_plan in action_plans: - action_plan.state = State.SUPERSEDED - action_plan.save() diff --git a/watcher/objects/audit.py b/watcher/objects/audit.py deleted file mode 100644 index d0a1c10..0000000 --- a/watcher/objects/audit.py +++ /dev/null @@ -1,328 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -In the Watcher system, an :ref:`Audit ` is a request for -optimizing a :ref:`Cluster `. - -The optimization is done in order to satisfy one :ref:`Goal ` -on a given :ref:`Cluster `. - -For each :ref:`Audit `, the Watcher system generates an -:ref:`Action Plan `. - -An :ref:`Audit ` has a life-cycle and its current state may -be one of the following: - -- **PENDING** : a request for an :ref:`Audit ` has been - submitted (either manually by the - :ref:`Administrator ` or automatically via some - event handling mechanism) and is in the queue for being processed by the - :ref:`Watcher Decision Engine ` -- **ONGOING** : the :ref:`Audit ` is currently being - processed by the - :ref:`Watcher Decision Engine ` -- **SUCCEEDED** : the :ref:`Audit ` has been executed - successfully (note that it may not necessarily produce a - :ref:`Solution `). -- **FAILED** : an error occurred while executing the - :ref:`Audit ` -- **DELETED** : the :ref:`Audit ` is still stored in the - :ref:`Watcher database ` but is not returned - any more through the Watcher APIs. 
-- **CANCELLED** : the :ref:`Audit ` was in **PENDING** or - **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** - state and was suspended by the - :ref:`Administrator ` -""" - -import enum - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -class State(object): - ONGOING = 'ONGOING' - SUCCEEDED = 'SUCCEEDED' - FAILED = 'FAILED' - CANCELLED = 'CANCELLED' - DELETED = 'DELETED' - PENDING = 'PENDING' - SUSPENDED = 'SUSPENDED' - - -class AuditType(enum.Enum): - ONESHOT = 'ONESHOT' - CONTINUOUS = 'CONTINUOUS' - - -@base.WatcherObjectRegistry.register -class Audit(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'goal' and 'strategy' object field - # Version 1.2: Added 'auto_trigger' boolean field - # Version 1.3: Added 'next_run_time' DateTime field, - # 'interval' type has been changed from Integer to String - VERSION = '1.3' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'audit_type': wfields.StringField(), - 'state': wfields.StringField(), - 'parameters': wfields.FlexibleDictField(nullable=True), - 'interval': wfields.StringField(nullable=True), - 'scope': wfields.FlexibleListOfDictField(nullable=True), - 'goal_id': wfields.IntegerField(), - 'strategy_id': wfields.IntegerField(nullable=True), - 'auto_trigger': wfields.BooleanField(), - 'next_run_time': wfields.DateTimeField(nullable=True, - tzinfo_aware=False), - - 'goal': wfields.ObjectField('Goal', nullable=True), - 'strategy': wfields.ObjectField('Strategy', nullable=True), - } - - object_fields = { - 'goal': (objects.Goal, 'goal_id'), - 'strategy': (objects.Strategy, 
'strategy_id'), - } - - # Proxified field so we can keep the previous value after an update - _state = None - _old_state = None - - # NOTE(v-francoise): The way oslo.versionedobjects works is by using a - # __new__ that will automatically create the attributes referenced in - # fields. These attributes are properties that raise an exception if no - # value has been assigned, which means that they store the actual field - # value in an "_obj_%(field)s" attribute. So because we want to proxify a - # value that is already proxified, we have to do what you see below. - @property - def _obj_state(self): - return self._state - - @property - def _obj_old_state(self): - return self._old_state - - @property - def old_state(self): - return self._old_state - - @_obj_old_state.setter - def _obj_old_state(self, value): - self._old_state = value - - @_obj_state.setter - def _obj_state(self, value): - if self._old_state is None and self._state is None: - self._state = value - else: - self._old_state, self._state = self._state, value - - @base.remotable_classmethod - def get(cls, context, audit_id, eager=False): - """Find a audit based on its id or uuid and return a Audit object. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param audit_id: the id *or* uuid of a audit. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Audit` object. 
- """ - if utils.is_int_like(audit_id): - return cls.get_by_id(context, audit_id, eager=eager) - elif utils.is_uuid_like(audit_id): - return cls.get_by_uuid(context, audit_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=audit_id) - - @base.remotable_classmethod - def get_by_id(cls, context, audit_id, eager=False): - """Find a audit based on its integer id and return a Audit object. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param audit_id: the id of a audit. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Audit` object. - """ - db_audit = cls.dbapi.get_audit_by_id(context, audit_id, eager=eager) - audit = cls._from_db_object(cls(context), db_audit, eager=eager) - return audit - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a audit based on uuid and return a :class:`Audit` object. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param uuid: the uuid of a audit. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Audit` object. - """ - - db_audit = cls.dbapi.get_audit_by_uuid(context, uuid, eager=eager) - audit = cls._from_db_object(cls(context), db_audit, eager=eager) - return audit - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of Audit objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. 
- Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`Audit` object. - - """ - db_audits = cls.dbapi.get_audit_list(context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_audits] - - @base.remotable - def create(self): - """Create an :class:`Audit` record in the DB. - - :returns: An :class:`Audit` object. - """ - values = self.obj_get_changes() - db_audit = self.dbapi.create_audit(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_audit, eager=True) - - def _notify(): - notifications.audit.send_create(self._context, self) - - _notify() - - @base.remotable - def destroy(self): - """Delete the Audit from the DB.""" - self.dbapi.destroy_audit(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this Audit. - - Updates will be made column by column based on the result - of self.what_changed(). 
- """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_audit(self.uuid, updates) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.audit.send_update( - self._context, self, old_state=self.old_state) - - _notify() - - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this Audit. - - Loads a audit with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded audit column by column, if there are any updates. - :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the Audit from the DB.""" - self.state = State.DELETED - self.save() - db_obj = self.dbapi.soft_delete_audit(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.audit.send_delete(self._context, self) - - _notify() - - -class AuditStateTransitionManager(object): - - TRANSITIONS = { - State.PENDING: [State.ONGOING, State.CANCELLED], - State.ONGOING: [State.FAILED, State.SUCCEEDED, - State.CANCELLED, State.SUSPENDED], - State.FAILED: [State.DELETED], - State.SUCCEEDED: [State.DELETED], - State.CANCELLED: [State.DELETED], - State.SUSPENDED: [State.ONGOING, State.DELETED], - } - - INACTIVE_STATES = (State.CANCELLED, State.DELETED, - State.FAILED, State.SUSPENDED) - - def check_transition(self, initial, new): - return new in self.TRANSITIONS.get(initial, []) - - def is_inactive(self, audit): - return audit.state in self.INACTIVE_STATES diff --git a/watcher/objects/audit_template.py b/watcher/objects/audit_template.py deleted file mode 100644 index 11a0c2d..0000000 --- a/watcher/objects/audit_template.py +++ /dev/null @@ -1,241 +0,0 @@ -# 
-*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Audit ` may be launched several times with the same -settings (:ref:`Goal `, thresholds, ...). Therefore it makes -sense to save those settings in some sort of Audit preset object, which is -known as an :ref:`Audit Template `. - -An :ref:`Audit Template ` contains at least the -:ref:`Goal ` of the :ref:`Audit `. - -It may also contain some error handling settings indicating whether: - -- :ref:`Watcher Applier ` stops the - entire operation -- :ref:`Watcher Applier ` performs a rollback - -and how many retries should be attempted before failure occurs (also the latter -can be complex: for example the scenario in which there are many first-time -failures on ultimately successful :ref:`Actions `). - -Moreover, an :ref:`Audit Template ` may contain some -settings related to the level of automation for the -:ref:`Action Plan ` that will be generated by the -:ref:`Audit `. -A flag will indicate whether the :ref:`Action Plan ` -will be launched automatically or will need a manual confirmation from the -:ref:`Administrator `. - -Last but not least, an :ref:`Audit Template ` may -contain a list of extra parameters related to the -:ref:`Strategy ` configuration. These parameters can be -provided as a list of key-value pairs. 
-""" - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class AuditTemplate(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'goal' and 'strategy' object field - VERSION = '1.1' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'description': wfields.StringField(nullable=True), - 'scope': wfields.FlexibleListOfDictField(nullable=True), - 'goal_id': wfields.IntegerField(), - 'strategy_id': wfields.IntegerField(nullable=True), - - 'goal': wfields.ObjectField('Goal', nullable=True), - 'strategy': wfields.ObjectField('Strategy', nullable=True), - } - - object_fields = { - 'goal': (objects.Goal, 'goal_id'), - 'strategy': (objects.Strategy, 'strategy_id'), - } - - @base.remotable_classmethod - def get(cls, context, audit_template_id, eager=False): - """Find an audit template based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param audit_template_id: the id *or* uuid of a audit_template. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. 
- """ - if utils.is_int_like(audit_template_id): - return cls.get_by_id(context, audit_template_id, eager=eager) - elif utils.is_uuid_like(audit_template_id): - return cls.get_by_uuid(context, audit_template_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=audit_template_id) - - @base.remotable_classmethod - def get_by_id(cls, context, audit_template_id, eager=False): - """Find an audit template based on its integer id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param audit_template_id: the id of a audit_template. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. - """ - db_audit_template = cls.dbapi.get_audit_template_by_id( - context, audit_template_id, eager=eager) - audit_template = cls._from_db_object( - cls(context), db_audit_template, eager=eager) - return audit_template - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find an audit template based on uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param uuid: the uuid of a audit_template. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. 
- """ - db_audit_template = cls.dbapi.get_audit_template_by_uuid( - context, uuid, eager=eager) - audit_template = cls._from_db_object( - cls(context), db_audit_template, eager=eager) - return audit_template - - @base.remotable_classmethod - def get_by_name(cls, context, name, eager=False): - """Find an audit template based on name - - :param name: the logical name of a audit_template. - :param context: Security context - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. - """ - db_audit_template = cls.dbapi.get_audit_template_by_name( - context, name, eager=eager) - audit_template = cls._from_db_object( - cls(context), db_audit_template, eager=eager) - return audit_template - - @base.remotable_classmethod - def list(cls, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of :class:`AuditTemplate` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`AuditTemplate` object. 
- """ - db_audit_templates = cls.dbapi.get_audit_template_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_audit_templates] - - @base.remotable - def create(self): - """Create a :class:`AuditTemplate` record in the DB - - :returns: An :class:`AuditTemplate` object. - """ - values = self.obj_get_changes() - db_audit_template = self.dbapi.create_audit_template(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_audit_template, eager=True) - - def destroy(self): - """Delete the :class:`AuditTemplate` from the DB""" - self.dbapi.destroy_audit_template(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this :class:`AuditTemplate`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_audit_template(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this :class:`AuditTemplate`. - - Loads a audit_template with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded audit_template column by column, if there are any updates. 
- :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the :class:`AuditTemplate` from the DB""" - db_obj = self.dbapi.soft_delete_audit_template(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/base.py b/watcher/objects/base.py deleted file mode 100644 index 8b93418..0000000 --- a/watcher/objects/base.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Watcher common internal object model""" - -from oslo_utils import versionutils -from oslo_versionedobjects import base as ovo_base -from oslo_versionedobjects import fields as ovo_fields - -from watcher import objects - -remotable_classmethod = ovo_base.remotable_classmethod -remotable = ovo_base.remotable - - -def get_attrname(name): - """Return the mangled name of the attribute's underlying storage.""" - # FIXME(danms): This is just until we use o.vo's class properties - # and object base. 
- return '_obj_' + name - - -class WatcherObjectRegistry(ovo_base.VersionedObjectRegistry): - notification_classes = [] - - def registration_hook(self, cls, index): - # NOTE(danms): This is called when an object is registered, - # and is responsible for maintaining watcher.objects.$OBJECT - # as the highest-versioned implementation of a given object. - version = versionutils.convert_version_to_tuple(cls.VERSION) - if not hasattr(objects, cls.obj_name()): - setattr(objects, cls.obj_name(), cls) - else: - cur_version = versionutils.convert_version_to_tuple( - getattr(objects, cls.obj_name()).VERSION) - if version >= cur_version: - setattr(objects, cls.obj_name(), cls) - - @classmethod - def register_notification(cls, notification_cls): - """Register a class as notification. - - Use only to register concrete notification or payload classes, - do not register base classes intended for inheritance only. - """ - cls.register_if(False)(notification_cls) - cls.notification_classes.append(notification_cls) - return notification_cls - - @classmethod - def register_notification_objects(cls): - """Register previously decorated notification as normal ovos. - - This is not intended for production use but only for testing and - document generation purposes. - """ - for notification_cls in cls.notification_classes: - cls.register(notification_cls) - - -class WatcherObject(ovo_base.VersionedObject): - """Base class and object factory. - - This forms the base of all objects that can be remoted or instantiated - via RPC. Simply defining a class that inherits from this base class - will make it remotely instantiatable. Objects should implement the - necessary "get" classmethod routines as well as "save" object methods - as appropriate. 
- """ - - OBJ_SERIAL_NAMESPACE = 'watcher_object' - OBJ_PROJECT_NAMESPACE = 'watcher' - - def as_dict(self): - return { - k: getattr(self, k) for k in self.fields - if self.obj_attr_is_set(k)} - - -class WatcherObjectDictCompat(ovo_base.VersionedObjectDictCompat): - pass - - -class WatcherComparableObject(ovo_base.ComparableVersionedObject): - pass - - -class WatcherPersistentObject(object): - """Mixin class for Persistent objects. - - This adds the fields that we use in common for all persistent objects. - """ - fields = { - 'created_at': ovo_fields.DateTimeField(nullable=True), - 'updated_at': ovo_fields.DateTimeField(nullable=True), - 'deleted_at': ovo_fields.DateTimeField(nullable=True), - } - - # Mapping between the object field name and a 2-tuple pair composed of - # its object type (e.g. objects.RelatedObject) and the name of the - # model field related ID (or UUID) foreign key field. - # e.g.: - # - # fields = { - # # [...] - # 'related_object_id': fields.IntegerField(), # Foreign key - # 'related_object': wfields.ObjectField('RelatedObject'), - # } - # {'related_object': (objects.RelatedObject, 'related_object_id')} - object_fields = {} - - def obj_refresh(self, loaded_object): - """Applies updates for objects that inherit from base.WatcherObject. - - Checks for updated attributes in an object. Updates are applied from - the loaded object column by column in comparison with the current - object. - """ - fields = (field for field in self.fields - if field not in self.object_fields) - for field in fields: - if (self.obj_attr_is_set(field) and - self[field] != loaded_object[field]): - self[field] = loaded_object[field] - - @staticmethod - def _from_db_object(obj, db_object, eager=False): - """Converts a database entity to a formal object. - - :param obj: An object of the class. 
- :param db_object: A DB model of the object - :param eager: Enable the loading of object fields (Default: False) - :return: The object of the class with the database entity added - - """ - obj_class = type(obj) - object_fields = obj_class.object_fields - - for field in obj.fields: - if field not in object_fields: - obj[field] = db_object[field] - - if eager: - # Load object fields - context = obj._context - loadable_fields = ( - (obj_field, related_obj_cls, rel_id) - for obj_field, (related_obj_cls, rel_id) - in object_fields.items() - if obj[rel_id] - ) - for obj_field, related_obj_cls, rel_id in loadable_fields: - if getattr(db_object, obj_field, None) and obj[rel_id]: - # The object field data was eagerly loaded alongside - # the main object data - obj[obj_field] = related_obj_cls._from_db_object( - related_obj_cls(context), db_object[obj_field]) - else: - # The object field data wasn't loaded yet - obj[obj_field] = related_obj_cls.get(context, obj[rel_id]) - - obj.obj_reset_changes() - return obj - - -class WatcherObjectSerializer(ovo_base.VersionedObjectSerializer): - # Base class to use for object hydration - OBJ_BASE_CLASS = WatcherObject diff --git a/watcher/objects/efficacy_indicator.py b/watcher/objects/efficacy_indicator.py deleted file mode 100644 index 13027e1..0000000 --- a/watcher/objects/efficacy_indicator.py +++ /dev/null @@ -1,185 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class EfficacyIndicator(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'action_plan_id': wfields.IntegerField(), - 'name': wfields.StringField(), - 'description': wfields.StringField(nullable=True), - 'unit': wfields.StringField(nullable=True), - 'value': wfields.NumericField(), - } - - @base.remotable_classmethod - def get(cls, context, efficacy_indicator_id): - """Find an efficacy indicator object given its ID or UUID - - :param efficacy_indicator_id: the ID or UUID of an efficacy indicator. - :returns: a :class:`EfficacyIndicator` object. - """ - if utils.is_int_like(efficacy_indicator_id): - return cls.get_by_id(context, efficacy_indicator_id) - elif utils.is_uuid_like(efficacy_indicator_id): - return cls.get_by_uuid(context, efficacy_indicator_id) - else: - raise exception.InvalidIdentity(identity=efficacy_indicator_id) - - @base.remotable_classmethod - def get_by_id(cls, context, efficacy_indicator_id): - """Find an efficacy indicator given its integer ID - - :param efficacy_indicator_id: the id of an efficacy indicator. - :returns: a :class:`EfficacyIndicator` object. - """ - db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_id( - context, efficacy_indicator_id) - efficacy_indicator = EfficacyIndicator._from_db_object( - cls(context), db_efficacy_indicator) - return efficacy_indicator - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - """Find an efficacy indicator given its UUID - - :param uuid: the uuid of an efficacy indicator. 
- :param context: Security context - :returns: a :class:`EfficacyIndicator` object. - """ - db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_uuid( - context, uuid) - efficacy_indicator = EfficacyIndicator._from_db_object( - cls(context), db_efficacy_indicator) - return efficacy_indicator - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None): - """Return a list of EfficacyIndicator objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`EfficacyIndicator` object. - - """ - db_efficacy_indicators = cls.dbapi.get_efficacy_indicator_list( - context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj) - for obj in db_efficacy_indicators] - - @base.remotable - def create(self, context=None): - """Create a EfficacyIndicator record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - - """ - values = self.obj_get_changes() - db_efficacy_indicator = self.dbapi.create_efficacy_indicator(values) - self._from_db_object(self, db_efficacy_indicator) - - def destroy(self, context=None): - """Delete the EfficacyIndicator from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. 
- A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - """ - self.dbapi.destroy_efficacy_indicator(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this EfficacyIndicator. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_efficacy_indicator(self.uuid, updates) - - self.obj_reset_changes() - - @base.remotable - def refresh(self, context=None): - """Loads updates for this EfficacyIndicator. - - Loads an efficacy indicator with the same uuid from the database and - checks for updated attributes. Updates are applied to the loaded - efficacy indicator column by column, if there are any updates. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - """ - current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self, context=None): - """Soft Delete the efficacy indicator from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. 
- A context should be set when instantiating the - object, e.g.: Audit(context) - """ - self.dbapi.soft_delete_efficacy_indicator(self.uuid) diff --git a/watcher/objects/fields.py b/watcher/objects/fields.py deleted file mode 100644 index d0df854..0000000 --- a/watcher/objects/fields.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility methods for objects""" - -import ast -import six - -from oslo_serialization import jsonutils -from oslo_versionedobjects import fields - - -BaseEnumField = fields.BaseEnumField -BooleanField = fields.BooleanField -DateTimeField = fields.DateTimeField -Enum = fields.Enum -FloatField = fields.FloatField -IntegerField = fields.IntegerField -ListOfStringsField = fields.ListOfStringsField -NonNegativeFloatField = fields.NonNegativeFloatField -NonNegativeIntegerField = fields.NonNegativeIntegerField -ObjectField = fields.ObjectField -StringField = fields.StringField -UnspecifiedDefault = fields.UnspecifiedDefault -UUIDField = fields.UUIDField - - -class Numeric(fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - if value is None: - return value - f_value = float(value) - return f_value if not f_value.is_integer() else value - - -class NumericField(fields.AutoTypedField): - AUTO_TYPE = Numeric() - - -class DictField(fields.AutoTypedField): - AUTO_TYPE = fields.Dict(fields.FieldType()) - - -class ListOfUUIDsField(fields.AutoTypedField): - AUTO_TYPE = 
fields.List(fields.UUID()) - - -class FlexibleDict(fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - if isinstance(value, six.string_types): - value = ast.literal_eval(value) - return dict(value) - - -class FlexibleDictField(fields.AutoTypedField): - AUTO_TYPE = FlexibleDict() - - # TODO(lucasagomes): In our code we've always translated None to {}, - # this method makes this field to work like this. But probably won't - # be accepted as-is in the oslo_versionedobjects library - def _null(self, obj, attr): - if self.nullable: - return {} - super(FlexibleDictField, self)._null(obj, attr) - - -class FlexibleListOfDict(fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - if isinstance(value, six.string_types): - value = ast.literal_eval(value) - return list(value) - - -class FlexibleListOfDictField(fields.AutoTypedField): - AUTO_TYPE = FlexibleListOfDict() - - # TODO(lucasagomes): In our code we've always translated None to {}, - # this method makes this field to work like this. 
But probably won't - # be accepted as-is in the oslo_versionedobjects library - def _null(self, obj, attr): - if self.nullable: - return [] - super(FlexibleListOfDictField, self)._null(obj, attr) - - -class Json(fields.FieldType): - def coerce(self, obj, attr, value): - if isinstance(value, six.string_types): - loaded = jsonutils.loads(value) - return loaded - return value - - def from_primitive(self, obj, attr, value): - return self.coerce(obj, attr, value) - - def to_primitive(self, obj, attr, value): - return jsonutils.dumps(value) - - -class JsonField(fields.AutoTypedField): - AUTO_TYPE = Json() - -# ### Notification fields ### # - - -class BaseWatcherEnum(Enum): - - ALL = () - - def __init__(self, **kwargs): - super(BaseWatcherEnum, self).__init__(valid_values=self.__class__.ALL) - - -class NotificationPriority(BaseWatcherEnum): - DEBUG = 'debug' - INFO = 'info' - WARNING = 'warning' - ERROR = 'error' - CRITICAL = 'critical' - - ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) - - -class NotificationPhase(BaseWatcherEnum): - START = 'start' - END = 'end' - ERROR = 'error' - - ALL = (START, END, ERROR) - - -class NotificationAction(BaseWatcherEnum): - CREATE = 'create' - UPDATE = 'update' - EXCEPTION = 'exception' - DELETE = 'delete' - - STRATEGY = 'strategy' - PLANNER = 'planner' - EXECUTION = 'execution' - - ALL = (CREATE, UPDATE, EXCEPTION, DELETE, STRATEGY, PLANNER, EXECUTION) - - -class NotificationPriorityField(BaseEnumField): - AUTO_TYPE = NotificationPriority() - - -class NotificationPhaseField(BaseEnumField): - AUTO_TYPE = NotificationPhase() - - -class NotificationActionField(BaseEnumField): - AUTO_TYPE = NotificationAction() diff --git a/watcher/objects/goal.py b/watcher/objects/goal.py deleted file mode 100644 index e947ff6..0000000 --- a/watcher/objects/goal.py +++ /dev/null @@ -1,176 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class Goal(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'efficacy_specification': wfields.FlexibleListOfDictField(), - } - - @base.remotable_classmethod - def get(cls, context, goal_id): - """Find a goal based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param goal_id: the id *or* uuid of a goal. - :returns: a :class:`Goal` object. 
- """ - if utils.is_int_like(goal_id): - return cls.get_by_id(context, goal_id) - elif utils.is_uuid_like(goal_id): - return cls.get_by_uuid(context, goal_id) - else: - raise exception.InvalidIdentity(identity=goal_id) - - @base.remotable_classmethod - def get_by_id(cls, context, goal_id): - """Find a goal based on its integer id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param goal_id: the id *or* uuid of a goal. - :returns: a :class:`Goal` object. - """ - db_goal = cls.dbapi.get_goal_by_id(context, goal_id) - goal = cls._from_db_object(cls(context), db_goal) - return goal - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - """Find a goal based on uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param uuid: the uuid of a goal. - :returns: a :class:`Goal` object. - """ - db_goal = cls.dbapi.get_goal_by_uuid(context, uuid) - goal = cls._from_db_object(cls(context), db_goal) - return goal - - @base.remotable_classmethod - def get_by_name(cls, context, name): - """Find a goal based on name - - :param name: the name of a goal. - :param context: Security context - :returns: a :class:`Goal` object. - """ - db_goal = cls.dbapi.get_goal_by_name(context, name) - goal = cls._from_db_object(cls(context), db_goal) - return goal - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None): - """Return a list of :class:`Goal` objects. - - :param context: Security context. 
NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`Goal` object. - """ - db_goals = cls.dbapi.get_goal_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj) for obj in db_goals] - - @base.remotable - def create(self): - """Create a :class:`Goal` record in the DB""" - values = self.obj_get_changes() - db_goal = self.dbapi.create_goal(values) - self._from_db_object(self, db_goal) - - def destroy(self): - """Delete the :class:`Goal` from the DB""" - self.dbapi.destroy_goal(self.id) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this :class:`Goal`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_goal(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - @base.remotable - def refresh(self): - """Loads updates for this :class:`Goal`. - - Loads a goal with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded goal column by column, if there are any updates. 
- """ - current = self.get_by_uuid(self._context, uuid=self.uuid) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the :class:`Goal` from the DB""" - db_obj = self.dbapi.soft_delete_goal(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/scoring_engine.py b/watcher/objects/scoring_engine.py deleted file mode 100644 index 0c95e72..0000000 --- a/watcher/objects/scoring_engine.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2016 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Scoring Engine ` is an instance of a data -model, to which a learning data was applied. - -Because there might be multiple algorithms used to build a particular data -model (and therefore a scoring engine), the usage of scoring engine might -vary. A metainfo field is supposed to contain any information which might -be needed by the user of a given scoring engine. 
-""" - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class ScoringEngine(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'description': wfields.StringField(nullable=True), - 'metainfo': wfields.StringField(nullable=True), - } - - @base.remotable_classmethod - def get(cls, context, scoring_engine_id): - """Find a scoring engine based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_name: the name of a scoring_engine. - :returns: a :class:`ScoringEngine` object. - """ - if utils.is_int_like(scoring_engine_id): - return cls.get_by_id(context, scoring_engine_id) - elif utils.is_uuid_like(scoring_engine_id): - return cls.get_by_uuid(context, scoring_engine_id) - else: - raise exception.InvalidIdentity(identity=scoring_engine_id) - - @base.remotable_classmethod - def get_by_id(cls, context, scoring_engine_id): - """Find a scoring engine based on its id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_id: the id of a scoring_engine. - :returns: a :class:`ScoringEngine` object. 
- """ - db_scoring_engine = cls.dbapi.get_scoring_engine_by_id( - context, - scoring_engine_id) - scoring_engine = ScoringEngine._from_db_object(cls(context), - db_scoring_engine) - return scoring_engine - - @base.remotable_classmethod - def get_by_uuid(cls, context, scoring_engine_uuid): - """Find a scoring engine based on its uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_uuid: the uuid of a scoring_engine. - :returns: a :class:`ScoringEngine` object. - """ - db_scoring_engine = cls.dbapi.get_scoring_engine_by_uuid( - context, - scoring_engine_uuid) - scoring_engine = ScoringEngine._from_db_object(cls(context), - db_scoring_engine) - return scoring_engine - - @base.remotable_classmethod - def get_by_name(cls, context, scoring_engine_name): - """Find a scoring engine based on its name - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_name: the name of a scoring_engine. - :returns: a :class:`ScoringEngine` object. - """ - db_scoring_engine = cls.dbapi.get_scoring_engine_by_name( - context, - scoring_engine_name) - scoring_engine = ScoringEngine._from_db_object(cls(context), - db_scoring_engine) - return scoring_engine - - @base.remotable_classmethod - def list(cls, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None): - """Return a list of :class:`ScoringEngine` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. 
- Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`ScoringEngine` objects. - """ - db_scoring_engines = cls.dbapi.get_scoring_engine_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return [cls._from_db_object(cls(context), obj) - for obj in db_scoring_engines] - - @base.remotable - def create(self): - """Create a :class:`ScoringEngine` record in the DB.""" - values = self.obj_get_changes() - db_scoring_engine = self.dbapi.create_scoring_engine(values) - self._from_db_object(self, db_scoring_engine) - - def destroy(self): - """Delete the :class:`ScoringEngine` from the DB""" - self.dbapi.destroy_scoring_engine(self.id) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this :class:`ScoringEngine`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_scoring_engine(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - def refresh(self): - """Loads updates for this :class:`ScoringEngine`. - - Loads a scoring_engine with the same id from the database and - checks for updated attributes. Updates are applied from - the loaded scoring_engine column by column, if there are any updates. 
- """ - current = self.get_by_id(self._context, scoring_engine_id=self.id) - self.obj_refresh(current) - - def soft_delete(self): - """Soft Delete the :class:`ScoringEngine` from the DB""" - db_obj = self.dbapi.soft_delete_scoring_engine(self.id) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/service.py b/watcher/objects/service.py deleted file mode 100644 index 0b261b8..0000000 --- a/watcher/objects/service.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -class ServiceStatus(object): - ACTIVE = 'ACTIVE' - FAILED = 'FAILED' - - -@base.WatcherObjectRegistry.register -class Service(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'name': wfields.StringField(), - 'host': wfields.StringField(), - 'last_seen_up': wfields.DateTimeField( - tzinfo_aware=False, nullable=True), - } - - @base.remotable_classmethod - def get(cls, context, service_id): - """Find a service based on its id - - :param context: Security context. 
NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Service(context) - :param service_id: the id of a service. - :returns: a :class:`Service` object. - """ - if utils.is_int_like(service_id): - db_service = cls.dbapi.get_service_by_id(context, service_id) - service = Service._from_db_object(cls(context), db_service) - return service - else: - raise exception.InvalidIdentity(identity=service_id) - - @base.remotable_classmethod - def get_by_name(cls, context, name): - """Find a service based on name - - :param name: the name of a service. - :param context: Security context - :returns: a :class:`Service` object. - """ - - db_service = cls.dbapi.get_service_by_name(context, name) - service = cls._from_db_object(cls(context), db_service) - return service - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None): - """Return a list of :class:`Service` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Service(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`Service` object. 
- """ - db_services = cls.dbapi.get_service_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj) for obj in db_services] - - @base.remotable - def create(self): - """Create a :class:`Service` record in the DB.""" - values = self.obj_get_changes() - db_service = self.dbapi.create_service(values) - self._from_db_object(self, db_service) - - @base.remotable - def save(self): - """Save updates to this :class:`Service`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_service(self.id, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - def refresh(self): - """Loads updates for this :class:`Service`. - - Loads a service with the same id from the database and - checks for updated attributes. Updates are applied from - the loaded service column by column, if there are any updates. - """ - current = self.get(self._context, service_id=self.id) - for field in self.fields: - if (hasattr(self, base.get_attrname(field)) and - self[field] != current[field]): - self[field] = current[field] - - def soft_delete(self): - """Soft Delete the :class:`Service` from the DB.""" - db_obj = self.dbapi.soft_delete_service(self.id) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/strategy.py b/watcher/objects/strategy.py deleted file mode 100644 index 584c8ff..0000000 --- a/watcher/objects/strategy.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class Strategy(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added Goal object field - VERSION = '1.1' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'goal_id': wfields.IntegerField(), - 'parameters_spec': wfields.FlexibleDictField(nullable=True), - 'goal': wfields.ObjectField('Goal', nullable=True), - } - - object_fields = {'goal': (objects.Goal, 'goal_id')} - - @base.remotable_classmethod - def get(cls, context, strategy_id, eager=False): - """Find a strategy based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param strategy_id: the id *or* uuid of a strategy. - :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. 
- """ - if utils.is_int_like(strategy_id): - return cls.get_by_id(context, strategy_id, eager=eager) - elif utils.is_uuid_like(strategy_id): - return cls.get_by_uuid(context, strategy_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=strategy_id) - - @base.remotable_classmethod - def get_by_id(cls, context, strategy_id, eager=False): - """Find a strategy based on its integer id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param strategy_id: the id of a strategy. - :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. - """ - db_strategy = cls.dbapi.get_strategy_by_id( - context, strategy_id, eager=eager) - strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) - return strategy - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a strategy based on uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param uuid: the uuid of a strategy. - :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. - """ - - db_strategy = cls.dbapi.get_strategy_by_uuid( - context, uuid, eager=eager) - strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) - return strategy - - @base.remotable_classmethod - def get_by_name(cls, context, name, eager=False): - """Find a strategy based on name - - :param context: Security context - :param name: the name of a strategy. 
- :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. - """ - - db_strategy = cls.dbapi.get_strategy_by_name( - context, name, eager=eager) - strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) - return strategy - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of :class:`Strategy` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: dict mapping the filter key to a value. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc`". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`Strategy` object. - """ - db_strategies = cls.dbapi.get_strategy_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_strategies] - - @base.remotable - def create(self, context=None): - """Create a :class:`Strategy` record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :returns: A :class:`Strategy` object. 
- """ - - values = self.obj_get_changes() - db_strategy = self.dbapi.create_strategy(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_strategy, eager=True) - - def destroy(self, context=None): - """Delete the :class:`Strategy` from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - """ - self.dbapi.destroy_strategy(self.id) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this :class:`Strategy`. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_strategy(self.id, updates) - - self.obj_reset_changes() - - @base.remotable - def refresh(self, context=None, eager=False): - """Loads updates for this :class:`Strategy`. - - Loads a strategy with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded strategy column by column, if there are any updates. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. 
- A context should be set when instantiating the - object, e.g.: Strategy(context) - :param eager: Load object fields if True (Default: False) - """ - current = self.__class__.get_by_id( - self._context, strategy_id=self.id, eager=eager) - for field in self.fields: - if (hasattr(self, base.get_attrname(field)) and - self[field] != current[field]): - self[field] = current[field] - - @base.remotable - def soft_delete(self, context=None): - """Soft Delete the :class:`Strategy` from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - """ - self.dbapi.soft_delete_strategy(self.id) diff --git a/watcher/objects/utils.py b/watcher/objects/utils.py deleted file mode 100644 index 1146832..0000000 --- a/watcher/objects/utils.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Utility methods for objects""" - -import ast -import datetime - -import iso8601 -import netaddr -from oslo_utils import timeutils -import six - -from watcher._i18n import _ - - -def datetime_or_none(value, tzinfo_aware=False): - """Validate a datetime or None value.""" - if value is None: - return None - if isinstance(value, six.string_types): - # NOTE(danms): Being tolerant of isotime strings here will help us - # during our objects transition - value = timeutils.parse_isotime(value) - elif not isinstance(value, datetime.datetime): - raise ValueError( - _("A datetime.datetime is required here. Got %s"), value) - - if value.utcoffset() is None and tzinfo_aware: - # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC, - # but are returned without a timezone attached. - # As a transitional aid, assume a tz-naive object is in UTC. - value = value.replace(tzinfo=iso8601.iso8601.Utc()) - elif not tzinfo_aware: - value = value.replace(tzinfo=None) - - return value - - -def datetime_or_str_or_none(val, tzinfo_aware=False): - if isinstance(val, six.string_types): - return timeutils.parse_isotime(val) - return datetime_or_none(val, tzinfo_aware=tzinfo_aware) - - -def numeric_or_none(val): - """Attempt to parse an integer value, or None.""" - if val is None: - return val - else: - f_val = float(val) - return f_val if not f_val.is_integer() else val - - -def int_or_none(val): - """Attempt to parse an integer value, or None.""" - if val is None: - return val - else: - return int(val) - - -def str_or_none(val): - """Attempt to stringify a value to unicode, or None.""" - if val is None: - return val - else: - return six.text_type(val) - - -def dict_or_none(val): - """Attempt to dictify a value, or None.""" - if val is None: - return {} - elif isinstance(val, six.string_types): - return dict(ast.literal_eval(val)) - else: - try: - return dict(val) - except ValueError: - return {} - - -def list_or_none(val): - """Attempt to listify a value, or None.""" - if val is 
None: - return [] - elif isinstance(val, six.string_types): - return list(ast.literal_eval(val)) - else: - try: - return list(val) - except ValueError: - return [] - - -def ip_or_none(version): - """Return a version-specific IP address validator.""" - def validator(val, version=version): - if val is None: - return val - else: - return netaddr.IPAddress(val, version=version) - return validator - - -def nested_object_or_none(objclass): - def validator(val, objclass=objclass): - if val is None or isinstance(val, objclass): - return val - raise ValueError(_("An object of class %s is required here") - % objclass) - return validator - - -def dt_serializer(name): - """Return a datetime serializer for a named attribute.""" - def serializer(self, name=name): - if getattr(self, name) is not None: - return datetime.datetime.isoformat(getattr(self, name)) - else: - return None - return serializer - - -def dt_deserializer(val): - """A deserializer method for datetime attributes.""" - if val is None: - return None - else: - return timeutils.parse_isotime(val) - - -def obj_serializer(name): - def serializer(self, name=name): - if getattr(self, name) is not None: - return getattr(self, name).obj_to_primitive() - else: - return None - return serializer diff --git a/watcher/tests/__init__.py b/watcher/tests/__init__.py deleted file mode 100644 index cdc336c..0000000 --- a/watcher/tests/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher import objects - -# NOTE(comstud): Make sure we have all of the objects loaded. We do this -# at module import time, because we may be using mock decorators in our -# tests that run at import time. -objects.register_all() diff --git a/watcher/tests/api/__init__.py b/watcher/tests/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/api/base.py b/watcher/tests/api/base.py deleted file mode 100644 index 6979347..0000000 --- a/watcher/tests/api/base.py +++ /dev/null @@ -1,291 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Base classes for API tests.""" - -# NOTE: Ported from ceilometer/tests/api.py (subsequently moved to -# ceilometer/tests/api/__init__.py). This should be oslo'ified: -# https://bugs.launchpad.net/watcher/+bug/1255115. 
- -# NOTE(deva): import auth_token so we can override a config option - -import copy -import mock - -from oslo_config import cfg -import pecan -import pecan.testing -from six.moves.urllib import parse as urlparse - -from watcher.api import hooks -from watcher.common import context as watcher_context -from watcher.notifications import service as n_service -from watcher.tests.db import base - -PATH_PREFIX = '/v1' - - -class FunctionalTest(base.DbTestCase): - """Pecan controller functional testing class. - - Used for functional tests of Pecan controllers where you need to - test your literal application and its integration with the - framework. - """ - - SOURCE_DATA = {'test_source': {'somekey': '666'}} - - def setUp(self): - super(FunctionalTest, self).setUp() - cfg.CONF.set_override("auth_version", "v2.0", - group='keystone_authtoken') - cfg.CONF.set_override("admin_user", "admin", - group='keystone_authtoken') - - p_services = mock.patch.object(n_service, "send_service_update", - new_callable=mock.PropertyMock) - self.m_services = p_services.start() - self.addCleanup(p_services.stop) - - self.app = self._make_app() - - def reset_pecan(): - pecan.set_config({}, overwrite=True) - - self.addCleanup(reset_pecan) - - def _make_app(self, enable_acl=False): - # Determine where we are so we can set up paths in the config - root_dir = self.get_path() - - self.config = { - 'app': { - 'root': 'watcher.api.controllers.root.RootController', - 'modules': ['watcher.api'], - 'hooks': [ - hooks.ContextHook(), - hooks.NoExceptionTracebackHook() - ], - 'static_root': '%s/public' % root_dir, - 'template_path': '%s/api/templates' % root_dir, - 'enable_acl': enable_acl, - 'acl_public_routes': ['/', '/v1'], - }, - } - - return pecan.testing.load_test_app(self.config) - - def _request_json(self, path, params, expect_errors=False, headers=None, - method="post", extra_environ=None, status=None, - path_prefix=PATH_PREFIX): - """Sends simulated HTTP request to Pecan test app. 
- - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param method: Request method type. Appropriate method function call - should be used rather than passing attribute in. - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - :param path_prefix: prefix of the url path - """ - full_path = path_prefix + path - print('%s: %s %s' % (method.upper(), full_path, params)) - - response = getattr(self.app, "%s_json" % method)( - str(full_path), - params=params, - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors - ) - print('GOT:%s' % response) - return response - - def put_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PUT request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="put") - - def post_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP POST request to Pecan test app. 
- - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="post") - - def patch_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PATCH request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="patch") - - def delete(self, path, expect_errors=False, headers=None, - extra_environ=None, status=None, path_prefix=PATH_PREFIX): - """Sends simulated HTTP DELETE request to Pecan test app. 
- - :param path: url path of target service - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - :param path_prefix: prefix of the url path - """ - full_path = path_prefix + path - print('DELETE: %s' % (full_path)) - response = self.app.delete(str(full_path), - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors) - print('GOT:%s' % response) - return response - - def get_json(self, path, expect_errors=False, headers=None, - extra_environ=None, q=[], path_prefix=PATH_PREFIX, **params): - """Sends simulated HTTP GET request to Pecan test app. - - :param path: url path of target service - :param expect_errors: Boolean value;whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param q: list of queries consisting of: field, value, op, and type - keys - :param path_prefix: prefix of the url path - :param params: content for wsgi.input of request - """ - full_path = path_prefix + path - query_params = {'q.field': [], - 'q.value': [], - 'q.op': [], - } - for query in q: - for name in ['field', 'op', 'value']: - query_params['q.%s' % name].append(query.get(name, '')) - all_params = {} - all_params.update(params) - if q: - all_params.update(query_params) - print('GET: %s %r' % (full_path, all_params)) - - response = self.app.get(full_path, - params=all_params, - headers=headers, - extra_environ=extra_environ, - expect_errors=expect_errors) - if not expect_errors: - response = response.json - print('GOT:%s' % response) - return response - - def validate_link(self, link, bookmark=False): - """Checks if the given link can get 
correct data.""" - # removes the scheme and net location parts of the link - url_parts = list(urlparse.urlparse(link)) - url_parts[0] = url_parts[1] = '' - - # bookmark link should not have the version in the URL - if bookmark and url_parts[2].startswith(PATH_PREFIX): - return False - - full_path = urlparse.urlunparse(url_parts) - try: - self.get_json(full_path, path_prefix='') - return True - except Exception: - return False - - -class AdminRoleTest(base.DbTestCase): - def setUp(self): - super(AdminRoleTest, self).setUp() - token_info = { - 'token': { - 'project': { - 'id': 'admin' - }, - 'user': { - 'id': 'admin' - } - } - } - self.context = watcher_context.RequestContext( - auth_token_info=token_info, - project_id='admin', - user_id='admin') - - def make_context(*args, **kwargs): - # If context hasn't been constructed with token_info - if not kwargs.get('auth_token_info'): - kwargs['auth_token_info'] = copy.deepcopy(token_info) - if not kwargs.get('project_id'): - kwargs['project_id'] = 'admin' - if not kwargs.get('user_id'): - kwargs['user_id'] = 'admin' - if not kwargs.get('roles'): - kwargs['roles'] = ['admin'] - - context = watcher_context.RequestContext(*args, **kwargs) - return watcher_context.RequestContext.from_dict(context.to_dict()) - - p = mock.patch.object(watcher_context, 'make_context', - side_effect=make_context) - self.mock_make_context = p.start() - self.addCleanup(p.stop) diff --git a/watcher/tests/api/test_base.py b/watcher/tests/api/test_base.py deleted file mode 100644 index 8e5860c..0000000 --- a/watcher/tests/api/test_base.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher.tests.api import base - - -class TestBase(base.FunctionalTest): - - def test_api_setup(self): - pass - - def test_bad_uri(self): - response = self.get_json('/bad/path', - expect_errors=True, - headers={"Accept": "application/json"}) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) diff --git a/watcher/tests/api/test_hooks.py b/watcher/tests/api/test_hooks.py deleted file mode 100644 index 34df0cb..0000000 --- a/watcher/tests/api/test_hooks.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for the Pecan API hooks.""" - -from __future__ import unicode_literals - -import mock -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_serialization import jsonutils -import six -from six.moves import http_client - -from watcher.api.controllers import root -from watcher.api import hooks -from watcher.common import context -from watcher.tests.api import base - - -class FakeRequest(object): - def __init__(self, headers, context, environ): - self.headers = headers - self.context = context - self.environ = environ or {} - self.version = (1, 0) - self.host_url = 'http://127.0.0.1:6385' - - -class FakeRequestState(object): - def __init__(self, headers=None, context=None, environ=None): - self.request = FakeRequest(headers, context, environ) - self.response = FakeRequest(headers, context, environ) - - def set_context(self): - headers = self.request.headers - creds = { - 'user': headers.get('X-User') or headers.get('X-User-Id'), - 'domain_id': headers.get('X-User-Domain-Id'), - 'domain_name': headers.get('X-User-Domain-Name'), - 'auth_token': headers.get('X-Auth-Token'), - 'roles': headers.get('X-Roles', '').split(','), - } - is_admin = ('admin' in creds['roles'] or - 'administrator' in creds['roles']) - is_public_api = self.request.environ.get('is_public_api', False) - - self.request.context = context.RequestContext( - is_admin=is_admin, is_public_api=is_public_api, **creds) - - -def fake_headers(admin=False): - headers = { - 'X-Auth-Token': '8d9f235ca7464dd7ba46f81515797ea0', - 'X-Domain-Id': 'None', - 'X-Domain-Name': 'None', - 'X-Project-Domain-Id': 'default', - 'X-Project-Domain-Name': 'Default', - 'X-Role': '_member_,admin', - 'X-Roles': '_member_,admin', - # 'X-Tenant': 'foo', - # 'X-Tenant-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', - # 'X-Tenant-Name': 'foo', - 'X-User': 'foo', - 'X-User-Domain-Id': 'default', - 'X-User-Domain-Name': 'Default', - 'X-User-Id': '604ab2a197c442c2a84aba66708a9e1e', - 'X-User-Name': 'foo', - } - if 
admin: - headers.update({ - 'X-Project-Name': 'admin', - 'X-Role': '_member_,admin', - 'X-Roles': '_member_,admin', - 'X-Tenant': 'admin', - # 'X-Tenant-Name': 'admin', - # 'X-Tenant': 'admin' - 'X-Tenant-Name': 'admin', - 'X-Tenant-Id': 'c2a3a69d456a412376efdd9dac38', - 'X-Project-Name': 'admin', - 'X-Project-Id': 'c2a3a69d456a412376efdd9dac38', - }) - else: - headers.update({ - 'X-Role': '_member_', - 'X-Roles': '_member_', - 'X-Tenant': 'foo', - 'X-Tenant-Name': 'foo', - 'X-Tenant-Id': 'b4efa69d,4ffa4973863f2eefc094f7f8', - 'X-Project-Name': 'foo', - 'X-Project-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', - }) - return headers - - -class TestNoExceptionTracebackHook(base.FunctionalTest): - - TRACE = ['Traceback (most recent call last):', - ' File "/opt/stack/watcher/watcher/common/rpc/amqp.py",' - ' line 434, in _process_data\\n **args)', - ' File "/opt/stack/watcher/watcher/common/rpc/' - 'dispatcher.py", line 172, in dispatch\\n result =' - ' getattr(proxyobj, method)(ctxt, **kwargs)'] - MSG_WITHOUT_TRACE = "Test exception message." 
- MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE) - - def setUp(self): - super(TestNoExceptionTracebackHook, self).setUp() - p = mock.patch.object(root.Root, 'convert') - self.root_convert_mock = p.start() - self.addCleanup(p.stop) - cfg.CONF.set_override('debug', False) - - def test_hook_exception_success(self): - self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) - - def test_hook_remote_error_success(self): - test_exc_type = 'TestException' - self.root_convert_mock.side_effect = messaging.rpc.RemoteError( - test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - # NOTE(max_lobur): For RemoteError the client message will still have - # some garbage because in RemoteError traceback is serialized as a list - # instead of'\n'.join(trace). But since RemoteError is kind of very - # rare thing (happens due to wrong deserialization settings etc.) - # we don't care about this garbage. 
- expected_msg = ("Remote error: %s %s" - % (test_exc_type, self.MSG_WITHOUT_TRACE) - + ("\n[u'" if six.PY2 else "\n['")) - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - self.assertEqual(expected_msg, actual_msg) - - def _test_hook_without_traceback(self): - msg = "Error message without traceback \n but \n multiline" - self.root_convert_mock.side_effect = Exception(msg) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - self.assertEqual(msg, actual_msg) - - def test_hook_without_traceback(self): - self._test_hook_without_traceback() - - def test_hook_without_traceback_debug(self): - cfg.CONF.set_override('debug', True) - self._test_hook_without_traceback() - - def _test_hook_on_serverfault(self): - self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - return actual_msg - - def test_hook_on_serverfault(self): - cfg.CONF.set_override('debug', False) - msg = self._test_hook_on_serverfault() - self.assertEqual(self.MSG_WITHOUT_TRACE, msg) - - def test_hook_on_serverfault_debug(self): - cfg.CONF.set_override('debug', True) - msg = self._test_hook_on_serverfault() - self.assertEqual(self.MSG_WITH_TRACE, msg) - - def _test_hook_on_clientfault(self): - client_error = Exception(self.MSG_WITH_TRACE) - client_error.code = http_client.BAD_REQUEST - self.root_convert_mock.side_effect = client_error - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - return actual_msg - - def test_hook_on_clientfault(self): - msg = self._test_hook_on_clientfault() - self.assertEqual(self.MSG_WITHOUT_TRACE, msg) - - def test_hook_on_clientfault_debug_tracebacks(self): - 
cfg.CONF.set_override('debug', True) - msg = self._test_hook_on_clientfault() - self.assertEqual(self.MSG_WITH_TRACE, msg) - - -class TestContextHook(base.FunctionalTest): - @mock.patch.object(context, 'RequestContext') - def test_context_hook_not_admin(self, mock_ctx): - cfg.CONF.set_override( - 'auth_type', 'password', group='watcher_clients_auth') - headers = fake_headers(admin=False) - reqstate = FakeRequestState(headers=headers) - context_hook = hooks.ContextHook() - context_hook.before(reqstate) - mock_ctx.assert_called_with( - auth_token=headers['X-Auth-Token'], - user=headers['X-User'], - user_id=headers['X-User-Id'], - domain_id=headers['X-User-Domain-Id'], - domain_name=headers['X-User-Domain-Name'], - auth_url=cfg.CONF.keystone_authtoken.auth_uri, - project=headers['X-Project-Name'], - project_id=headers['X-Project-Id'], - show_deleted=None, - auth_token_info=self.token_info, - roles=headers['X-Roles'].split(',')) - - @mock.patch.object(context, 'RequestContext') - def test_context_hook_admin(self, mock_ctx): - cfg.CONF.set_override( - 'auth_type', 'password', group='watcher_clients_auth') - headers = fake_headers(admin=True) - reqstate = FakeRequestState(headers=headers) - context_hook = hooks.ContextHook() - context_hook.before(reqstate) - mock_ctx.assert_called_with( - auth_token=headers['X-Auth-Token'], - user=headers['X-User'], - user_id=headers['X-User-Id'], - domain_id=headers['X-User-Domain-Id'], - domain_name=headers['X-User-Domain-Name'], - auth_url=cfg.CONF.keystone_authtoken.auth_uri, - project=headers['X-Project-Name'], - project_id=headers['X-Project-Id'], - show_deleted=None, - auth_token_info=self.token_info, - roles=headers['X-Roles'].split(',')) - - @mock.patch.object(context, 'RequestContext') - def test_context_hook_public_api(self, mock_ctx): - cfg.CONF.set_override( - 'auth_type', 'password', group='watcher_clients_auth') - headers = fake_headers(admin=True) - env = {'is_public_api': True} - reqstate = 
FakeRequestState(headers=headers, environ=env) - context_hook = hooks.ContextHook() - context_hook.before(reqstate) - mock_ctx.assert_called_with( - auth_token=headers['X-Auth-Token'], - user=headers['X-User'], - user_id=headers['X-User-Id'], - domain_id=headers['X-User-Domain-Id'], - domain_name=headers['X-User-Domain-Name'], - auth_url=cfg.CONF.keystone_authtoken.auth_uri, - project=headers['X-Project-Name'], - project_id=headers['X-Project-Id'], - show_deleted=None, - auth_token_info=self.token_info, - roles=headers['X-Roles'].split(',')) diff --git a/watcher/tests/api/test_root.py b/watcher/tests/api/test_root.py deleted file mode 100644 index 7d93c30..0000000 --- a/watcher/tests/api/test_root.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher.tests.api import base - - -class TestRoot(base.FunctionalTest): - - def test_get_root(self): - data = self.get_json('/', path_prefix='') - self.assertEqual('v1', data['default_version']['id']) - # Check fields are not empty - [self.assertNotIn(f, ['', []]) for f in data.keys()] - - -class TestV1Root(base.FunctionalTest): - - def test_get_v1_root(self): - data = self.get_json('/') - self.assertEqual('v1', data['id']) - # Check fields are not empty - for f in data.keys(): - self.assertNotIn(f, ['', []]) - # Check if all known resources are present and there are no extra ones. 
- not_resources = ('id', 'links', 'media_types') - actual_resources = tuple(set(data.keys()) - set(not_resources)) - expected_resources = ('audit_templates', 'audits', 'actions', - 'action_plans', 'scoring_engines', - 'services') - self.assertEqual(sorted(expected_resources), sorted(actual_resources)) - - self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json', - 'base': 'application/json'}, data['media_types']) diff --git a/watcher/tests/api/test_scheduling.py b/watcher/tests/api/test_scheduling.py deleted file mode 100644 index b857afa..0000000 --- a/watcher/tests/api/test_scheduling.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from apscheduler.schedulers import background -import datetime -import freezegun -import mock - -from watcher.api import scheduling -from watcher.notifications import service -from watcher import objects -from watcher.tests import base -from watcher.tests.db import base as db_base -from watcher.tests.db import utils - - -class TestSchedulingService(base.TestCase): - - @mock.patch.object(background.BackgroundScheduler, 'start') - def test_start_scheduling_service(self, m_start): - scheduler = scheduling.APISchedulingService() - scheduler.start() - m_start.assert_called_once_with(scheduler) - jobs = scheduler.get_jobs() - self.assertEqual(1, len(jobs)) - - -class TestSchedulingServiceFunctions(db_base.DbTestCase): - - def setUp(self): - super(TestSchedulingServiceFunctions, self).setUp() - fake_service = utils.get_test_service( - created_at=datetime.datetime.utcnow()) - self.fake_service = objects.Service(**fake_service) - - @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(service, 'send_service_update') - def test_get_services_status_without_services_in_list( - self, mock_service_update, mock_get_list, mock_service_status): - scheduler = scheduling.APISchedulingService() - mock_get_list.return_value = [self.fake_service] - mock_service_status.return_value = 'ACTIVE' - scheduler.get_services_status(mock.ANY) - mock_service_status.assert_called_once_with(mock.ANY, - self.fake_service.id) - - mock_service_update.assert_not_called() - - @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(service, 'send_service_update') - def test_get_services_status_with_services_in_list_same_status( - self, mock_service_update, mock_get_list, mock_service_status): - scheduler = scheduling.APISchedulingService() - mock_get_list.return_value = [self.fake_service] - scheduler.services_status = {1: 'ACTIVE'} 
- mock_service_status.return_value = 'ACTIVE' - scheduler.get_services_status(mock.ANY) - mock_service_status.assert_called_once_with(mock.ANY, - self.fake_service.id) - - mock_service_update.assert_not_called() - - @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(service, 'send_service_update') - def test_get_services_status_with_services_in_list_diff_status( - self, mock_service_update, mock_get_list, mock_service_status): - scheduler = scheduling.APISchedulingService() - mock_get_list.return_value = [self.fake_service] - scheduler.services_status = {1: 'FAILED'} - mock_service_status.return_value = 'ACTIVE' - scheduler.get_services_status(mock.ANY) - mock_service_status.assert_called_once_with(mock.ANY, - self.fake_service.id) - - mock_service_update.assert_called_once_with(mock.ANY, - self.fake_service, - state='ACTIVE') - - @mock.patch.object(objects.Service, 'get') - def test_get_service_status_failed_service( - self, mock_get): - scheduler = scheduling.APISchedulingService() - mock_get.return_value = self.fake_service - service_status = scheduler.get_service_status(mock.ANY, - self.fake_service.id) - mock_get.assert_called_once_with(mock.ANY, - self.fake_service.id) - self.assertEqual('FAILED', service_status) - - @freezegun.freeze_time('2016-09-22T08:32:26.219414') - @mock.patch.object(objects.Service, 'get') - def test_get_service_status_failed_active( - self, mock_get): - scheduler = scheduling.APISchedulingService() - mock_get.return_value = self.fake_service - service_status = scheduler.get_service_status(mock.ANY, - self.fake_service.id) - mock_get.assert_called_once_with(mock.ANY, - self.fake_service.id) - self.assertEqual('ACTIVE', service_status) diff --git a/watcher/tests/api/test_utils.py b/watcher/tests/api/test_utils.py deleted file mode 100644 index 9c6d0ce..0000000 --- a/watcher/tests/api/test_utils.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- encoding: 
utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -from oslo_config import cfg -import wsme - -from watcher.api.controllers.v1 import utils as v1_utils -from watcher.tests import base - - -class TestApiUtilsValidScenarios(base.TestCase): - - scenarios = [ - ("limit=None + max_limit=None", - {"limit": None, "max_limit": None, "expected": None}), - ("limit=None + max_limit=1", - {"limit": None, "max_limit": 1, "expected": 1}), - # ("limit=0 + max_limit=None", - # {"limit": 0, "max_limit": None, "expected": 0}), - ("limit=1 + max_limit=None", - {"limit": 1, "max_limit": None, "expected": 1}), - ("limit=1 + max_limit=1", - {"limit": 1, "max_limit": 1, "expected": 1}), - ("limit=2 + max_limit=1", - {"limit": 2, "max_limit": 1, "expected": 1}), - ] - - def test_validate_limit(self): - cfg.CONF.set_override("max_limit", self.max_limit, group="api") - actual_limit = v1_utils.validate_limit(self.limit) - self.assertEqual(self.expected, actual_limit) - - -class TestApiUtilsInvalidScenarios(base.TestCase): - - scenarios = [ - ("limit=0 + max_limit=None", {"limit": 0, "max_limit": None}), - ] - - def test_validate_limit_invalid_cases(self): - cfg.CONF.set_override("max_limit", self.max_limit, group="api") - self.assertRaises( - wsme.exc.ClientSideError, v1_utils.validate_limit, self.limit - ) diff --git a/watcher/tests/api/utils.py b/watcher/tests/api/utils.py deleted file mode 100644 index 
221eb76..0000000 --- a/watcher/tests/api/utils.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utils for testing the API service. -""" - -import datetime -from oslo_serialization import jsonutils - -from watcher.api.controllers.v1 import action as action_ctrl -from watcher.api.controllers.v1 import action_plan as action_plan_ctrl -from watcher.api.controllers.v1 import audit as audit_ctrl -from watcher.api.controllers.v1 import audit_template as audit_template_ctrl -from watcher.tests.db import utils as db_utils - - -ADMIN_TOKEN = '4562138218392831' -MEMBER_TOKEN = '4562138218392832' - - -class FakeMemcache(object): - """Fake cache that is used for keystone tokens lookup.""" - - _cache = { - 'tokens/%s' % ADMIN_TOKEN: { - 'access': { - 'token': {'id': ADMIN_TOKEN, - 'expires': '2100-09-11T00:00:00'}, - 'user': {'id': 'user_id1', - 'name': 'user_name1', - 'tenantId': '123i2910', - 'tenantName': 'mytenant', - 'roles': [{'name': 'admin'}] - }, - } - }, - 'tokens/%s' % MEMBER_TOKEN: { - 'access': { - 'token': {'id': MEMBER_TOKEN, - 'expires': '2100-09-11T00:00:00'}, - 'user': {'id': 'user_id2', - 'name': 'user-good', - 'tenantId': 'project-good', - 'tenantName': 'goodies', - 'roles': [{'name': 'Member'}] - } - } - } - } - - def __init__(self): - self.set_key = None - self.set_value = None - self.token_expiration = None - - def get(self, key): - dt = datetime.datetime.utcnow() + 
datetime.timedelta(minutes=5) - return jsonutils.dumps((self._cache.get(key), dt.isoformat())) - - def set(self, key, value, time=0, min_compress_len=0): - self.set_value = value - self.set_key = key - - -def remove_internal(values, internal): - # NOTE(yuriyz): internal attributes should not be posted, except uuid - int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid'] - return dict( - (k, v) for (k, v) in values.items() if k not in int_attr - ) - - -def audit_post_data(**kw): - audit = db_utils.get_test_audit(**kw) - internal = audit_ctrl.AuditPatchType.internal_attrs() - return remove_internal(audit, internal) - - -def audit_template_post_data(**kw): - attrs = audit_template_ctrl.AuditTemplatePostType._wsme_attributes - audit_template = db_utils.get_test_audit_template() - fields = [field.key for field in attrs] - post_data = {k: v for k, v in audit_template.items() if k in fields} - post_data.update({k: v for k, v in kw.items() if k in fields}) - return post_data - - -def action_post_data(**kw): - action = db_utils.get_test_action(**kw) - internal = action_ctrl.ActionPatchType.internal_attrs() - return remove_internal(action, internal) - - -def action_plan_post_data(**kw): - act_plan = db_utils.get_test_action_plan(**kw) - internal = action_plan_ctrl.ActionPlanPatchType.internal_attrs() - return remove_internal(act_plan, internal) diff --git a/watcher/tests/api/v1/__init__.py b/watcher/tests/api/v1/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/api/v1/test_actions.py b/watcher/tests/api/v1/test_actions.py deleted file mode 100644 index ca5881f..0000000 --- a/watcher/tests/api/v1/test_actions.py +++ /dev/null @@ -1,509 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils -from wsme import types as wtypes - -from watcher.api.controllers.v1 import action as api_action -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.api import utils as api_utils -from watcher.tests import base -from watcher.tests.db import utils as db_utils -from watcher.tests.objects import utils as obj_utils - - -def post_get_test_action(**kw): - action = api_utils.action_post_data(**kw) - action_plan = db_utils.get_test_action_plan() - del action['action_plan_id'] - action['action_plan_uuid'] = kw.get('action_plan_uuid', - action_plan['uuid']) - action['parents'] = None - return action - - -class TestActionObject(base.TestCase): - - def test_action_init(self): - action_dict = api_utils.action_post_data(action_plan_id=None, - parents=None) - del action_dict['state'] - action = api_action.Action(**action_dict) - self.assertEqual(wtypes.Unset, action.state) - - -class TestListAction(api_base.FunctionalTest): - - def setUp(self): - super(TestListAction, self).setUp() - self.goal = obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan(self.context) - - def test_empty(self): - response = self.get_json('/actions') - self.assertEqual([], response['actions']) - - def 
_assert_action_fields(self, action): - action_fields = ['uuid', 'state', 'action_plan_uuid', 'action_type'] - for field in action_fields: - self.assertIn(field, action) - - def test_one(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions') - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - def test_one_soft_deleted(self): - action = obj_utils.create_test_action(self.context, parents=None) - action.soft_delete() - response = self.get_json('/actions', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - response = self.get_json('/actions') - self.assertEqual([], response['actions']) - - def test_get_one(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions/%s' % action['uuid']) - self.assertEqual(action.uuid, response['uuid']) - self.assertEqual(action.action_type, response['action_type']) - self.assertEqual(action.input_parameters, response['input_parameters']) - self._assert_action_fields(response) - - def test_get_one_soft_deleted(self): - action = obj_utils.create_test_action(self.context, parents=None) - action.soft_delete() - response = self.get_json('/actions/%s' % action['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action.uuid, response['uuid']) - self._assert_action_fields(response) - - response = self.get_json('/actions/%s' % action['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions/detail') - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - def test_detail_soft_deleted(self): - action = 
obj_utils.create_test_action(self.context, parents=None) - action.soft_delete() - response = self.get_json('/actions/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - response = self.get_json('/actions/detail') - self.assertEqual([], response['actions']) - - def test_detail_against_single(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions/%s/detail' % action['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - action_list = [] - for id_ in range(5): - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - response = self.get_json('/actions') - self.assertEqual(len(action_list), len(response['actions'])) - uuids = [s['uuid'] for s in response['actions']] - self.assertEqual(sorted(action_list), sorted(uuids)) - - def test_many_with_action_plan_uuid(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - id=2, - uuid=utils.generate_uuid(), - audit_id=1) - action_list = [] - for id_ in range(5): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=2, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - response = self.get_json('/actions') - self.assertEqual(len(action_list), len(response['actions'])) - for action in response['actions']: - self.assertEqual(action_plan.uuid, action['action_plan_uuid']) - - def test_filter_by_audit_uuid(self): - action_plan_1 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid()) - action_list = [] - - for id_ in range(3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_1.id, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - - audit2 = obj_utils.create_test_audit( - self.context, id=2, 
uuid=utils.generate_uuid()) - action_plan_2 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=audit2.id) - - for id_ in range(4, 5, 6): - obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_2.id, - uuid=utils.generate_uuid()) - - response = self.get_json('/actions?audit_uuid=%s' % self.audit.uuid) - self.assertEqual(len(action_list), len(response['actions'])) - for action in response['actions']: - self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) - - def test_filter_by_action_plan_uuid(self): - action_plan_1 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - action_list = [] - - for id_ in range(3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_1.id, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - - action_plan_2 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - - for id_ in range(4, 5, 6): - obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_2.id, - uuid=utils.generate_uuid()) - - response = self.get_json( - '/actions?action_plan_uuid=%s' % action_plan_1.uuid) - self.assertEqual(len(action_list), len(response['actions'])) - for action in response['actions']: - self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) - - response = self.get_json( - '/actions?action_plan_uuid=%s' % action_plan_2.uuid) - for action in response['actions']: - self.assertEqual(action_plan_2.uuid, action['action_plan_uuid']) - - def test_details_and_filter_by_action_plan_uuid(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - - for id_ in range(1, 3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan.id, - uuid=utils.generate_uuid()) - - response = 
self.get_json( - '/actions/detail?action_plan_uuid=%s' % action_plan.uuid) - for action in response['actions']: - self.assertEqual(action_plan.uuid, action['action_plan_uuid']) - - def test_details_and_filter_by_audit_uuid(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - - for id_ in range(1, 3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan.id, - uuid=utils.generate_uuid()) - - response = self.get_json( - '/actions/detail?audit_uuid=%s' % self.audit.uuid) - for action in response['actions']: - self.assertEqual(action_plan.uuid, action['action_plan_uuid']) - - def test_filter_by_action_plan_and_audit_uuids(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - url = '/actions?action_plan_uuid=%s&audit_uuid=%s' % ( - action_plan.uuid, self.audit.uuid) - response = self.get_json(url, expect_errors=True) - self.assertEqual(400, response.status_int) - - def test_many_with_soft_deleted_action_plan_uuid(self): - action_plan1 = obj_utils.create_test_action_plan( - self.context, - id=2, - uuid=utils.generate_uuid(), - audit_id=1) - action_plan2 = obj_utils.create_test_action_plan( - self.context, - id=3, - uuid=utils.generate_uuid(), - audit_id=1) - - ap1_action_list = [] - ap2_action_list = [] - - for id_ in range(0, 2): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan1.id, - uuid=utils.generate_uuid()) - ap1_action_list.append(action) - - for id_ in range(2, 4): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan2.id, - uuid=utils.generate_uuid()) - ap2_action_list.append(action) - - self.delete('/action_plans/%s' % action_plan1.uuid) - - response = self.get_json('/actions') - # We deleted the actions from the 1st action plan so we've got 2 left - 
self.assertEqual(len(ap2_action_list), len(response['actions'])) - - # We deleted them so that's normal - self.assertEqual([], - [act for act in response['actions'] - if act['action_plan_uuid'] == action_plan1.uuid]) - - # Here are the 2 actions left - self.assertEqual( - set([act.as_dict()['uuid'] for act in ap2_action_list]), - set([act['uuid'] for act in response['actions'] - if act['action_plan_uuid'] == action_plan2.uuid])) - - def test_many_with_parents(self): - action_list = [] - for id_ in range(5): - if id_ > 0: - action = obj_utils.create_test_action( - self.context, id=id_, uuid=utils.generate_uuid(), - parents=[action_list[id_ - 1]]) - else: - action = obj_utils.create_test_action( - self.context, id=id_, uuid=utils.generate_uuid(), - parents=[]) - action_list.append(action.uuid) - response = self.get_json('/actions') - response_actions = response['actions'] - for id_ in range(4): - self.assertEqual(response_actions[id_]['uuid'], - response_actions[id_ + 1]['parents'][0]) - - def test_many_without_soft_deleted(self): - action_list = [] - for id_ in [1, 2, 3]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - for id_ in [4, 5]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action.soft_delete() - response = self.get_json('/actions') - self.assertEqual(3, len(response['actions'])) - uuids = [s['uuid'] for s in response['actions']] - self.assertEqual(sorted(action_list), sorted(uuids)) - - def test_many_with_soft_deleted(self): - action_list = [] - for id_ in [1, 2, 3]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - for id_ in [4, 5]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action.soft_delete() - action_list.append(action.uuid) - response = self.get_json('/actions', - headers={'X-Show-Deleted': 
'True'}) - self.assertEqual(5, len(response['actions'])) - uuids = [s['uuid'] for s in response['actions']] - self.assertEqual(sorted(action_list), sorted(uuids)) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_action(self.context, id=1, uuid=uuid) - response = self.get_json('/actions/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - parents = None - for id_ in range(5): - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid(), - parents=parents) - parents = [action.id] - response = self.get_json('/actions/?limit=3') - self.assertEqual(3, len(response['actions'])) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - response = self.get_json('/actions') - self.assertEqual(3, len(response['actions'])) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - obj_utils.create_test_action_plan(self.context) - self.action = obj_utils.create_test_action(self.context, parents=None) - p = mock.patch.object(db_api.BaseConnection, 'update_action') - self.mock_action_update = p.start() - self.mock_action_update.side_effect = self._simulate_rpc_action_update - self.addCleanup(p.stop) - - def _simulate_rpc_action_update(self, action): - action.save() - return action - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_patch_not_allowed(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - 
mock_utcnow.return_value = test_time - new_state = objects.audit.State.SUCCEEDED - response = self.get_json('/actions/%s' % self.action.uuid) - self.assertNotEqual(new_state, response['state']) - - response = self.patch_json( - '/actions/%s' % self.action.uuid, - [{'path': '/state', 'value': new_state, 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(403, response.status_int) - self.assertTrue(response.json['error_message']) - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - self.goal = obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan(self.context) - self.action = obj_utils.create_test_action(self.context, parents=None) - p = mock.patch.object(db_api.BaseConnection, 'update_action') - self.mock_action_update = p.start() - self.mock_action_update.side_effect = self._simulate_rpc_action_update - self.addCleanup(p.stop) - - def _simulate_rpc_action_update(self, action): - action.save() - return action - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_delete_action_not_allowed(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - response = self.delete('/actions/%s' % self.action.uuid, - expect_errors=True) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestActionPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestActionPolicyEnforcement, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - obj_utils.create_test_action_plan(self.context) - - def _common_policy_check(self, rule, 
func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "action:get_all", self.get_json, '/actions', - expect_errors=True) - - def test_policy_disallow_get_one(self): - action = obj_utils.create_test_action(self.context) - self._common_policy_check( - "action:get", self.get_json, - '/actions/%s' % action.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "action:detail", self.get_json, - '/actions/detail', - expect_errors=True) - - -class TestActionPolicyEnforcementWithAdminContext(TestListAction, - api_base.AdminRoleTest): - - def setUp(self): - super(TestActionPolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "action:detail": "rule:default", - "action:get": "rule:default", - "action:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_actions_plans.py b/watcher/tests/api/v1/test_actions_plans.py deleted file mode 100644 index b417437..0000000 --- a/watcher/tests/api/v1/test_actions_plans.py +++ /dev/null @@ -1,625 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import itertools -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils - -from watcher.applier import rpcapi as aapi -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListActionPlan(api_base.FunctionalTest): - - def setUp(self): - super(TestListActionPlan, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - def test_empty(self): - response = self.get_json('/action_plans') - self.assertEqual([], response['action_plans']) - - def _assert_action_plans_fields(self, action_plan): - action_plan_fields = [ - 'uuid', 'audit_uuid', 'strategy_uuid', 'strategy_name', - 'state', 'global_efficacy', 'efficacy_indicators'] - for field in action_plan_fields: - self.assertIn(field, action_plan) - - def test_one(self): - action_plan = obj_utils.create_test_action_plan(self.context) - response = self.get_json('/action_plans') - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - self._assert_action_plans_fields(response['action_plans'][0]) - - def test_one_soft_deleted(self): - action_plan = obj_utils.create_test_action_plan(self.context) - action_plan.soft_delete() - response = self.get_json('/action_plans', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - 
self._assert_action_plans_fields(response['action_plans'][0]) - - response = self.get_json('/action_plans') - self.assertEqual([], response['action_plans']) - - def test_get_one_ok(self): - action_plan = obj_utils.create_test_action_plan(self.context) - obj_utils.create_test_efficacy_indicator( - self.context, action_plan_id=action_plan['id']) - response = self.get_json('/action_plans/%s' % action_plan['uuid']) - self.assertEqual(action_plan.uuid, response['uuid']) - self._assert_action_plans_fields(response) - self.assertEqual( - [{'description': 'Test indicator', - 'name': 'test_indicator', - 'value': 0.0, - 'unit': '%'}], - response['efficacy_indicators']) - - def test_get_one_soft_deleted(self): - action_plan = obj_utils.create_test_action_plan(self.context) - action_plan.soft_delete() - response = self.get_json('/action_plans/%s' % action_plan['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action_plan.uuid, response['uuid']) - self._assert_action_plans_fields(response) - - response = self.get_json('/action_plans/%s' % action_plan['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - action_plan = obj_utils.create_test_action_plan(self.context) - response = self.get_json('/action_plans/detail') - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - self._assert_action_plans_fields(response['action_plans'][0]) - - def test_detail_soft_deleted(self): - action_plan = obj_utils.create_test_action_plan(self.context) - action_plan.soft_delete() - response = self.get_json('/action_plans/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - self._assert_action_plans_fields(response['action_plans'][0]) - - response = self.get_json('/action_plans/detail') - self.assertEqual([], response['action_plans']) - - def test_detail_against_single(self): - action_plan = obj_utils.create_test_action_plan(self.context) - 
response = self.get_json( - '/action_plan/%s/detail' % action_plan['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - action_plan_list = [] - for id_ in range(5): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan_list.append(action_plan.uuid) - response = self.get_json('/action_plans') - self.assertEqual(len(action_plan_list), len(response['action_plans'])) - uuids = [s['uuid'] for s in response['action_plans']] - self.assertEqual(sorted(action_plan_list), sorted(uuids)) - - def test_many_with_soft_deleted_audit_uuid(self): - action_plan_list = [] - audit1 = obj_utils.create_test_audit(self.context, - id=2, - uuid=utils.generate_uuid()) - audit2 = obj_utils.create_test_audit(self.context, - id=3, - uuid=utils.generate_uuid()) - - for id_ in range(0, 2): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit1.id) - action_plan_list.append(action_plan.uuid) - - for id_ in range(2, 4): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit2.id) - action_plan_list.append(action_plan.uuid) - - self.delete('/audits/%s' % audit1.uuid) - - response = self.get_json('/action_plans') - - self.assertEqual(len(action_plan_list), len(response['action_plans'])) - - for id_ in range(0, 2): - action_plan = response['action_plans'][id_] - self.assertIsNone(action_plan['audit_uuid']) - - for id_ in range(2, 4): - action_plan = response['action_plans'][id_] - self.assertEqual(audit2.uuid, action_plan['audit_uuid']) - - def test_many_with_audit_uuid(self): - action_plan_list = [] - audit = obj_utils.create_test_audit(self.context, - id=2, - uuid=utils.generate_uuid()) - for id_ in range(2, 5): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit.id) - 
action_plan_list.append(action_plan.uuid) - response = self.get_json('/action_plans') - self.assertEqual(len(action_plan_list), len(response['action_plans'])) - for action in response['action_plans']: - self.assertEqual(audit.uuid, action['audit_uuid']) - - def test_many_with_audit_uuid_filter(self): - action_plan_list1 = [] - audit1 = obj_utils.create_test_audit(self.context, - id=2, - uuid=utils.generate_uuid()) - for id_ in range(2, 5): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit1.id) - action_plan_list1.append(action_plan.uuid) - - audit2 = obj_utils.create_test_audit(self.context, - id=3, - uuid=utils.generate_uuid()) - action_plan_list2 = [] - for id_ in [5, 6, 7]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit2.id) - action_plan_list2.append(action_plan.uuid) - - response = self.get_json('/action_plans?audit_uuid=%s' % audit2.uuid) - self.assertEqual(len(action_plan_list2), len(response['action_plans'])) - for action in response['action_plans']: - self.assertEqual(audit2.uuid, action['audit_uuid']) - - def test_many_without_soft_deleted(self): - action_plan_list = [] - for id_ in [1, 2, 3]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan_list.append(action_plan.uuid) - for id_ in [4, 5]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan.soft_delete() - response = self.get_json('/action_plans') - self.assertEqual(3, len(response['action_plans'])) - uuids = [s['uuid'] for s in response['action_plans']] - self.assertEqual(sorted(action_plan_list), sorted(uuids)) - - def test_many_with_soft_deleted(self): - action_plan_list = [] - for id_ in [1, 2, 3]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - 
action_plan_list.append(action_plan.uuid) - for id_ in [4, 5]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan.soft_delete() - action_plan_list.append(action_plan.uuid) - response = self.get_json('/action_plans', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(5, len(response['action_plans'])) - uuids = [s['uuid'] for s in response['action_plans']] - self.assertEqual(sorted(action_plan_list), sorted(uuids)) - - def test_many_with_sort_key_audit_uuid(self): - audit_list = [] - for id_ in range(2, 5): - audit = obj_utils.create_test_audit(self.context, - id=id_, - uuid=utils.generate_uuid()) - obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit.id) - audit_list.append(audit.uuid) - - response = self.get_json('/action_plans/?sort_key=audit_uuid') - - self.assertEqual(3, len(response['action_plans'])) - uuids = [s['audit_uuid'] for s in response['action_plans']] - self.assertEqual(sorted(audit_list), uuids) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_action_plan(self.context, id=1, uuid=uuid) - response = self.get_json('/action_plans/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - response = self.get_json('/action_plans/?limit=3') - self.assertEqual(3, len(response['action_plans'])) - - next_marker = response['action_plans'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - 
obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - response = self.get_json('/action_plans') - self.assertEqual(3, len(response['action_plans'])) - - next_marker = response['action_plans'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan( - self.context) - p = mock.patch.object(db_api.BaseConnection, 'destroy_action_plan') - self.mock_action_plan_delete = p.start() - self.mock_action_plan_delete.side_effect = \ - self._simulate_rpc_action_plan_delete - self.addCleanup(p.stop) - - def _simulate_rpc_action_plan_delete(self, audit_uuid): - action_plan = objects.ActionPlan.get_by_uuid(self.context, audit_uuid) - action_plan.destroy() - - def test_delete_action_plan_without_action(self): - self.delete('/action_plans/%s' % self.action_plan.uuid) - response = self.get_json('/action_plans/%s' % self.action_plan.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_delete_action_plan_with_action(self): - action = obj_utils.create_test_action( - self.context, id=1) - - self.delete('/action_plans/%s' % self.action_plan.uuid) - ap_response = self.get_json('/action_plans/%s' % self.action_plan.uuid, - expect_errors=True) - acts_response = self.get_json( - '/actions/?action_plan_uuid=%s' % self.action_plan.uuid) - act_response = self.get_json( - '/actions/%s' % action.uuid, - expect_errors=True) - - # The action plan does not exist anymore - self.assertEqual(404, ap_response.status_int) - self.assertEqual('application/json', ap_response.content_type) - self.assertTrue(ap_response.json['error_message']) - 
- # Nor does the action - self.assertEqual(0, len(acts_response['actions'])) - self.assertEqual(404, act_response.status_int) - self.assertEqual('application/json', act_response.content_type) - self.assertTrue(act_response.json['error_message']) - - def test_delete_action_plan_not_found(self): - uuid = utils.generate_uuid() - response = self.delete('/action_plans/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan( - self.context, state=objects.action_plan.State.RECOMMENDED) - p = mock.patch.object(db_api.BaseConnection, 'update_action_plan') - self.mock_action_plan_update = p.start() - self.mock_action_plan_update.side_effect = \ - self._simulate_rpc_action_plan_update - self.addCleanup(p.stop) - - def _simulate_rpc_action_plan_update(self, action_plan): - action_plan.save() - return action_plan - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_denied(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_state = objects.action_plan.State.DELETED - response = self.get_json( - '/action_plans/%s' % self.action_plan.uuid) - self.assertNotEqual(new_state, response['state']) - - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/state', 'value': new_state, 'op': 'replace'}], - expect_errors=True) - - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_replace_non_existent_action_plan_denied(self): - response = 
self.patch_json( - '/action_plans/%s' % utils.generate_uuid(), - [{'path': '/state', - 'value': objects.action_plan.State.PENDING, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_add_non_existent_property_denied(self): - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_denied(self): - # We should not be able to remove the state of an action plan - response = self.get_json( - '/action_plans/%s' % self.action_plan.uuid) - self.assertIsNotNone(response['state']) - - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/state', 'op': 'remove'}], - expect_errors=True) - - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_uuid_denied(self): - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_non_existent_property_denied(self): - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan') - def test_replace_state_pending_ok(self, 
applier_mock): - new_state = objects.action_plan.State.PENDING - response = self.get_json( - '/action_plans/%s' % self.action_plan.uuid) - self.assertNotEqual(new_state, response['state']) - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/state', 'value': new_state, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - applier_mock.assert_called_once_with(mock.ANY, - self.action_plan.uuid) - - -ALLOWED_TRANSITIONS = [ - {"original_state": objects.action_plan.State.RECOMMENDED, - "new_state": objects.action_plan.State.PENDING}, - {"original_state": objects.action_plan.State.RECOMMENDED, - "new_state": objects.action_plan.State.CANCELLED}, - {"original_state": objects.action_plan.State.ONGOING, - "new_state": objects.action_plan.State.CANCELLING}, - {"original_state": objects.action_plan.State.PENDING, - "new_state": objects.action_plan.State.CANCELLED}, -] - - -class TestPatchStateTransitionDenied(api_base.FunctionalTest): - - STATES = [ - ap_state for ap_state in objects.action_plan.State.__dict__ - if not ap_state.startswith("_") - ] - - scenarios = [ - ( - "%s -> %s" % (original_state, new_state), - {"original_state": original_state, - "new_state": new_state}, - ) - for original_state, new_state - in list(itertools.product(STATES, STATES)) - # from DELETED to ... - # NOTE: Any state transition from DELETED (To RECOMMENDED, PENDING, - # ONGOING, CANCELLED, SUCCEEDED and FAILED) will cause a 404 Not Found - # because we cannot retrieve them with a GET (soft_deleted state). 
- # This is the reason why they are not listed here but they have a - # special test to cover it - if original_state != objects.action_plan.State.DELETED - and original_state != new_state - and {"original_state": original_state, - "new_state": new_state} not in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionDenied, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - @mock.patch.object( - db_api.BaseConnection, 'update_action_plan', - mock.Mock(side_effect=lambda ap: ap.save() or ap)) - def test_replace_state_pending_denied(self): - action_plan = obj_utils.create_test_action_plan( - self.context, state=self.original_state) - - initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - response = self.patch_json( - '/action_plans/%s' % action_plan.uuid, - [{'path': '/state', 'value': self.new_state, - 'op': 'replace'}], - expect_errors=True) - updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - - self.assertNotEqual(self.new_state, initial_ap['state']) - self.assertEqual(self.original_state, updated_ap['state']) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestPatchStateTransitionOk(api_base.FunctionalTest): - - scenarios = [ - ( - "%s -> %s" % (transition["original_state"], - transition["new_state"]), - transition - ) - for transition in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionOk, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - @mock.patch.object( - db_api.BaseConnection, 'update_action_plan', - mock.Mock(side_effect=lambda ap: ap.save() or ap)) - @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan', mock.Mock()) - def 
test_replace_state_pending_ok(self): - action_plan = obj_utils.create_test_action_plan( - self.context, state=self.original_state) - - initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - - response = self.patch_json( - '/action_plans/%s' % action_plan.uuid, - [{'path': '/state', 'value': self.new_state, 'op': 'replace'}]) - updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - - self.assertNotEqual(self.new_state, initial_ap['state']) - self.assertEqual(self.new_state, updated_ap['state']) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - -class TestActionPlanPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestActionPlanPolicyEnforcement, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "action_plan:get_all", self.get_json, '/action_plans', - expect_errors=True) - - def test_policy_disallow_get_one(self): - action_plan = obj_utils.create_test_action_plan(self.context) - self._common_policy_check( - "action_plan:get", self.get_json, - '/action_plans/%s' % action_plan.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "action_plan:detail", self.get_json, - '/action_plans/detail', - expect_errors=True) - - def test_policy_disallow_update(self): - action_plan = obj_utils.create_test_action_plan(self.context) - self._common_policy_check( - "action_plan:update", self.patch_json, - '/action_plans/%s' % action_plan.uuid, - [{'path': '/state', - 'value': objects.action_plan.State.DELETED, - 'op': 'replace'}], - expect_errors=True) - - def test_policy_disallow_delete(self): - action_plan = obj_utils.create_test_action_plan(self.context) - self._common_policy_check( - "action_plan:delete", self.delete, - '/action_plans/%s' % action_plan.uuid, expect_errors=True) - - -class TestActionPlanPolicyEnforcementWithAdminContext(TestListActionPlan, - api_base.AdminRoleTest): - - def setUp(self): - super(TestActionPlanPolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "action_plan:delete": "rule:default", - "action_plan:detail": "rule:default", - "action_plan:get": "rule:default", - "action_plan:get_all": "rule:default", - "action_plan:update": "rule:default"}) diff --git a/watcher/tests/api/v1/test_audit_templates.py b/watcher/tests/api/v1/test_audit_templates.py deleted file mode 100644 index ace32a8..0000000 --- a/watcher/tests/api/v1/test_audit_templates.py +++ /dev/null @@ -1,754 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import itertools -import mock -from webtest.app import AppError - -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from six.moves.urllib import parse as urlparse -from wsme import types as wtypes - -from watcher.api.controllers.v1 import audit_template as api_audit_template -from watcher.common import exception -from watcher.common import utils -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.api import utils as api_utils -from watcher.tests import base -from watcher.tests.db import utils as db_utils -from watcher.tests.objects import utils as obj_utils - - -def post_get_test_audit_template(**kw): - goal = db_utils.get_test_goal() - strategy = db_utils.get_test_strategy(goal_id=goal['id']) - kw['goal'] = kw.get('goal', goal['uuid']) - kw['strategy'] = kw.get('strategy', strategy['uuid']) - kw['scope'] = kw.get('scope', []) - audit_template = api_utils.audit_template_post_data(**kw) - return audit_template - - -class TestAuditTemplateObject(base.TestCase): - - def test_audit_template_init(self): - audit_template_dict = post_get_test_audit_template() - del audit_template_dict['name'] - audit_template = api_audit_template.AuditTemplate( - **audit_template_dict) - self.assertEqual(wtypes.Unset, audit_template.name) - - -class FunctionalTestWithSetup(api_base.FunctionalTest): - - def setUp(self): - super(FunctionalTestWithSetup, self).setUp() - self.fake_goal1 = 
obj_utils.create_test_goal( - self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") - self.fake_goal2 = obj_utils.create_test_goal( - self.context, id=2, uuid=utils.generate_uuid(), name="dummy_2") - self.fake_strategy1 = obj_utils.create_test_strategy( - self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", - goal_id=self.fake_goal1.id) - self.fake_strategy2 = obj_utils.create_test_strategy( - self.context, id=2, uuid=utils.generate_uuid(), name="strategy_2", - goal_id=self.fake_goal2.id) - - -class TestListAuditTemplate(FunctionalTestWithSetup): - - def test_empty(self): - response = self.get_json('/audit_templates') - self.assertEqual([], response['audit_templates']) - - def _assert_audit_template_fields(self, audit_template): - audit_template_fields = ['name', 'goal_uuid', 'goal_name', - 'strategy_uuid', 'strategy_name'] - for field in audit_template_fields: - self.assertIn(field, audit_template) - - def test_one(self): - audit_template = obj_utils.create_test_audit_template( - self.context, strategy_id=self.fake_strategy1.id) - response = self.get_json('/audit_templates') - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - def test_get_one_soft_deleted_ok(self): - audit_template = obj_utils.create_test_audit_template(self.context) - audit_template.soft_delete() - response = self.get_json('/audit_templates', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - response = self.get_json('/audit_templates') - self.assertEqual([], response['audit_templates']) - - def test_get_one_by_uuid(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json( - '/audit_templates/%s' % audit_template['uuid']) - self.assertEqual(audit_template.uuid, response['uuid']) - 
self._assert_audit_template_fields(response) - - def test_get_one_by_name(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json(urlparse.quote( - '/audit_templates/%s' % audit_template['name'])) - self.assertEqual(audit_template.uuid, response['uuid']) - self._assert_audit_template_fields(response) - - def test_get_one_soft_deleted(self): - audit_template = obj_utils.create_test_audit_template(self.context) - audit_template.soft_delete() - response = self.get_json( - '/audit_templates/%s' % audit_template['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit_template.uuid, response['uuid']) - self._assert_audit_template_fields(response) - - response = self.get_json( - '/audit_templates/%s' % audit_template['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json('/audit_templates/detail') - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - def test_detail_soft_deleted(self): - audit_template = obj_utils.create_test_audit_template(self.context) - audit_template.soft_delete() - response = self.get_json('/audit_templates/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - response = self.get_json('/audit_templates/detail') - self.assertEqual([], response['audit_templates']) - - def test_detail_against_single(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json( - '/audit_templates/%s/detail' % audit_template['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - audit_template_list = [] - for id_ in range(1, 6): - 
audit_template = obj_utils.create_test_audit_template( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - audit_template_list.append(audit_template) - - response = self.get_json('/audit_templates') - self.assertEqual(len(audit_template_list), - len(response['audit_templates'])) - uuids = [s['uuid'] for s in response['audit_templates']] - self.assertEqual( - sorted([at.uuid for at in audit_template_list]), - sorted(uuids)) - - def test_many_without_soft_deleted(self): - audit_template_list = [] - for id_ in range(1, 6): - audit_template = obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - audit_template_list.append(audit_template) - - # We soft delete the ones with ID 4 and 5 - [at.soft_delete() for at in audit_template_list[3:]] - - response = self.get_json('/audit_templates') - self.assertEqual(3, len(response['audit_templates'])) - uuids = [s['uuid'] for s in response['audit_templates']] - self.assertEqual( - sorted([at.uuid for at in audit_template_list[:3]]), - sorted(uuids)) - - def test_many_with_soft_deleted(self): - audit_template_list = [] - for id_ in range(1, 6): - audit_template = obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - audit_template_list.append(audit_template) - - # We soft delete the ones with ID 4 and 5 - [at.soft_delete() for at in audit_template_list[3:]] - - response = self.get_json('/audit_templates', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(5, len(response['audit_templates'])) - uuids = [s['uuid'] for s in response['audit_templates']] - self.assertEqual( - sorted([at.uuid for at in audit_template_list]), - sorted(uuids)) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_audit_template(self.context, id=1, uuid=uuid) - response = self.get_json('/audit_templates/%s' % uuid) - 
self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - response = self.get_json('/audit_templates/?limit=3') - self.assertEqual(3, len(response['audit_templates'])) - - next_marker = response['audit_templates'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - response = self.get_json('/audit_templates') - self.assertEqual(3, len(response['audit_templates'])) - - next_marker = response['audit_templates'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_filter_by_goal_uuid(self): - for id_, goal_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_goal1.id, 3), - itertools.repeat(self.fake_goal2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - goal_id=goal_id) - - response = self.get_json( - '/audit_templates?goal=%s' % self.fake_goal2.uuid) - self.assertEqual(2, len(response['audit_templates'])) - - def test_filter_by_goal_name(self): - for id_, goal_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_goal1.id, 3), - itertools.repeat(self.fake_goal2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - goal_id=goal_id) - - 
response = self.get_json( - '/audit_templates?goal=%s' % self.fake_goal2.name) - self.assertEqual(2, len(response['audit_templates'])) - - def test_filter_by_strategy_uuid(self): - for id_, strategy_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_strategy1.id, 3), - itertools.repeat(self.fake_strategy2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - strategy_id=strategy_id) - - response = self.get_json( - '/audit_templates?strategy=%s' % self.fake_strategy2.uuid) - self.assertEqual(2, len(response['audit_templates'])) - - def test_filter_by_strategy_name(self): - for id_, strategy_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_strategy1.id, 3), - itertools.repeat(self.fake_strategy2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - strategy_id=strategy_id) - - response = self.get_json( - '/audit_templates?strategy=%s' % self.fake_strategy2.name) - self.assertEqual(2, len(response['audit_templates'])) - - -class TestPatch(FunctionalTestWithSetup): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - self.audit_template = obj_utils.create_test_audit_template( - self.context, strategy_id=None) - - @mock.patch.object(timeutils, 'utcnow') - def test_replace_goal_uuid(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_goal_uuid = self.fake_goal2.uuid - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertNotEqual(new_goal_uuid, response['goal_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', 'value': new_goal_uuid, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - 
self.assertEqual(200, response.status_code) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual(new_goal_uuid, response['goal_uuid']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - @mock.patch.object(timeutils, 'utcnow') - def test_replace_goal_uuid_by_name(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_goal_uuid = self.fake_goal2.uuid - response = self.get_json(urlparse.quote( - '/audit_templates/%s' % self.audit_template.name)) - self.assertNotEqual(new_goal_uuid, response['goal_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.name, - [{'path': '/goal', 'value': new_goal_uuid, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.name) - self.assertEqual(new_goal_uuid, response['goal_uuid']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - def test_replace_non_existent_audit_template(self): - response = self.patch_json( - '/audit_templates/%s' % utils.generate_uuid(), - [{'path': '/goal', 'value': self.fake_goal1.uuid, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_replace_invalid_goal(self): - with mock.patch.object( - self.dbapi, - 'update_audit_template', - wraps=self.dbapi.update_audit_template - ) as cn_mock: - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', 'value': utils.generate_uuid(), - 'op': 'replace'}], - expect_errors=True) - 
self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_add_goal_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', - 'value': self.fake_goal2.uuid, - 'op': 'add'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual(self.fake_goal2.uuid, response['goal_uuid']) - - def test_add_strategy_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', - 'value': self.fake_strategy1.uuid, - 'op': 'add'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual(self.fake_strategy1.uuid, response['strategy_uuid']) - - def test_replace_strategy_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', - 'value': self.fake_strategy2['uuid'], - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual( - self.fake_strategy2['uuid'], response['strategy_uuid']) - - def test_replace_invalid_strategy(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', - 'value': utils.generate_uuid(), # Does not exist - 'op': 'replace'}], expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_add_non_existent_property(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/foo', 
'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_strategy(self): - audit_template = obj_utils.create_test_audit_template( - self.context, uuid=utils.generate_uuid(), - name="AT_%s" % utils.generate_uuid(), - goal_id=self.fake_goal1.id, - strategy_id=self.fake_strategy1.id) - response = self.get_json( - '/audit_templates/%s' % audit_template.uuid) - self.assertIsNotNone(response['strategy_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', 'op': 'remove'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - def test_remove_goal(self): - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertIsNotNone(response['goal_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(403, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_non_existent_property(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class 
TestPost(FunctionalTestWithSetup): - - @mock.patch.object(timeutils, 'utcnow') - def test_create_audit_template(self, mock_utcnow): - audit_template_dict = post_get_test_audit_template( - goal=self.fake_goal1.uuid, - strategy=self.fake_strategy1.uuid) - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.post_json('/audit_templates', audit_template_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - # Check location header - self.assertIsNotNone(response.location) - expected_location = \ - '/v1/audit_templates/%s' % response.json['uuid'] - self.assertEqual(urlparse.urlparse(response.location).path, - expected_location) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - self.assertNotIn('updated_at', response.json.keys) - self.assertNotIn('deleted_at', response.json.keys) - self.assertEqual(self.fake_goal1.uuid, response.json['goal_uuid']) - self.assertEqual(self.fake_strategy1.uuid, - response.json['strategy_uuid']) - return_created_at = timeutils.parse_isotime( - response.json['created_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_created_at) - - def test_create_audit_template_vlidation_with_aggregates(self): - scope = [{'host_aggregates': [{'id': '*'}]}, - {'availability_zones': [{'name': 'AZ1'}, - {'name': 'AZ2'}]}, - {'exclude': [ - {'instances': [ - {'uuid': 'INSTANCE_1'}, - {'uuid': 'INSTANCE_2'}]}, - {'compute_nodes': [ - {'name': 'Node_1'}, - {'name': 'Node_2'}]}, - {'host_aggregates': [{'id': '*'}]} - ]} - ] - audit_template_dict = post_get_test_audit_template( - goal=self.fake_goal1.uuid, - strategy=self.fake_strategy1.uuid, scope=scope) - with self.assertRaisesRegex(AppError, - "be included and excluded together"): - self.post_json('/audit_templates', audit_template_dict) - - def test_create_audit_template_does_autogenerate_id(self): - audit_template_dict = post_get_test_audit_template( - 
goal=self.fake_goal1.uuid, strategy=None) - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - response = self.post_json('/audit_templates', audit_template_dict) - self.assertEqual(audit_template_dict['goal'], - response.json['goal_uuid']) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cn_mock.call_args[0][0]) - - def test_create_audit_template_generate_uuid(self): - audit_template_dict = post_get_test_audit_template( - goal=self.fake_goal1.uuid, strategy=None) - - response = self.post_json('/audit_templates', audit_template_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - def test_create_audit_template_with_invalid_goal(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template( - goal_uuid=utils.generate_uuid()) - response = self.post_json('/audit_templates', - audit_template_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_create_audit_template_with_invalid_strategy(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template( - goal_uuid=self.fake_goal1['uuid'], - strategy_uuid=utils.generate_uuid()) - response = self.post_json('/audit_templates', - audit_template_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_create_audit_template_with_unrelated_strategy(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template( - 
goal_uuid=self.fake_goal1['uuid'], - strategy=self.fake_strategy2['uuid']) - response = self.post_json('/audit_templates', - audit_template_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_create_audit_template_with_uuid(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template() - response = self.post_json('/audit_templates', audit_template_dict, - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - obj_utils.create_test_goal(self.context) - self.audit_template = obj_utils.create_test_audit_template( - self.context) - - @mock.patch.object(timeutils, 'utcnow') - def test_delete_audit_template_by_uuid(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - self.delete(urlparse.quote('/audit_templates/%s' % - self.audit_template.uuid)) - response = self.get_json( - urlparse.quote('/audit_templates/%s' % self.audit_template.uuid), - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - self.assertRaises(exception.AuditTemplateNotFound, - objects.AuditTemplate.get_by_uuid, - self.context, - self.audit_template.uuid) - - self.context.show_deleted = True - at = objects.AuditTemplate.get_by_uuid(self.context, - self.audit_template.uuid) - self.assertEqual(self.audit_template.name, at.name) - - @mock.patch.object(timeutils, 'utcnow') - def test_delete_audit_template_by_name(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - 
self.delete(urlparse.quote('/audit_templates/%s' % - self.audit_template.name)) - response = self.get_json( - urlparse.quote('/audit_templates/%s' % self.audit_template.name), - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - self.assertRaises(exception.AuditTemplateNotFound, - objects.AuditTemplate.get_by_name, - self.context, - self.audit_template.name) - - self.context.show_deleted = True - at = objects.AuditTemplate.get_by_name(self.context, - self.audit_template.name) - self.assertEqual(self.audit_template.uuid, at.uuid) - - def test_delete_audit_template_not_found(self): - uuid = utils.generate_uuid() - response = self.delete( - '/audit_templates/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestAuditTemplatePolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "audit_template:get_all", self.get_json, '/audit_templates', - expect_errors=True) - - def test_policy_disallow_get_one(self): - obj_utils.create_test_goal(self.context) - audit_template = obj_utils.create_test_audit_template(self.context) - self._common_policy_check( - "audit_template:get", self.get_json, - '/audit_templates/%s' % audit_template.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "audit_template:detail", self.get_json, - '/audit_templates/detail', - expect_errors=True) - - def test_policy_disallow_update(self): - obj_utils.create_test_goal(self.context) - audit_template = obj_utils.create_test_audit_template(self.context) - self._common_policy_check( - "audit_template:update", self.patch_json, - '/audit_templates/%s' % audit_template.uuid, - [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, - 'op': 'replace'}], expect_errors=True) - - def test_policy_disallow_create(self): - fake_goal1 = obj_utils.get_test_goal( - self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") - fake_goal1.create() - fake_strategy1 = obj_utils.get_test_strategy( - self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", - goal_id=fake_goal1.id) - fake_strategy1.create() - - audit_template_dict = post_get_test_audit_template( - goal=fake_goal1.uuid, - strategy=fake_strategy1.uuid) - self._common_policy_check( - "audit_template:create", self.post_json, '/audit_templates', - audit_template_dict, expect_errors=True) - - def test_policy_disallow_delete(self): - obj_utils.create_test_goal(self.context) - audit_template = obj_utils.create_test_audit_template(self.context) - self._common_policy_check( - "audit_template:delete", self.delete, - '/audit_templates/%s' % audit_template.uuid, expect_errors=True) - - -class 
TestAuditTemplatePolicyWithAdminContext(TestListAuditTemplate, - api_base.AdminRoleTest): - def setUp(self): - super(TestAuditTemplatePolicyWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "audit_template:create": "rule:default", - "audit_template:delete": "rule:default", - "audit_template:detail": "rule:default", - "audit_template:get": "rule:default", - "audit_template:get_all": "rule:default", - "audit_template:update": "rule:default"}) diff --git a/watcher/tests/api/v1/test_audits.py b/watcher/tests/api/v1/test_audits.py deleted file mode 100644 index b26f62c..0000000 --- a/watcher/tests/api/v1/test_audits.py +++ /dev/null @@ -1,918 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import itertools -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from wsme import types as wtypes - -from six.moves.urllib import parse as urlparse -from watcher.api.controllers.v1 import audit as api_audit -from watcher.common import utils -from watcher.db import api as db_api -from watcher.decision_engine import rpcapi as deapi -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.api import utils as api_utils -from watcher.tests import base -from watcher.tests.db import utils as db_utils -from watcher.tests.objects import utils as obj_utils - - -def post_get_test_audit(**kw): - audit = api_utils.audit_post_data(**kw) - audit_template = db_utils.get_test_audit_template() - goal = db_utils.get_test_goal() - del_keys = ['goal_id', 'strategy_id'] - add_keys = {'audit_template_uuid': audit_template['uuid'], - 'goal': goal['uuid'], - } - for k in del_keys: - del audit[k] - for k in add_keys: - audit[k] = kw.get(k, add_keys[k]) - return audit - - -def post_get_test_audit_with_predefined_strategy(**kw): - spec = kw.pop('strategy_parameters_spec', {}) - strategy_id = 2 - strategy = db_utils.get_test_strategy(parameters_spec=spec, id=strategy_id) - audit = api_utils.audit_post_data(**kw) - audit_template = db_utils.get_test_audit_template( - strategy_id=strategy['id']) - del_keys = ['goal_id', 'strategy_id'] - add_keys = {'audit_template_uuid': audit_template['uuid'], - } - for k in del_keys: - del audit[k] - for k in add_keys: - audit[k] = kw.get(k, add_keys[k]) - return audit - - -class TestAuditObject(base.TestCase): - - def test_audit_init(self): - audit_dict = api_utils.audit_post_data(audit_template_id=None, - goal_id=None, - strategy_id=None) - del audit_dict['state'] - audit = api_audit.Audit(**audit_dict) - self.assertEqual(wtypes.Unset, audit.state) - - -class TestListAudit(api_base.FunctionalTest): - - def setUp(self): - 
super(TestListAudit, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - - def test_empty(self): - response = self.get_json('/audits') - self.assertEqual([], response['audits']) - - def _assert_audit_fields(self, audit): - audit_fields = ['audit_type', 'scope', 'state', 'goal_uuid', - 'strategy_uuid'] - for field in audit_fields: - self.assertIn(field, audit) - - def test_one(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits') - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - def test_one_soft_deleted(self): - audit = obj_utils.create_test_audit(self.context) - audit.soft_delete() - response = self.get_json('/audits', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - response = self.get_json('/audits') - self.assertEqual([], response['audits']) - - def test_get_one(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits/%s' % audit['uuid']) - self.assertEqual(audit.uuid, response['uuid']) - self._assert_audit_fields(response) - - def test_get_one_soft_deleted(self): - audit = obj_utils.create_test_audit(self.context) - audit.soft_delete() - response = self.get_json('/audits/%s' % audit['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit.uuid, response['uuid']) - self._assert_audit_fields(response) - - response = self.get_json('/audits/%s' % audit['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits/detail') - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - def 
test_detail_soft_deleted(self): - audit = obj_utils.create_test_audit(self.context) - audit.soft_delete() - response = self.get_json('/audits/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - response = self.get_json('/audits/detail') - self.assertEqual([], response['audits']) - - def test_detail_against_single(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits/%s/detail' % audit['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - audit_list = [] - for id_ in range(5): - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit_list.append(audit.uuid) - response = self.get_json('/audits') - self.assertEqual(len(audit_list), len(response['audits'])) - uuids = [s['uuid'] for s in response['audits']] - self.assertEqual(sorted(audit_list), sorted(uuids)) - - def test_many_without_soft_deleted(self): - audit_list = [] - for id_ in [1, 2, 3]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit_list.append(audit.uuid) - for id_ in [4, 5]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit.soft_delete() - response = self.get_json('/audits') - self.assertEqual(3, len(response['audits'])) - uuids = [s['uuid'] for s in response['audits']] - self.assertEqual(sorted(audit_list), sorted(uuids)) - - def test_many_with_soft_deleted(self): - audit_list = [] - for id_ in [1, 2, 3]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit_list.append(audit.uuid) - for id_ in [4, 5]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit.soft_delete() - audit_list.append(audit.uuid) - response = self.get_json('/audits', - headers={'X-Show-Deleted': 'True'}) - 
self.assertEqual(5, len(response['audits'])) - uuids = [s['uuid'] for s in response['audits']] - self.assertEqual(sorted(audit_list), sorted(uuids)) - - def test_many_with_sort_key_goal_uuid(self): - goal_list = [] - for id_ in range(5): - goal = obj_utils.create_test_goal( - self.context, - name='gl{0}'.format(id_), - uuid=utils.generate_uuid()) - obj_utils.create_test_audit( - self.context, id=id_, uuid=utils.generate_uuid(), - goal_id=goal.id) - goal_list.append(goal.uuid) - - response = self.get_json('/audits/?sort_key=goal_uuid') - - self.assertEqual(5, len(response['audits'])) - uuids = [s['goal_uuid'] for s in response['audits']] - self.assertEqual(sorted(goal_list), uuids) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_audit(self.context, id=1, uuid=uuid) - response = self.get_json('/audits/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - response = self.get_json('/audits/?limit=3') - self.assertEqual(3, len(response['audits'])) - - next_marker = response['audits'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - response = self.get_json('/audits') - self.assertEqual(3, len(response['audits'])) - - next_marker = response['audits'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - 
obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_state = objects.audit.State.CANCELLED - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertNotEqual(new_state, response['state']) - - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': new_state, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertEqual(new_state, response['state']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - def test_replace_non_existent_audit(self): - response = self.patch_json( - '/audits/%s' % utils.generate_uuid(), - [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, - 'op': 'replace'}], expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_add_ok(self): - new_state = objects.audit.State.SUCCEEDED - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': new_state, 'op': 'add'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json('/audits/%s' % self.audit.uuid) - 
self.assertEqual(new_state, response['state']) - - def test_add_non_existent_property(self): - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_ok(self): - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertIsNotNone(response['interval']) - - response = self.patch_json('/audits/%s' % self.audit.uuid, - [{'path': '/interval', 'op': 'remove'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertIsNone(response['interval']) - - def test_remove_uuid(self): - response = self.patch_json('/audits/%s' % self.audit.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_non_existent_property(self): - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -ALLOWED_TRANSITIONS = [ - {"original_state": key, "new_state": value} - for key, values in ( - objects.audit.AuditStateTransitionManager.TRANSITIONS.items()) - for value in values] - - -class TestPatchStateTransitionDenied(api_base.FunctionalTest): - - STATES = [ - ap_state for ap_state in objects.audit.State.__dict__ - if not ap_state.startswith("_") - ] - - scenarios = [ - ( - "%s -> %s" % (original_state, new_state), - {"original_state": original_state, - "new_state": new_state}, - ) - for 
original_state, new_state - in list(itertools.product(STATES, STATES)) - if original_state != new_state - and {"original_state": original_state, - "new_state": new_state} not in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionDenied, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context, - state=self.original_state) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - def test_replace_denied(self): - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertNotEqual(self.new_state, response['state']) - - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': self.new_state, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['error_message']) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertEqual(self.original_state, response['state']) - - -class TestPatchStateTransitionOk(api_base.FunctionalTest): - - scenarios = [ - ( - "%s -> %s" % (transition["original_state"], - transition["new_state"]), - transition - ) - for transition in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionOk, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context, - state=self.original_state) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - 
self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertNotEqual(self.new_state, response['state']) - - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': self.new_state, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertEqual(self.new_state, response['state']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - -class TestPost(api_base.FunctionalTest): - - def setUp(self): - super(TestPost, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - p = mock.patch.object(db_api.BaseConnection, 'create_audit') - self.mock_create_audit = p.start() - self.mock_create_audit.side_effect = ( - self._simulate_rpc_audit_create) - self.addCleanup(p.stop) - - def _simulate_rpc_audit_create(self, audit): - audit.create() - return audit - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_audit(self, mock_utcnow, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del 
audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - # Check location header - self.assertIsNotNone(response.location) - expected_location = '/v1/audits/%s' % response.json['uuid'] - self.assertEqual(urlparse.urlparse(response.location).path, - expected_location) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertNotIn('updated_at', response.json.keys) - self.assertNotIn('deleted_at', response.json.keys) - return_created_at = timeutils.parse_isotime( - response.json['created_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_created_at) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_audit_with_state_not_allowed(self, mock_utcnow, - mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - audit_dict = post_get_test_audit(state=objects.audit.State.SUCCEEDED) - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_audit_invalid_audit_template_uuid(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - # Make the audit template UUID some garbage value - audit_dict['audit_template_uuid'] = ( - '01234567-8910-1112-1314-151617181920') - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, 
response.status_int) - self.assertEqual("application/json", response.content_type) - expected_error_msg = ('The audit template UUID or name specified is ' - 'invalid') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_doesnt_contain_id(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - state = audit_dict['state'] - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - with mock.patch.object(self.dbapi, 'create_audit', - wraps=self.dbapi.create_audit) as cn_mock: - response = self.post_json('/audits', audit_dict) - self.assertEqual(state, response.json['state']) - cn_mock.assert_called_once_with(mock.ANY) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cn_mock.call_args[0][0]) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_generate_uuid(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_with_interval(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del 
audit_dict['scope'] - del audit_dict['next_run_time'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - audit_dict['interval'] = '1200' - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertEqual(audit_dict['interval'], response.json['interval']) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_with_cron_interval(self, - mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - audit_dict['interval'] = '* * * * *' - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertEqual(audit_dict['interval'], response.json['interval']) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_with_wrong_interval(self, - mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - audit_dict['interval'] = 'zxc' - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(500, response.status_int) - expected_error_msg = ('Exactly 
5 or 6 columns has to be ' - 'specified for iteratorexpression.') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_without_period(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - expected_error_msg = ('Interval of audit must be specified ' - 'for CONTINUOUS.') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_oneshot_audit_with_period(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - audit_dict['audit_type'] = objects.audit.AuditType.ONESHOT.value - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - expected_error_msg = 'Interval of audit must not be set for ONESHOT.' 
- self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - def test_create_audit_trigger_decision_engine(self): - with mock.patch.object(deapi.DecisionEngineAPI, - 'trigger_audit') as de_mock: - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - response = self.post_json('/audits', audit_dict) - de_mock.assert_called_once_with(mock.ANY, response.json['uuid']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_with_uuid(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['scope'] - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - assert not mock_trigger_audit.called - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_parameters_no_predefined_strategy( - self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - audit_dict = post_get_test_audit(parameters={'name': 'Tom'}) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - expected_error_msg = ('Specify parameters but no predefined ' - 'strategy for audit template, or no ' - 'parameter spec in predefined strategy') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - assert not mock_trigger_audit.called - - 
@mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_parameters_no_schema( - self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - audit_dict = post_get_test_audit_with_predefined_strategy( - parameters={'name': 'Tom'}) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - expected_error_msg = ('Specify parameters but no predefined ' - 'strategy for audit template, or no ' - 'parameter spec in predefined strategy') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - assert not mock_trigger_audit.called - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_with_parameter_not_allowed( - self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - audit_template = self.prepare_audit_template_strategy_with_parameter() - - audit_dict = api_utils.audit_post_data( - parameters={'fake1': 1, 'fake2': "hello"}) - - audit_dict['audit_template_uuid'] = audit_template['uuid'] - del_keys = ['uuid', 'goal_id', 'strategy_id', 'state', 'interval', - 'scope', 'next_run_time'] - for k in del_keys: - del audit_dict[k] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual("application/json", response.content_type) - expected_error_msg = 'Audit parameter fake2 are not allowed' - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - assert not mock_trigger_audit.called - - def prepare_audit_template_strategy_with_parameter(self): - fake_spec = { - "properties": { - "fake1": { - "description": "number parameter 
example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - "maximum": 10.2, - } - } - } - template_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a67' - strategy_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a68' - template_name = 'my template' - strategy_name = 'my strategy' - strategy_id = 3 - strategy = db_utils.get_test_strategy(parameters_spec=fake_spec, - id=strategy_id, - uuid=strategy_uuid, - name=strategy_name) - obj_utils.create_test_strategy(self.context, - parameters_spec=fake_spec, - id=strategy_id, - uuid=strategy_uuid, - name=strategy_name) - obj_utils.create_test_audit_template(self.context, - strategy_id=strategy_id, - uuid=template_uuid, - name='name') - audit_template = db_utils.get_test_audit_template( - strategy_id=strategy['id'], uuid=template_uuid, name=template_name) - return audit_template - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_delete_audit(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - self.delete('/audits/%s' % self.audit.uuid) - response = self.get_json('/audits/%s' % self.audit.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - self.context.show_deleted = True - audit = objects.Audit.get_by_uuid(self.context, self.audit.uuid) - - return_deleted_at = 
timeutils.strtime(audit['deleted_at']) - self.assertEqual(timeutils.strtime(test_time), return_deleted_at) - self.assertEqual(objects.audit.State.DELETED, audit['state']) - - def test_delete_audit_not_found(self): - uuid = utils.generate_uuid() - response = self.delete('/audits/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestAuditPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestAuditPolicyEnforcement, self).setUp() - obj_utils.create_test_goal(self.context) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "audit:get_all", self.get_json, '/audits', - expect_errors=True) - - def test_policy_disallow_get_one(self): - audit = obj_utils.create_test_audit(self.context) - self._common_policy_check( - "audit:get", self.get_json, - '/audits/%s' % audit.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "audit:detail", self.get_json, - '/audits/detail', - expect_errors=True) - - def test_policy_disallow_update(self): - audit = obj_utils.create_test_audit(self.context) - self._common_policy_check( - "audit:update", self.patch_json, - '/audits/%s' % audit.uuid, - [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, - 'op': 'replace'}], expect_errors=True) - - def test_policy_disallow_create(self): - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - self._common_policy_check( - "audit:create", self.post_json, '/audits', audit_dict, - expect_errors=True) - - def test_policy_disallow_delete(self): - audit = obj_utils.create_test_audit(self.context) - self._common_policy_check( - "audit:delete", self.delete, - '/audits/%s' % audit.uuid, expect_errors=True) - - -class TestAuditEnforcementWithAdminContext(TestListAudit, - api_base.AdminRoleTest): - - def setUp(self): - super(TestAuditEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "audit:create": "rule:default", - "audit:delete": "rule:default", - "audit:detail": "rule:default", - "audit:get": "rule:default", - "audit:get_all": "rule:default", - "audit:update": "rule:default"}) diff --git a/watcher/tests/api/v1/test_goals.py b/watcher/tests/api/v1/test_goals.py deleted file mode 100644 index 
6c71c15..0000000 --- a/watcher/tests/api/v1/test_goals.py +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves.urllib import parse as urlparse - -from watcher.common import utils -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListGoal(api_base.FunctionalTest): - - def _assert_goal_fields(self, goal): - goal_fields = ['uuid', 'name', 'display_name', - 'efficacy_specification'] - for field in goal_fields: - self.assertIn(field, goal) - - def test_one(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals') - self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) - self._assert_goal_fields(response['goals'][0]) - - def test_get_one_by_uuid(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals/%s' % goal.uuid) - self.assertEqual(goal.uuid, response["uuid"]) - self.assertEqual(goal.name, response["name"]) - self._assert_goal_fields(response) - - def test_get_one_by_name(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json(urlparse.quote( - '/goals/%s' % goal['name'])) - self.assertEqual(goal.uuid, response['uuid']) - self._assert_goal_fields(response) - - def test_get_one_soft_deleted(self): - goal = obj_utils.create_test_goal(self.context) - goal.soft_delete() - response = 
self.get_json( - '/goals/%s' % goal['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(goal.uuid, response['uuid']) - self._assert_goal_fields(response) - - response = self.get_json( - '/goals/%s' % goal['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals/detail') - self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) - self._assert_goal_fields(response['goals'][0]) - - def test_detail_against_single(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals/%s/detail' % goal.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - goal_list = [] - for idx in range(1, 6): - goal = obj_utils.create_test_goal( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(idx)) - goal_list.append(goal.uuid) - response = self.get_json('/goals') - self.assertGreater(len(response['goals']), 2) - - def test_many_without_soft_deleted(self): - goal_list = [] - for id_ in [1, 2, 3]: - goal = obj_utils.create_test_goal( - self.context, id=id_, uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(id_)) - goal_list.append(goal.uuid) - for id_ in [4, 5]: - goal = obj_utils.create_test_goal( - self.context, id=id_, uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(id_)) - goal.soft_delete() - response = self.get_json('/goals') - self.assertEqual(3, len(response['goals'])) - uuids = [s['uuid'] for s in response['goals']] - self.assertEqual(sorted(goal_list), sorted(uuids)) - - def test_goals_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_goal( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(idx)) - response = self.get_json('/goals/?limit=2') - self.assertEqual(2, len(response['goals'])) - - def test_goals_collection_links_default_limit(self): - for idx in range(1, 6): - 
obj_utils.create_test_goal( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(idx)) - cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/goals') - self.assertEqual(3, len(response['goals'])) - - -class TestGoalPolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "goal:get_all", self.get_json, '/goals', - expect_errors=True) - - def test_policy_disallow_get_one(self): - goal = obj_utils.create_test_goal(self.context) - self._common_policy_check( - "goal:get", self.get_json, - '/goals/%s' % goal.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "goal:detail", self.get_json, - '/goals/detail', - expect_errors=True) - - -class TestGoalPolicyEnforcementWithAdminContext(TestListGoal, - api_base.AdminRoleTest): - - def setUp(self): - super(TestGoalPolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "goal:detail": "rule:default", - "goal:get_all": "rule:default", - "goal:get_one": "rule:default"}) diff --git a/watcher/tests/api/v1/test_root.py b/watcher/tests/api/v1/test_root.py deleted file mode 100644 index 2cac444..0000000 --- a/watcher/tests/api/v1/test_root.py +++ /dev/null @@ -1,20 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher.tests.api import base as api_base - - -class TestV1Routing(api_base.FunctionalTest): - def setUp(self): - super(TestV1Routing, self).setUp() diff --git a/watcher/tests/api/v1/test_scoring_engines.py b/watcher/tests/api/v1/test_scoring_engines.py deleted file mode 100644 index 2e7b3cc..0000000 --- a/watcher/tests/api/v1/test_scoring_engines.py +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from oslo_serialization import jsonutils -from watcher.common import utils - -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListScoringEngine(api_base.FunctionalTest): - - def _assert_scoring_engine_fields(self, scoring_engine): - scoring_engine_fields = ['uuid', 'name', 'description', 'metainfo'] - for field in scoring_engine_fields: - self.assertIn(field, scoring_engine) - - def test_one(self): - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - response = self.get_json('/scoring_engines') - self.assertEqual( - scoring_engine.name, response['scoring_engines'][0]['name']) - self._assert_scoring_engine_fields(response['scoring_engines'][0]) - - def test_get_one_soft_deleted(self): - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - scoring_engine.soft_delete() - response = self.get_json( - '/scoring_engines/%s' % scoring_engine['name'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(scoring_engine.name, response['name']) - self._assert_scoring_engine_fields(response) - - response = self.get_json( - '/scoring_engines/%s' % scoring_engine['name'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - obj_utils.create_test_goal(self.context) - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - response = self.get_json('/scoring_engines/detail') - self.assertEqual( - scoring_engine.name, response['scoring_engines'][0]['name']) - self._assert_scoring_engine_fields(response['scoring_engines'][0]) - for scoring_engine in response['scoring_engines']: - self.assertTrue( - all(val is not None for key, val in scoring_engine.items() - if key in ['uuid', 'name', 'description', 'metainfo'])) - - def test_detail_against_single(self): - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - response = self.get_json( - '/scoring_engines/%s/detail' % 
scoring_engine.id, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - scoring_engine_list = [] - for idx in range(1, 6): - scoring_engine = obj_utils.create_test_scoring_engine( - self.context, id=idx, uuid=utils.generate_uuid(), - name=str(idx), description='SE_{0}'.format(idx)) - scoring_engine_list.append(scoring_engine.name) - response = self.get_json('/scoring_engines') - self.assertEqual(5, len(response['scoring_engines'])) - for scoring_engine in response['scoring_engines']: - self.assertTrue( - all(val is not None for key, val in scoring_engine.items() - if key in ['name', 'description', 'metainfo'])) - - def test_many_without_soft_deleted(self): - scoring_engine_list = [] - for id_ in [1, 2, 3]: - scoring_engine = obj_utils.create_test_scoring_engine( - self.context, id=id_, uuid=utils.generate_uuid(), - name=str(id_), description='SE_{0}'.format(id_)) - scoring_engine_list.append(scoring_engine.name) - for id_ in [4, 5]: - scoring_engine = obj_utils.create_test_scoring_engine( - self.context, id=id_, uuid=utils.generate_uuid(), - name=str(id_), description='SE_{0}'.format(id_)) - scoring_engine.soft_delete() - response = self.get_json('/scoring_engines') - self.assertEqual(3, len(response['scoring_engines'])) - names = [s['name'] for s in response['scoring_engines']] - self.assertEqual(sorted(scoring_engine_list), sorted(names)) - - def test_scoring_engines_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_scoring_engine( - self.context, id=idx, uuid=utils.generate_uuid(), - name=str(idx), description='SE_{0}'.format(idx)) - response = self.get_json('/scoring_engines/?limit=2') - self.assertEqual(2, len(response['scoring_engines'])) - - def test_scoring_engines_collection_links_default_limit(self): - for idx in range(1, 6): - obj_utils.create_test_scoring_engine( - self.context, id=idx, uuid=utils.generate_uuid(), - name=str(idx), description='SE_{0}'.format(idx)) - 
cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/scoring_engines') - self.assertEqual(3, len(response['scoring_engines'])) - - -class TestScoringEnginePolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "scoring_engine:get_all", self.get_json, '/scoring_engines', - expect_errors=True) - - def test_policy_disallow_get_one(self): - se = obj_utils.create_test_scoring_engine(self.context) - self._common_policy_check( - "scoring_engine:get", self.get_json, - '/scoring_engines/%s' % se.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "scoring_engine:detail", self.get_json, - '/scoring_engines/detail', - expect_errors=True) - - -class TestScoringEnginePolicyEnforcementWithAdminContext( - TestListScoringEngine, api_base.AdminRoleTest): - - def setUp(self): - super(TestScoringEnginePolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "scoring_engine:detail": "rule:default", - "scoring_engine:get": "rule:default", - "scoring_engine:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_services.py b/watcher/tests/api/v1/test_services.py deleted file mode 100644 index c556d95..0000000 --- a/watcher/tests/api/v1/test_services.py +++ /dev/null @@ -1,178 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this 
file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves.urllib import parse as urlparse - -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListService(api_base.FunctionalTest): - - def _assert_service_fields(self, service): - service_fields = ['id', 'name', 'host', 'status'] - for field in service_fields: - self.assertIn(field, service) - - def test_one(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services') - self.assertEqual(service.id, response['services'][0]["id"]) - self._assert_service_fields(response['services'][0]) - - def test_get_one_by_id(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services/%s' % service.id) - self.assertEqual(service.id, response["id"]) - self.assertEqual(service.name, response["name"]) - self._assert_service_fields(response) - - def test_get_one_by_name(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json(urlparse.quote( - '/services/%s' % service['name'])) - self.assertEqual(service.id, response['id']) - self._assert_service_fields(response) - - def test_get_one_soft_deleted(self): - service = obj_utils.create_test_service(self.context) - service.soft_delete() - response = self.get_json( - '/services/%s' % service['id'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(service.id, response['id']) - self._assert_service_fields(response) - - 
response = self.get_json( - '/services/%s' % service['id'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services/detail') - self.assertEqual(service.id, response['services'][0]["id"]) - self._assert_service_fields(response['services'][0]) - for service in response['services']: - self.assertTrue( - all(val is not None for key, val in service.items() - if key in ['id', 'name', 'host', 'status']) - ) - - def test_detail_against_single(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services/%s/detail' % service.id, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - service_list = [] - for idx in range(1, 4): - service = obj_utils.create_test_service( - self.context, id=idx, host='CONTROLLER1', - name='SERVICE_{0}'.format(idx)) - service_list.append(service.id) - for idx in range(1, 4): - service = obj_utils.create_test_service( - self.context, id=3+idx, host='CONTROLLER2', - name='SERVICE_{0}'.format(idx)) - service_list.append(service.id) - response = self.get_json('/services') - self.assertEqual(6, len(response['services'])) - for service in response['services']: - self.assertTrue( - all(val is not None for key, val in service.items() - if key in ['id', 'name', 'host', 'status'])) - - def test_many_without_soft_deleted(self): - service_list = [] - for id_ in [1, 2, 3]: - service = obj_utils.create_test_service( - self.context, id=id_, host='CONTROLLER', - name='SERVICE_{0}'.format(id_)) - service_list.append(service.id) - for id_ in [4, 5]: - service = obj_utils.create_test_service( - self.context, id=id_, host='CONTROLLER', - name='SERVICE_{0}'.format(id_)) - service.soft_delete() - response = self.get_json('/services') - self.assertEqual(3, len(response['services'])) - ids = [s['id'] for s in response['services']] - 
self.assertEqual(sorted(service_list), sorted(ids)) - - def test_services_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_service( - self.context, id=idx, - host='CONTROLLER', - name='SERVICE_{0}'.format(idx)) - response = self.get_json('/services/?limit=2') - self.assertEqual(2, len(response['services'])) - - def test_services_collection_links_default_limit(self): - for idx in range(1, 6): - obj_utils.create_test_service( - self.context, id=idx, - host='CONTROLLER', - name='SERVICE_{0}'.format(idx)) - cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/services') - self.assertEqual(3, len(response['services'])) - - -class TestServicePolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:default"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "service:get_all", self.get_json, '/services', - expect_errors=True) - - def test_policy_disallow_get_one(self): - service = obj_utils.create_test_service(self.context) - self._common_policy_check( - "service:get", self.get_json, - '/services/%s' % service.id, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "service:detail", self.get_json, - '/services/detail', - expect_errors=True) - - -class TestServiceEnforcementWithAdminContext(TestListService, - api_base.AdminRoleTest): - - def setUp(self): - super(TestServiceEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "service:detail": "rule:default", - "service:get": "rule:default", - "service:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_strategies.py b/watcher/tests/api/v1/test_strategies.py deleted file mode 100644 index 6edcd48..0000000 --- a/watcher/tests/api/v1/test_strategies.py +++ /dev/null @@ -1,248 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves.urllib import parse as urlparse - -from watcher.common import utils -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListStrategy(api_base.FunctionalTest): - - def setUp(self): - super(TestListStrategy, self).setUp() - self.fake_goal = obj_utils.create_test_goal( - self.context, uuid=utils.generate_uuid()) - - def _assert_strategy_fields(self, strategy): - strategy_fields = ['uuid', 'name', 'display_name', 'goal_uuid'] - for field in strategy_fields: - self.assertIn(field, strategy) - - def test_one(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies') - self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) - self._assert_strategy_fields(response['strategies'][0]) - - def test_get_one_by_uuid(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies/%s' % strategy.uuid) - self.assertEqual(strategy.uuid, response["uuid"]) - self.assertEqual(strategy.name, response["name"]) - self._assert_strategy_fields(response) - - def test_get_one_by_name(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json(urlparse.quote( - '/strategies/%s' % strategy['name'])) - self.assertEqual(strategy.uuid, response['uuid']) - self._assert_strategy_fields(response) - - def test_get_one_soft_deleted(self): - strategy = obj_utils.create_test_strategy(self.context) - strategy.soft_delete() - response = self.get_json( - '/strategies/%s' % strategy['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(strategy.uuid, response['uuid']) - self._assert_strategy_fields(response) - - response = self.get_json( - '/strategies/%s' % strategy['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - strategy = 
obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies/detail') - self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) - self._assert_strategy_fields(response['strategies'][0]) - for strategy in response['strategies']: - self.assertTrue( - all(val is not None for key, val in strategy.items() - if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) - - def test_detail_against_single(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies/%s/detail' % strategy.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - strategy_list = [] - for idx in range(1, 6): - strategy = obj_utils.create_test_strategy( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(idx)) - strategy_list.append(strategy.uuid) - response = self.get_json('/strategies') - self.assertEqual(5, len(response['strategies'])) - for strategy in response['strategies']: - self.assertTrue( - all(val is not None for key, val in strategy.items() - if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) - - def test_many_without_soft_deleted(self): - strategy_list = [] - for id_ in [1, 2, 3]: - strategy = obj_utils.create_test_strategy( - self.context, id=id_, uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(id_)) - strategy_list.append(strategy.uuid) - for id_ in [4, 5]: - strategy = obj_utils.create_test_strategy( - self.context, id=id_, uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(id_)) - strategy.soft_delete() - response = self.get_json('/strategies') - self.assertEqual(3, len(response['strategies'])) - uuids = [s['uuid'] for s in response['strategies']] - self.assertEqual(sorted(strategy_list), sorted(uuids)) - - def test_strategies_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_strategy( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(idx)) - response = 
self.get_json('/strategies/?limit=2') - self.assertEqual(2, len(response['strategies'])) - - def test_strategies_collection_links_default_limit(self): - for idx in range(1, 6): - obj_utils.create_test_strategy( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(idx)) - cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/strategies') - self.assertEqual(3, len(response['strategies'])) - - def test_filter_by_goal_uuid(self): - goal1 = obj_utils.create_test_goal( - self.context, - id=2, - uuid=utils.generate_uuid(), - name='My_Goal 1') - goal2 = obj_utils.create_test_goal( - self.context, - id=3, - uuid=utils.generate_uuid(), - name='My Goal 2') - - for id_ in range(1, 3): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal1['id']) - for id_ in range(3, 5): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal2['id']) - - response = self.get_json('/strategies/?goal=%s' % goal1['uuid']) - - strategies = response['strategies'] - self.assertEqual(2, len(strategies)) - for strategy in strategies: - self.assertEqual(goal1['uuid'], strategy['goal_uuid']) - - def test_filter_by_goal_name(self): - goal1 = obj_utils.create_test_goal( - self.context, - id=2, - uuid=utils.generate_uuid(), - name='My_Goal 1') - goal2 = obj_utils.create_test_goal( - self.context, - id=3, - uuid=utils.generate_uuid(), - name='My Goal 2') - - for id_ in range(1, 3): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal1['id']) - for id_ in range(3, 5): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal2['id']) - - response = self.get_json('/strategies/?goal=%s' % goal1['name']) - - strategies = response['strategies'] - self.assertEqual(2, len(strategies)) 
- for strategy in strategies: - self.assertEqual(goal1['uuid'], strategy['goal_uuid']) - - -class TestStrategyPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestStrategyPolicyEnforcement, self).setUp() - self.fake_goal = obj_utils.create_test_goal( - self.context, uuid=utils.generate_uuid()) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "strategy:get_all", self.get_json, '/strategies', - expect_errors=True) - - def test_policy_disallow_get_one(self): - strategy = obj_utils.create_test_strategy(self.context) - self._common_policy_check( - "strategy:get", self.get_json, - '/strategies/%s' % strategy.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "strategy:detail", self.get_json, - '/strategies/detail', - expect_errors=True) - - -class TestStrategyEnforcementWithAdminContext( - TestListStrategy, api_base.AdminRoleTest): - - def setUp(self): - super(TestStrategyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "strategy:detail": "rule:default", - "strategy:get": "rule:default", - "strategy:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_types.py b/watcher/tests/api/v1/test_types.py deleted file mode 100644 index 2a6a34e..0000000 --- a/watcher/tests/api/v1/test_types.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import webtest -import wsme -from wsme import types as wtypes - -from watcher.api.controllers.v1 import types -from watcher.common import exception -from watcher.common import utils -from watcher.tests import base - - -class TestUuidType(base.TestCase): - - def test_valid_uuid(self): - test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' - self.assertEqual(test_uuid, types.UuidType.validate(test_uuid)) - - def test_invalid_uuid(self): - self.assertRaises(exception.InvalidUUID, - types.UuidType.validate, 'invalid-uuid') - - -class TestNameType(base.TestCase): - - def test_valid_name(self): - test_name = 'hal-9000' - self.assertEqual(test_name, types.NameType.validate(test_name)) - - def test_invalid_name(self): - self.assertRaises(exception.InvalidName, - types.NameType.validate, '-this is not valid-') - - -class TestUuidOrNameType(base.TestCase): - - @mock.patch.object(utils, 'is_uuid_like') - @mock.patch.object(utils, 'is_hostname_safe') - def test_valid_uuid(self, host_mock, uuid_mock): - test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' - host_mock.return_value = False - uuid_mock.return_value = True - self.assertTrue(types.UuidOrNameType.validate(test_uuid)) - uuid_mock.assert_called_once_with(test_uuid) - - @mock.patch.object(utils, 'is_uuid_like') - @mock.patch.object(utils, 'is_hostname_safe') - def test_valid_name(self, host_mock, uuid_mock): - test_name = 'dc16-database5' - uuid_mock.return_value = 
False - host_mock.return_value = True - self.assertTrue(types.UuidOrNameType.validate(test_name)) - host_mock.assert_called_once_with(test_name) - - def test_invalid_uuid_or_name(self): - self.assertRaises(exception.InvalidUuidOrName, - types.UuidOrNameType.validate, 'inval#uuid%or*name') - - -class MyPatchType(types.JsonPatchType): - """Helper class for TestJsonPatchType tests.""" - - @staticmethod - def mandatory_attrs(): - return ['/mandatory'] - - @staticmethod - def internal_attrs(): - return ['/internal'] - - -class MyRoot(wsme.WSRoot): - """Helper class for TestJsonPatchType tests.""" - - @wsme.expose([wsme.types.text], body=[MyPatchType]) - @wsme.validate([MyPatchType]) - def test(self, patch): - return patch - - -class TestJsonPatchType(base.TestCase): - - def setUp(self): - super(TestJsonPatchType, self).setUp() - self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp()) - - def _patch_json(self, params, expect_errors=False): - return self.app.patch_json( - '/test', - params=params, - headers={'Accept': 'application/json'}, - expect_errors=expect_errors - ) - - def test_valid_patches(self): - valid_patches = [{'path': '/extra/foo', 'op': 'remove'}, - {'path': '/extra/foo', 'op': 'add', 'value': 'bar'}, - {'path': '/str', 'op': 'replace', 'value': 'bar'}, - {'path': '/bool', 'op': 'add', 'value': True}, - {'path': '/int', 'op': 'add', 'value': 1}, - {'path': '/float', 'op': 'add', 'value': 0.123}, - {'path': '/list', 'op': 'add', 'value': [1, 2]}, - {'path': '/none', 'op': 'add', 'value': None}, - {'path': '/empty_dict', 'op': 'add', 'value': {}}, - {'path': '/empty_list', 'op': 'add', 'value': []}, - {'path': '/dict', 'op': 'add', - 'value': {'cat': 'meow'}}] - ret = self._patch_json(valid_patches, False) - self.assertEqual(200, ret.status_int) - self.assertEqual(valid_patches, ret.json) - - def test_cannot_update_internal_attr(self): - patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] - ret = self._patch_json(patch, True) - 
self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_cannot_update_internal_dict_attr(self): - patch = [{'path': '/internal', 'op': 'replace', - 'value': 'foo'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_mandatory_attr(self): - patch = [{'op': 'replace', 'path': '/mandatory', 'value': 'foo'}] - ret = self._patch_json(patch, False) - self.assertEqual(200, ret.status_int) - self.assertEqual(patch, ret.json) - - def test_cannot_remove_mandatory_attr(self): - patch = [{'op': 'remove', 'path': '/mandatory'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_missing_required_fields_path(self): - missing_path = [{'op': 'remove'}] - ret = self._patch_json(missing_path, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_missing_required_fields_op(self): - missing_op = [{'path': '/foo'}] - ret = self._patch_json(missing_op, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_invalid_op(self): - patch = [{'path': '/foo', 'op': 'invalid'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_invalid_path(self): - patch = [{'path': 'invalid-path', 'op': 'remove'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_cannot_add_with_no_value(self): - patch = [{'path': '/extra/foo', 'op': 'add'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_cannot_replace_with_no_value(self): - patch = [{'path': '/foo', 'op': 'replace'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - 
self.assertTrue(ret.json['faultstring']) - - -class TestBooleanType(base.TestCase): - - def test_valid_true_values(self): - v = types.BooleanType() - self.assertTrue(v.validate("true")) - self.assertTrue(v.validate("TRUE")) - self.assertTrue(v.validate("True")) - self.assertTrue(v.validate("t")) - self.assertTrue(v.validate("1")) - self.assertTrue(v.validate("y")) - self.assertTrue(v.validate("yes")) - self.assertTrue(v.validate("on")) - - def test_valid_false_values(self): - v = types.BooleanType() - self.assertFalse(v.validate("false")) - self.assertFalse(v.validate("FALSE")) - self.assertFalse(v.validate("False")) - self.assertFalse(v.validate("f")) - self.assertFalse(v.validate("0")) - self.assertFalse(v.validate("n")) - self.assertFalse(v.validate("no")) - self.assertFalse(v.validate("off")) - - def test_invalid_value(self): - v = types.BooleanType() - self.assertRaises(exception.Invalid, v.validate, "invalid-value") - self.assertRaises(exception.Invalid, v.validate, "01") - - -class TestJsonType(base.TestCase): - - def test_valid_values(self): - vt = types.jsontype - value = vt.validate("hello") - self.assertEqual("hello", value) - value = vt.validate(10) - self.assertEqual(10, value) - value = vt.validate(0.123) - self.assertEqual(0.123, value) - value = vt.validate(True) - self.assertTrue(value) - value = vt.validate([1, 2, 3]) - self.assertEqual([1, 2, 3], value) - value = vt.validate({'foo': 'bar'}) - self.assertEqual({'foo': 'bar'}, value) - value = vt.validate(None) - self.assertIsNone(value) - - def test_invalid_values(self): - vt = types.jsontype - self.assertRaises(exception.Invalid, vt.validate, object()) - - def test_apimultitype_tostring(self): - vts = str(types.jsontype) - self.assertIn(str(wtypes.text), vts) - self.assertIn(str(int), vts) - self.assertIn(str(float), vts) - self.assertIn(str(types.BooleanType), vts) - self.assertIn(str(list), vts) - self.assertIn(str(dict), vts) - self.assertIn(str(None), vts) diff --git 
a/watcher/tests/api/v1/test_utils.py b/watcher/tests/api/v1/test_utils.py deleted file mode 100644 index e5541dd..0000000 --- a/watcher/tests/api/v1/test_utils.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import wsme - -from oslo_config import cfg - -from watcher.api.controllers.v1 import utils -from watcher.tests import base - -CONF = cfg.CONF - - -class TestApiUtils(base.TestCase): - - def test_validate_limit(self): - limit = utils.validate_limit(10) - self.assertEqual(10, 10) - - # max limit - limit = utils.validate_limit(999999999) - self.assertEqual(CONF.api.max_limit, limit) - - # negative - self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1) - - # zero - self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0) - - def test_validate_sort_dir(self): - # if sort_dir is valid, nothing should happen - try: - utils.validate_sort_dir('asc') - except Exception as exc: - self.fail(exc) - - # invalid sort_dir parameter - self.assertRaises(wsme.exc.ClientSideError, - utils.validate_sort_dir, - 'fake-sort') - - def test_validate_search_filters(self): - allowed_fields = ["allowed", "authorized"] - - test_filters = {"allowed": 1, "authorized": 2} - try: - utils.validate_search_filters(test_filters, allowed_fields) - except Exception as exc: - self.fail(exc) - - def test_validate_search_filters_with_invalid_key(self): - allowed_fields = ["allowed", 
"authorized"] - - test_filters = {"allowed": 1, "unauthorized": 2} - - self.assertRaises( - wsme.exc.ClientSideError, utils.validate_search_filters, - test_filters, allowed_fields) diff --git a/watcher/tests/applier/__init__.py b/watcher/tests/applier/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/action_plan/__init__.py b/watcher/tests/applier/action_plan/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/action_plan/test_default_action_handler.py b/watcher/tests/applier/action_plan/test_default_action_handler.py deleted file mode 100755 index 7aadee9..0000000 --- a/watcher/tests/applier/action_plan/test_default_action_handler.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.applier.action_plan import default -from watcher.applier import default as ap_applier -from watcher.common import exception -from watcher import notifications -from watcher import objects -from watcher.objects import action_plan as ap_objects -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestDefaultActionPlanHandler(base.DbTestCase): - - class FakeApplierException(Exception): - pass - - def setUp(self): - super(TestDefaultActionPlanHandler, self).setUp() - - p_action_plan_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_plan_notifications = p_action_plan_notifications.start() - self.addCleanup(p_action_plan_notifications.stop) - - obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit( - self.context, strategy_id=self.strategy.id) - self.action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit.id, - strategy_id=self.strategy.id) - self.action = obj_utils.create_test_action( - self.context, action_plan_id=self.action_plan.id, - action_type='nop', - input_parameters={'message': 'hello World'}) - - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_launch_action_plan(self, m_get_action_plan): - m_get_action_plan.return_value = self.action_plan - command = default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - - expected_calls = [ - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual(ap_objects.State.SUCCEEDED, self.action_plan.state) - - self.assertEqual( - expected_calls, - 
self.m_action_plan_notifications - .send_action_notification - .call_args_list) - - @mock.patch.object(ap_applier.DefaultApplier, "execute") - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_launch_action_plan_with_error(self, m_get_action_plan, m_execute): - m_get_action_plan.return_value = self.action_plan - m_execute.side_effect = self.FakeApplierException - command = default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - - expected_calls = [ - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - priority=objects.fields.NotificationPriority.ERROR, - phase=objects.fields.NotificationPhase.ERROR)] - - self.assertEqual(ap_objects.State.FAILED, self.action_plan.state) - - self.assertEqual( - expected_calls, - self.m_action_plan_notifications - .send_action_notification - .call_args_list) - - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_cancel_action_plan(self, m_get_action_plan): - m_get_action_plan.return_value = self.action_plan - self.action_plan.state = ap_objects.State.CANCELLED - self.action_plan.save() - command = default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - action = self.action.get_by_uuid(self.context, self.action.uuid) - self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) - self.assertEqual(objects.action.State.CANCELLED, action.state) - - @mock.patch.object(ap_applier.DefaultApplier, "execute") - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_cancel_action_plan_with_exception(self, m_get_action_plan, - m_execute): - m_get_action_plan.return_value = self.action_plan - m_execute.side_effect = exception.ActionPlanCancelled( - self.action_plan.uuid) - command = 
default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) diff --git a/watcher/tests/applier/actions/__init__.py b/watcher/tests/applier/actions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/actions/loading/__init__.py b/watcher/tests/applier/actions/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/actions/loading/test_default_actions_loader.py b/watcher/tests/applier/actions/loading/test_default_actions_loader.py deleted file mode 100644 index 5a36391..0000000 --- a/watcher/tests/applier/actions/loading/test_default_actions_loader.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import unicode_literals - -from watcher.applier.actions import base as abase -from watcher.applier.loading import default -from watcher.tests import base - - -class TestDefaultActionLoader(base.TestCase): - def setUp(self): - super(TestDefaultActionLoader, self).setUp() - self.loader = default.DefaultActionLoader() - - def test_endpoints(self): - for endpoint in self.loader.list_available(): - loaded = self.loader.load(endpoint) - self.assertIsNotNone(loaded) - self.assertIsInstance(loaded, abase.BaseAction) diff --git a/watcher/tests/applier/actions/test_change_node_power_state.py b/watcher/tests/applier/actions/test_change_node_power_state.py deleted file mode 100644 index ca60995..0000000 --- a/watcher/tests/applier/actions/test_change_node_power_state.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) 2017 ZTE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import change_node_power_state -from watcher.common import clients -from watcher.tests import base - -COMPUTE_NODE = "compute-1" - - -@mock.patch.object(clients.OpenStackClients, 'nova') -@mock.patch.object(clients.OpenStackClients, 'ironic') -class TestChangeNodePowerState(base.TestCase): - - def setUp(self): - super(TestChangeNodePowerState, self).setUp() - - self.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - "state": change_node_power_state.NodeState.POWERON.value, - } - self.action = change_node_power_state.ChangeNodePowerState( - mock.Mock()) - self.action.input_parameters = self.input_parameters - - def test_parameters_down(self, mock_ironic, mock_nova): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - self.action.STATE: - change_node_power_state.NodeState.POWEROFF.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_up(self, mock_ironic, mock_nova): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - self.action.STATE: - change_node_power_state.NodeState.POWERON.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_exception_wrong_state(self, mock_ironic, mock_nova): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - self.action.STATE: 'error'} - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_resource_id_empty(self, mock_ironic, mock_nova): - self.action.input_parameters = { - self.action.STATE: - change_node_power_state.NodeState.POWERON.value, - } - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_applies_add_extra(self, mock_ironic, mock_nova): - self.action.input_parameters = {"extra": "failed"} - 
self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_change_service_state_pre_condition(self, mock_ironic, mock_nova): - try: - self.action.pre_condition() - except Exception as exc: - self.fail(exc) - - def test_change_node_state_post_condition(self, mock_ironic, mock_nova): - try: - self.action.post_condition() - except Exception as exc: - self.fail(exc) - - def test_execute_node_service_state_with_poweron_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - self.action.execute() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWERON.value) - - def test_execute_change_node_state_with_poweroff_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - mock_nvclient = mock_nova.return_value - mock_get = mock.MagicMock() - mock_get.to_dict.return_value = {'running_vms': 0} - mock_nvclient.hypervisors.get.return_value = mock_get - self.action.input_parameters["state"] = ( - change_node_power_state.NodeState.POWEROFF.value) - self.action.execute() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWEROFF.value) - - def test_revert_change_node_state_with_poweron_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - mock_nvclient = mock_nova.return_value - mock_get = mock.MagicMock() - mock_get.to_dict.return_value = {'running_vms': 0} - mock_nvclient.hypervisors.get.return_value = mock_get - self.action.input_parameters["state"] = ( - change_node_power_state.NodeState.POWERON.value) - self.action.revert() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWEROFF.value) - - def test_revert_change_node_state_with_poweroff_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - self.action.input_parameters["state"] = ( 
- change_node_power_state.NodeState.POWEROFF.value) - self.action.revert() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWERON.value) diff --git a/watcher/tests/applier/actions/test_change_nova_service_state.py b/watcher/tests/applier/actions/test_change_nova_service_state.py deleted file mode 100644 index e2f016c..0000000 --- a/watcher/tests/applier/actions/test_change_nova_service_state.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import change_nova_service_state -from watcher.common import clients -from watcher.common import nova_helper -from watcher.decision_engine.model import element -from watcher.tests import base - - -class TestChangeNovaServiceState(base.TestCase): - - def setUp(self): - super(TestChangeNovaServiceState, self).setUp() - - self.m_osc_cls = mock.Mock() - self.m_helper_cls = mock.Mock() - self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.m_helper_cls.return_value = self.m_helper - self.m_osc = mock.Mock(spec=clients.OpenStackClients) - self.m_osc_cls.return_value = self.m_osc - - m_openstack_clients = mock.patch.object( - clients, "OpenStackClients", self.m_osc_cls) - m_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.m_helper_cls) - - m_openstack_clients.start() - m_nova_helper.start() - - self.addCleanup(m_openstack_clients.stop) - self.addCleanup(m_nova_helper.stop) - - self.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - "state": element.ServiceState.ENABLED.value, - } - self.action = change_nova_service_state.ChangeNovaServiceState( - mock.Mock()) - self.action.input_parameters = self.input_parameters - - def test_parameters_down(self): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - self.action.STATE: element.ServiceState.DISABLED.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_up(self): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - self.action.STATE: element.ServiceState.ENABLED.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_exception_wrong_state(self): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - self.action.STATE: 'error'} - self.assertRaises(jsonschema.ValidationError, 
- self.action.validate_parameters) - - def test_parameters_resource_id_empty(self): - self.action.input_parameters = { - self.action.STATE: element.ServiceState.ENABLED.value, - } - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_applies_add_extra(self): - self.action.input_parameters = {"extra": "failed"} - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_change_service_state_pre_condition(self): - try: - self.action.pre_condition() - except Exception as exc: - self.fail(exc) - - def test_change_service_state_post_condition(self): - try: - self.action.post_condition() - except Exception as exc: - self.fail(exc) - - def test_execute_change_service_state_with_enable_target(self): - self.action.execute() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.enable_service_nova_compute.assert_called_once_with( - "compute-1") - - def test_execute_change_service_state_with_disable_target(self): - self.action.input_parameters["state"] = ( - element.ServiceState.DISABLED.value) - self.action.execute() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.disable_service_nova_compute.assert_called_once_with( - "compute-1") - - def test_revert_change_service_state_with_enable_target(self): - self.action.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.disable_service_nova_compute.assert_called_once_with( - "compute-1") - - def test_revert_change_service_state_with_disable_target(self): - self.action.input_parameters["state"] = ( - element.ServiceState.DISABLED.value) - self.action.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.enable_service_nova_compute.assert_called_once_with( - "compute-1") diff --git a/watcher/tests/applier/actions/test_migration.py b/watcher/tests/applier/actions/test_migration.py deleted file mode 100644 index 7d85a00..0000000 
--- a/watcher/tests/applier/actions/test_migration.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - - -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import migration -from watcher.common import clients -from watcher.common import exception -from watcher.common import nova_helper -from watcher.tests import base - - -class TestMigration(base.TestCase): - - INSTANCE_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba" - - def setUp(self): - super(TestMigration, self).setUp() - - self.m_osc_cls = mock.Mock() - self.m_helper_cls = mock.Mock() - self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.m_helper_cls.return_value = self.m_helper - self.m_osc = mock.Mock(spec=clients.OpenStackClients) - self.m_osc_cls.return_value = self.m_osc - - m_openstack_clients = mock.patch.object( - clients, "OpenStackClients", self.m_osc_cls) - m_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.m_helper_cls) - - m_openstack_clients.start() - m_nova_helper.start() - - self.addCleanup(m_openstack_clients.stop) - self.addCleanup(m_nova_helper.stop) - - self.input_parameters = { - "migration_type": "live", - "source_node": "compute1-hostname", - "destination_node": "compute2-hostname", - baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, - } - self.action = migration.Migrate(mock.Mock()) - 
self.action.input_parameters = self.input_parameters - - self.input_parameters_cold = { - "migration_type": "cold", - "source_node": "compute1-hostname", - "destination_node": "compute2-hostname", - baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, - } - self.action_cold = migration.Migrate(mock.Mock()) - self.action_cold.input_parameters = self.input_parameters_cold - - def test_parameters(self): - params = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.MIGRATION_TYPE: 'live', - self.action.DESTINATION_NODE: 'compute-2', - self.action.SOURCE_NODE: 'compute-3'} - self.action.input_parameters = params - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_cold(self): - params = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.MIGRATION_TYPE: 'cold', - self.action.DESTINATION_NODE: 'compute-2', - self.action.SOURCE_NODE: 'compute-3'} - self.action_cold.input_parameters = params - self.assertTrue(self.action_cold.validate_parameters()) - - def test_parameters_exception_empty_fields(self): - parameters = {baction.BaseAction.RESOURCE_ID: None, - 'migration_type': None, - 'source_node': None, - 'destination_node': None} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_exception_migration_type(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - 'migration_type': 'unknown', - 'source_node': 'compute-2', - 'destination_node': 'compute-3'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_exception_source_node(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - 'migration_type': 'live', - 'source_node': None, - 'destination_node': 'compute-3'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - 
self.action.validate_parameters) - - def test_parameters_destination_node_none(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - 'migration_type': 'live', - 'source_node': 'compute-1', - 'destination_node': None} - self.action.input_parameters = parameters - self.assertTrue(self.action.validate_parameters) - - def test_parameters_exception_resource_id(self): - parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", - 'migration_type': 'live', - 'source_node': 'compute-2', - 'destination_node': 'compute-3'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_migration_pre_condition(self): - try: - self.action.pre_condition() - except Exception as exc: - self.fail(exc) - - def test_migration_post_condition(self): - try: - self.action.post_condition() - except Exception as exc: - self.fail(exc) - - def test_execute_live_migration_invalid_instance(self): - self.m_helper.find_instance.return_value = None - exc = self.assertRaises( - exception.InstanceNotFound, self.action.execute) - self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) - self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) - - def test_execute_cold_migration_invalid_instance(self): - self.m_helper.find_instance.return_value = None - exc = self.assertRaises( - exception.InstanceNotFound, self.action_cold.execute) - self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) - self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) - - def test_execute_live_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - try: - self.action.execute() - except Exception as exc: - self.fail(exc) - - self.m_helper.live_migrate_instance.assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname") - - def test_execute_cold_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - 
try: - self.action_cold.execute() - except Exception as exc: - self.fail(exc) - - self.m_helper.watcher_non_live_migrate_instance.\ - assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname" - ) - - def test_revert_live_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - self.action.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.live_migrate_instance.assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute1-hostname" - ) - - def test_revert_cold_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - self.action_cold.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.watcher_non_live_migrate_instance.\ - assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute1-hostname" - ) - - def test_live_migrate_non_shared_storage_instance(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - self.m_helper.live_migrate_instance.side_effect = [ - nova_helper.nvexceptions.ClientException(400, "BadRequest"), True] - - try: - self.action.execute() - except Exception as exc: - self.fail(exc) - - self.m_helper.live_migrate_instance.assert_has_calls([ - mock.call(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname"), - mock.call(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname", - block_migration=True) - ]) - - expected = [mock.call.first(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname"), - mock.call.second(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname", - block_migration=True) - ] - self.m_helper.live_migrate_instance.mock_calls == expected - self.assertEqual(2, self.m_helper.live_migrate_instance.call_count) - - def test_abort_live_migrate(self): - migration = mock.MagicMock() - migration.id = "2" - migrations = [migration] - 
self.m_helper.get_running_migration.return_value = migrations - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - try: - self.action.abort() - except Exception as exc: - self.fail(exc) - - self.m_helper.abort_live_migrate.assert_called_once_with( - instance_id=self.INSTANCE_UUID, source="compute1-hostname", - destination="compute2-hostname") diff --git a/watcher/tests/applier/actions/test_resize.py b/watcher/tests/applier/actions/test_resize.py deleted file mode 100644 index 0cdfc0e..0000000 --- a/watcher/tests/applier/actions/test_resize.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import resize -from watcher.common import clients -from watcher.common import nova_helper -from watcher.tests import base - - -class TestResize(base.TestCase): - - INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" - - def setUp(self): - super(TestResize, self).setUp() - - self.r_osc_cls = mock.Mock() - self.r_helper_cls = mock.Mock() - self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.r_helper_cls.return_value = self.r_helper - self.r_osc = mock.Mock(spec=clients.OpenStackClients) - self.r_osc_cls.return_value = self.r_osc - - r_openstack_clients = mock.patch.object( - clients, "OpenStackClients", self.r_osc_cls) - r_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.r_helper_cls) - - r_openstack_clients.start() - r_nova_helper.start() - - self.addCleanup(r_openstack_clients.stop) - self.addCleanup(r_nova_helper.stop) - - self.input_parameters = { - "flavor": "x1", - baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, - } - self.action = resize.Resize(mock.Mock()) - self.action.input_parameters = self.input_parameters - - def test_parameters(self): - params = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.FLAVOR: 'x1'} - self.action.input_parameters = params - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_exception_empty_fields(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.FLAVOR: None} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_exception_flavor(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.FLAVOR: None} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - 
- def test_parameters_exception_resource_id(self): - parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", - self.action.FLAVOR: 'x1'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_execute_resize(self): - self.r_helper.find_instance.return_value = self.INSTANCE_UUID - self.action.execute() - self.r_helper.resize_instance.assert_called_once_with( - instance_id=self.INSTANCE_UUID, flavor='x1') diff --git a/watcher/tests/applier/actions/test_sleep.py b/watcher/tests/applier/actions/test_sleep.py deleted file mode 100644 index 0b83c8f..0000000 --- a/watcher/tests/applier/actions/test_sleep.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2016 b<>com -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -import jsonschema -import mock - -from watcher.applier.actions import sleep -from watcher.tests import base - - -class TestSleep(base.TestCase): - def setUp(self): - super(TestSleep, self).setUp() - self.s = sleep.Sleep(mock.Mock()) - - def test_parameters_duration(self): - self.s.input_parameters = {self.s.DURATION: 1.0} - self.assertTrue(self.s.validate_parameters()) - - def test_parameters_duration_empty(self): - self.s.input_parameters = {self.s.DURATION: None} - self.assertRaises(jsonschema.ValidationError, - self.s.validate_parameters) - - def test_parameters_wrong_parameter(self): - self.s.input_parameters = {self.s.DURATION: "ef"} - self.assertRaises(jsonschema.ValidationError, - self.s.validate_parameters) - - def test_parameters_add_field(self): - self.s.input_parameters = {self.s.DURATION: 1.0, "not_required": "nop"} - self.assertRaises(jsonschema.ValidationError, - self.s.validate_parameters) diff --git a/watcher/tests/applier/messaging/__init__.py b/watcher/tests/applier/messaging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py b/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py deleted file mode 100644 index cb6bf90..0000000 --- a/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import mock - -from watcher.applier.messaging import trigger -from watcher.common import utils -from watcher.tests import base - - -class TestTriggerActionPlan(base.TestCase): - def __init__(self, *args, **kwds): - super(TestTriggerActionPlan, self).__init__(*args, **kwds) - self.applier = mock.MagicMock() - self.endpoint = trigger.TriggerActionPlan(self.applier) - - def setUp(self): - super(TestTriggerActionPlan, self).setUp() - - def test_launch_action_plan(self): - action_plan_uuid = utils.generate_uuid() - expected_uuid = self.endpoint.launch_action_plan(self.context, - action_plan_uuid) - self.assertEqual(expected_uuid, action_plan_uuid) diff --git a/watcher/tests/applier/test_applier_manager.py b/watcher/tests/applier/test_applier_manager.py deleted file mode 100644 index bfa6750..0000000 --- a/watcher/tests/applier/test_applier_manager.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import mock - -import oslo_messaging as om -from watcher.applier import manager as applier_manager -from watcher.common import service -from watcher.tests import base - - -class TestApplierManager(base.TestCase): - def setUp(self): - super(TestApplierManager, self).setUp() - p_heartbeat = mock.patch.object( - service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - self.applier = service.Service(applier_manager.ApplierManager) - - @mock.patch.object(om.rpc.server.RPCServer, "stop") - @mock.patch.object(om.rpc.server.RPCServer, "start") - def test_start(self, m_messaging_start, m_messaging_stop): - self.applier.start() - self.applier.stop() - self.assertEqual(1, m_messaging_start.call_count) - self.assertEqual(1, m_messaging_stop.call_count) diff --git a/watcher/tests/applier/test_rpcapi.py b/watcher/tests/applier/test_rpcapi.py deleted file mode 100644 index 80e221c..0000000 --- a/watcher/tests/applier/test_rpcapi.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import mock -import oslo_messaging as om -from watcher.applier import rpcapi - -from watcher.common import exception -from watcher.common import utils -from watcher.tests import base - - -class TestApplierAPI(base.TestCase): - - api = rpcapi.ApplierAPI() - - def setUp(self): - super(TestApplierAPI, self).setUp() - - def test_get_api_version(self): - with mock.patch.object(om.RPCClient, 'call') as mock_call: - expected_context = self.context - self.api.check_api_version(expected_context) - mock_call.assert_called_once_with( - expected_context, - 'check_api_version', - api_version=rpcapi.ApplierAPI().API_VERSION) - - def test_execute_audit_without_error(self): - with mock.patch.object(om.RPCClient, 'cast') as mock_cast: - action_plan_uuid = utils.generate_uuid() - self.api.launch_action_plan(self.context, action_plan_uuid) - mock_cast.assert_called_once_with( - self.context, - 'launch_action_plan', - action_plan_uuid=action_plan_uuid) - - def test_execute_action_plan_throw_exception(self): - action_plan_uuid = "uuid" - self.assertRaises(exception.InvalidUuidOrName, - self.api.launch_action_plan, - action_plan_uuid) diff --git a/watcher/tests/applier/workflow_engine/__init__.py b/watcher/tests/applier/workflow_engine/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/workflow_engine/loading/__init__.py b/watcher/tests/applier/workflow_engine/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py b/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py deleted file mode 100644 index 49d27ca..0000000 --- a/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the 
License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import unicode_literals - -from watcher.applier.loading import default -from watcher.applier.workflow_engine import base as wbase -from watcher.tests import base - - -class TestDefaultActionLoader(base.TestCase): - def setUp(self): - super(TestDefaultActionLoader, self).setUp() - self.loader = default.DefaultWorkFlowEngineLoader() - - def test_endpoints(self): - for endpoint in self.loader.list_available(): - loaded = self.loader.load(endpoint) - self.assertIsNotNone(loaded) - self.assertIsInstance(loaded, wbase.BaseWorkFlowEngine) diff --git a/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py b/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py deleted file mode 100644 index fdf902c..0000000 --- a/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py +++ /dev/null @@ -1,354 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import mock - -import six - -from watcher.applier.actions import base as abase -from watcher.applier.actions import factory -from watcher.applier.workflow_engine import default as tflow -from watcher.common import exception -from watcher.common import utils -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class ExpectedException(Exception): - pass - - -@six.add_metaclass(abc.ABCMeta) -class FakeAction(abase.BaseAction): - def schema(self): - pass - - def post_condition(self): - pass - - def pre_condition(self): - pass - - def revert(self): - pass - - def execute(self): - raise ExpectedException() - - def get_description(self): - return "fake action, just for test" - - -class TestDefaultWorkFlowEngine(base.DbTestCase): - def setUp(self): - super(TestDefaultWorkFlowEngine, self).setUp() - self.engine = tflow.DefaultWorkFlowEngine( - config=mock.Mock(), - context=self.context, - applier_manager=mock.MagicMock()) - self.engine.config.max_workers = 2 - - @mock.patch('taskflow.engines.load') - @mock.patch('taskflow.patterns.graph_flow.Flow.link') - def test_execute(self, graph_flow, engines): - actions = mock.MagicMock() - try: - self.engine.execute(actions) - self.assertTrue(engines.called) - except Exception as exc: - self.fail(exc) - - def create_action(self, action_type, parameters, parents=None, uuid=None, - state=None): - action = { - 'uuid': uuid or utils.generate_uuid(), - 'action_plan_id': 0, - 'action_type': action_type, - 'input_parameters': parameters, - 'state': objects.action.State.PENDING, - 'parents': parents or [], - - } - new_action = objects.Action(self.context, **action) - with mock.patch.object(notifications.action, 'send_create'): - new_action.create() - return new_action - - def check_action_state(self, action, expected_state): - to_check = objects.Action.get_by_uuid(self.context, action.uuid) - 
self.assertEqual(expected_state, to_check.state) - - def check_actions_state(self, actions, expected_state): - for a in actions: - self.check_action_state(a, expected_state) - - @mock.patch('taskflow.engines.load') - @mock.patch('taskflow.patterns.graph_flow.Flow.link') - def test_execute_with_no_actions(self, graph_flow, engines): - actions = [] - try: - self.engine.execute(actions) - self.assertFalse(graph_flow.called) - self.assertTrue(engines.called) - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_one_action(self, mock_send_update, - mock_execution_notification, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [self.create_action("nop", {'message': 'test'})] - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_nop_sleep(self, mock_send_update, - mock_execution_notification, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - first_nop = self.create_action("nop", {'message': 'test'}) - second_nop = self.create_action("nop", {'message': 'second test'}) - sleep = self.create_action("sleep", {'duration': 0.0}, - parents=[first_nop.uuid, second_nop.uuid]) - actions.extend([first_nop, second_nop, sleep]) - - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - 
@mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_parents(self, mock_send_update, - mock_execution_notification, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - first_nop = self.create_action( - "nop", {'message': 'test'}, - uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19') - second_nop = self.create_action( - "nop", {'message': 'second test'}, - uuid='0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23') - first_sleep = self.create_action( - "sleep", {'duration': 0.0}, parents=[first_nop.uuid, - second_nop.uuid], - uuid='be436531-0da3-4dad-a9c0-ea1d2aff6496') - second_sleep = self.create_action( - "sleep", {'duration': 0.0}, parents=[first_sleep.uuid], - uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c') - actions.extend([first_nop, second_nop, first_sleep, second_sleep]) - - expected_nodes = [ - {'uuid': 'bc7eee5c-4fbe-4def-9744-b539be55aa19', - 'input_parameters': {u'message': u'test'}, - 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], - 'action_type': u'nop', 'id': 1}, - {'uuid': '0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', - 'input_parameters': {u'message': u'second test'}, - 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], - 'action_type': u'nop', 'id': 2}, - {'uuid': 'be436531-0da3-4dad-a9c0-ea1d2aff6496', - 'input_parameters': {u'duration': 0.0}, - 'action_plan_id': 0, 'state': u'PENDING', - 'parents': [u'bc7eee5c-4fbe-4def-9744-b539be55aa19', - u'0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23'], - 'action_type': u'sleep', 'id': 3}, - {'uuid': '9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', - 'input_parameters': {u'duration': 0.0}, - 'action_plan_id': 0, 'state': u'PENDING', - 'parents': [u'be436531-0da3-4dad-a9c0-ea1d2aff6496'], - 'action_type': u'sleep', 'id': 4}] - - expected_edges = [ - ('action_type:nop uuid:0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', - 'action_type:sleep 
uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), - ('action_type:nop uuid:bc7eee5c-4fbe-4def-9744-b539be55aa19', - 'action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), - ('action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496', - 'action_type:sleep uuid:9eb51e14-936d-4d12-a500-6ba0f5e0bb1c')] - - try: - flow = self.engine.execute(actions) - actual_nodes = sorted([x[0]._db_action.as_dict() - for x in flow.iter_nodes()], - key=lambda x: x['id']) - for expected, actual in zip(expected_nodes, actual_nodes): - for key in expected.keys(): - self.assertIn(expected[key], actual.values()) - actual_edges = [(u.name, v.name) - for (u, v, _) in flow.iter_links()] - - for edge in expected_edges: - self.assertIn(edge, actual_edges) - - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_two_actions(self, m_send_update, m_execution, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - second = self.create_action("sleep", {'duration': 0.0}) - first = self.create_action("nop", {'message': 'test'}) - - actions.append(first) - actions.append(second) - - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_three_actions(self, m_send_update, m_execution, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - third = self.create_action("nop", {'message': 'next'}) 
- second = self.create_action("sleep", {'duration': 0.0}) - first = self.create_action("nop", {'message': 'hello'}) - - self.check_action_state(first, objects.action.State.PENDING) - self.check_action_state(second, objects.action.State.PENDING) - self.check_action_state(third, objects.action.State.PENDING) - - actions.append(first) - actions.append(second) - actions.append(third) - - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_exception(self, m_send_update, m_execution, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - - third = self.create_action("no_exist", {'message': 'next'}) - second = self.create_action("sleep", {'duration': 0.0}) - first = self.create_action("nop", {'message': 'hello'}) - - self.check_action_state(first, objects.action.State.PENDING) - self.check_action_state(second, objects.action.State.PENDING) - self.check_action_state(third, objects.action.State.PENDING) - - actions.append(first) - actions.append(second) - actions.append(third) - - self.assertRaises(exception.WorkflowExecutionException, - self.engine.execute, actions) - - self.check_action_state(first, objects.action.State.SUCCEEDED) - self.check_action_state(second, objects.action.State.SUCCEEDED) - self.check_action_state(third, objects.action.State.FAILED) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - @mock.patch.object(factory.ActionFactory, "make_action") - def test_execute_with_action_exception(self, m_make_action, m_send_update, - m_send_execution, 
m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [self.create_action("fake_action", {})] - m_make_action.return_value = FakeAction(mock.Mock()) - - exc = self.assertRaises(exception.WorkflowExecutionException, - self.engine.execute, actions) - - self.assertIsInstance(exc.kwargs['error'], ExpectedException) - self.check_action_state(actions[0], objects.action.State.FAILED) - - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_execute_with_action_plan_cancel(self, m_get_actionplan): - obj_utils.create_test_goal(self.context) - strategy = obj_utils.create_test_strategy(self.context) - audit = obj_utils.create_test_audit( - self.context, strategy_id=strategy.id) - action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=audit.id, - strategy_id=strategy.id, - state=objects.action_plan.State.CANCELLING) - action1 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - action_type='nop', state=objects.action.State.SUCCEEDED, - input_parameters={'message': 'hello World'}) - action2 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - action_type='nop', state=objects.action.State.ONGOING, - uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', - input_parameters={'message': 'hello World'}) - action3 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - action_type='nop', state=objects.action.State.PENDING, - uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19', - input_parameters={'message': 'hello World'}) - m_get_actionplan.return_value = action_plan - actions = [] - actions.append(action1) - actions.append(action2) - actions.append(action3) - self.assertRaises(exception.ActionPlanCancelled, - self.engine.execute, actions) - try: - self.check_action_state(action1, objects.action.State.SUCCEEDED) - self.check_action_state(action2, objects.action.State.CANCELLED) - self.check_action_state(action3, 
objects.action.State.CANCELLED) - - except Exception as exc: - self.fail(exc) diff --git a/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py b/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py deleted file mode 100644 index c05d471..0000000 --- a/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import eventlet -import mock - -from watcher.applier.workflow_engine import default as tflow -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestTaskFlowActionContainer(base.DbTestCase): - def setUp(self): - super(TestTaskFlowActionContainer, self).setUp() - self.engine = tflow.DefaultWorkFlowEngine( - config=mock.Mock(), - context=self.context, - applier_manager=mock.MagicMock()) - obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit( - self.context, strategy_id=self.strategy.id) - - def test_execute(self): - action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit.id, - strategy_id=self.strategy.id, - state=objects.action.State.ONGOING) - - action = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - state=objects.action.State.ONGOING, - action_type='nop', - input_parameters={'message': 'hello World'}) - action_container = tflow.TaskFlowActionContainer( - db_action=action, - engine=self.engine) - action_container.execute() - - self.assertTrue(action.state, objects.action.State.SUCCEEDED) - - @mock.patch('eventlet.spawn') - def test_execute_with_cancel_action_plan(self, mock_eventlet_spawn): - action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit.id, - strategy_id=self.strategy.id, - state=objects.action_plan.State.CANCELLING) - - action = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - state=objects.action.State.ONGOING, - action_type='nop', - input_parameters={'message': 'hello World'}) - action_container = tflow.TaskFlowActionContainer( - db_action=action, - engine=self.engine) - - def empty_test(): - pass - et = eventlet.spawn(empty_test) - mock_eventlet_spawn.return_value = et - action_container.execute() - et.kill.assert_called_with() diff --git 
a/watcher/tests/base.py b/watcher/tests/base.py deleted file mode 100644 index fc57081..0000000 --- a/watcher/tests/base.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import os - -import mock -from oslo_config import cfg -from oslo_log import log -from oslo_messaging import conffixture -from oslotest import base -import pecan -from pecan import testing -import testscenarios - -from watcher.common import context as watcher_context -from watcher.common import service -from watcher.objects import base as objects_base -from watcher.tests import conf_fixture -from watcher.tests import policy_fixture - - -CONF = cfg.CONF -try: - log.register_options(CONF) -except cfg.ArgsAlreadyParsedError: - pass -CONF.set_override('use_stderr', False) - - -class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): - """Test base class.""" - - def setUp(self): - super(BaseTestCase, self).setUp() - self.addCleanup(cfg.CONF.reset) - - -class TestCase(BaseTestCase): - """Test case base class for all unit tests.""" - - def setUp(self): - super(TestCase, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - self.policy = self.useFixture(policy_fixture.PolicyFixture()) - self.messaging_conf = self.useFixture(conffixture.ConfFixture(CONF)) - self.messaging_conf.transport_driver = 'fake' - - 
cfg.CONF.set_override("auth_type", "admin_token", - group='keystone_authtoken') - cfg.CONF.set_override("auth_uri", "http://127.0.0.1/identity", - group='keystone_authtoken') - - app_config_path = os.path.join(os.path.dirname(__file__), 'config.py') - self.app = testing.load_test_app(app_config_path) - self.token_info = { - 'token': { - 'project': { - 'id': 'fake_project' - }, - 'user': { - 'id': 'fake_user' - } - } - } - - objects_base.WatcherObject.indirection_api = None - - self.context = watcher_context.RequestContext( - auth_token_info=self.token_info, - project_id='fake_project', - user_id='fake_user') - - self.policy = self.useFixture(policy_fixture.PolicyFixture()) - - def make_context(*args, **kwargs): - # If context hasn't been constructed with token_info - if not kwargs.get('auth_token_info'): - kwargs['auth_token_info'] = copy.deepcopy(self.token_info) - if not kwargs.get('project_id'): - kwargs['project_id'] = 'fake_project' - if not kwargs.get('user_id'): - kwargs['user_id'] = 'fake_user' - - context = watcher_context.RequestContext(*args, **kwargs) - return watcher_context.RequestContext.from_dict(context.to_dict()) - - p = mock.patch.object(watcher_context, 'make_context', - side_effect=make_context) - self.mock_make_context = p.start() - self.addCleanup(p.stop) - - self.useFixture(conf_fixture.ConfFixture(cfg.CONF)) - self._reset_singletons() - - self._base_test_obj_backup = copy.copy( - objects_base.WatcherObjectRegistry._registry._obj_classes) - self.addCleanup(self._restore_obj_registry) - self.addCleanup(self._reset_singletons) - - def _reset_singletons(self): - service.Singleton._instances.clear() - - def reset_pecan(): - pecan.set_config({}, overwrite=True) - - self.addCleanup(reset_pecan) - - def _restore_obj_registry(self): - objects_base.WatcherObjectRegistry._registry._obj_classes = ( - self._base_test_obj_backup) - - def config(self, **kw): - """Override config options for a test.""" - group = kw.pop('group', None) - for k, v in 
kw.items(): - CONF.set_override(k, v, group) - - def get_path(self, project_file=None): - """Get the absolute path to a file. Used for testing the API. - - :param project_file: File whose path to return. Default: None. - :returns: path to the specified file, or path to project root. - """ - root = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..')) - if project_file: - return os.path.join(root, project_file) - else: - return root diff --git a/watcher/tests/cmd/__init__.py b/watcher/tests/cmd/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/cmd/test_api.py b/watcher/tests/cmd/test_api.py deleted file mode 100644 index bdeba3b..0000000 --- a/watcher/tests/cmd/test_api.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import types - -import mock -from oslo_config import cfg -from oslo_service import wsgi -from pecan.testing import load_test_app - -from watcher.api import config as api_config -from watcher.cmd import api -from watcher.common import service -from watcher.tests import base - - -class TestApi(base.BaseTestCase): - - def setUp(self): - super(TestApi, self).setUp() - - self.conf = cfg.CONF - self._parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parse(self, args=[]): - return cfg.ConfigOpts._parse_cli_opts(self, []) - - _fake_parse_method = types.MethodType(_fake_parse, self.conf) - self.conf._parse_cli_opts = _fake_parse_method - - def tearDown(self): - super(TestApi, self).tearDown() - self.conf._parse_cli_opts = self._parse_cli_opts - - @mock.patch.object(wsgi, "Server", mock.Mock()) - @mock.patch("watcher.api.app.pecan.make_app") - @mock.patch.object(service, "launch") - def test_run_api_app(self, m_launcher, m_make_app): - m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) - api.main() - self.assertEqual(1, m_launcher.call_count) - - @mock.patch.object(wsgi, "Server", mock.Mock()) - @mock.patch("watcher.api.app.pecan.make_app") - @mock.patch.object(service, "launch") - def test_run_api_app_serve_specific_address(self, m_launcher, m_make_app): - cfg.CONF.set_default("host", "localhost", group="api") - m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) - api.main() - self.assertEqual(1, m_launcher.call_count) diff --git a/watcher/tests/cmd/test_applier.py b/watcher/tests/cmd/test_applier.py deleted file mode 100644 index 25690eb..0000000 --- a/watcher/tests/cmd/test_applier.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import unicode_literals - -import types - -import mock -from oslo_config import cfg -from oslo_service import service -from watcher.common import service as watcher_service - -from watcher.cmd import applier -from watcher.tests import base - - -class TestApplier(base.BaseTestCase): - def setUp(self): - super(TestApplier, self).setUp() - - self.conf = cfg.CONF - self._parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parse(self, args=[]): - return cfg.ConfigOpts._parse_cli_opts(self, []) - - _fake_parse_method = types.MethodType(_fake_parse, self.conf) - self.conf._parse_cli_opts = _fake_parse_method - p_heartbeat = mock.patch.object( - watcher_service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - - def tearDown(self): - super(TestApplier, self).tearDown() - self.conf._parse_cli_opts = self._parse_cli_opts - - @mock.patch.object(service, "launch") - def test_run_applier_app(self, m_launch): - applier.main() - self.assertEqual(1, m_launch.call_count) diff --git a/watcher/tests/cmd/test_db_manage.py b/watcher/tests/cmd/test_db_manage.py deleted file mode 100644 index f2e85ee..0000000 --- a/watcher/tests/cmd/test_db_manage.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -import mock -from oslo_config import cfg - -from watcher.cmd import dbmanage -from watcher.db import migration -from watcher.db import purge -from watcher.tests import base - - -class TestDBManageRunApp(base.TestCase): - - scenarios = ( - ("upgrade", {"command": "upgrade", "expected": "upgrade"}), - ("downgrade", {"command": "downgrade", "expected": "downgrade"}), - ("revision", {"command": "revision", "expected": "revision"}), - ("stamp", {"command": "stamp", "expected": "stamp"}), - ("version", {"command": "version", "expected": "version"}), - ("create_schema", {"command": "create_schema", - "expected": "create_schema"}), - ("purge", {"command": "purge", "expected": "purge"}), - ("no_param", {"command": None, "expected": "upgrade"}), - ) - - @mock.patch.object(dbmanage, "register_sub_command_opts", mock.Mock()) - @mock.patch("watcher.cmd.dbmanage.service.prepare_service") - @mock.patch("watcher.cmd.dbmanage.sys") - def test_run_db_manage_app(self, m_sys, m_prepare_service): - # Patch command function - m_func = mock.Mock() - cfg.CONF.register_opt(cfg.SubCommandOpt("command")) - cfg.CONF.command.func = m_func - - # Only append if the command is not None - m_sys.argv = list(filter(None, ["watcher-db-manage", self.command])) - - dbmanage.main() - self.assertEqual(1, m_func.call_count) - m_prepare_service.assert_called_once_with( - ["watcher-db-manage", self.expected], cfg.CONF) - - -class TestDBManageRunCommand(base.TestCase): - - @mock.patch.object(migration, "upgrade") - def test_run_db_upgrade(self, m_upgrade): - 
cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") - cfg.CONF.set_default("revision", "dummy", group="command") - dbmanage.DBCommand.upgrade() - - m_upgrade.assert_called_once_with("dummy") - - @mock.patch.object(migration, "downgrade") - def test_run_db_downgrade(self, m_downgrade): - cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") - cfg.CONF.set_default("revision", "dummy", group="command") - dbmanage.DBCommand.downgrade() - - m_downgrade.assert_called_once_with("dummy") - - @mock.patch.object(migration, "revision") - def test_run_db_revision(self, m_revision): - cfg.CONF.register_opt(cfg.StrOpt("message"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("autogenerate"), group="command") - cfg.CONF.set_default( - "message", "dummy_message", group="command" - ) - cfg.CONF.set_default( - "autogenerate", "dummy_autogenerate", group="command" - ) - dbmanage.DBCommand.revision() - - m_revision.assert_called_once_with( - "dummy_message", "dummy_autogenerate" - ) - - @mock.patch.object(migration, "stamp") - def test_run_db_stamp(self, m_stamp): - cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") - cfg.CONF.set_default("revision", "dummy", group="command") - dbmanage.DBCommand.stamp() - - @mock.patch.object(migration, "version") - def test_run_db_version(self, m_version): - dbmanage.DBCommand.version() - - self.assertEqual(1, m_version.call_count) - - @mock.patch.object(purge, "PurgeCommand") - def test_run_db_purge(self, m_purge_cls): - m_purge = mock.Mock() - m_purge_cls.return_value = m_purge - m_purge_cls.get_goal_uuid.return_value = 'Some UUID' - cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") - cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") - cfg.CONF.set_default("age_in_days", None, 
group="command") - cfg.CONF.set_default("max_number", None, group="command") - cfg.CONF.set_default("goal", None, group="command") - cfg.CONF.set_default("exclude_orphans", True, group="command") - cfg.CONF.set_default("dry_run", False, group="command") - - dbmanage.DBCommand.purge() - - m_purge_cls.assert_called_once_with( - None, None, 'Some UUID', True, False) - m_purge.execute.assert_called_once_with() - - @mock.patch.object(sys, "exit") - @mock.patch.object(purge, "PurgeCommand") - def test_run_db_purge_negative_max_number(self, m_purge_cls, m_exit): - m_purge = mock.Mock() - m_purge_cls.return_value = m_purge - m_purge_cls.get_goal_uuid.return_value = 'Some UUID' - cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") - cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") - cfg.CONF.set_default("age_in_days", None, group="command") - cfg.CONF.set_default("max_number", -1, group="command") - cfg.CONF.set_default("goal", None, group="command") - cfg.CONF.set_default("exclude_orphans", True, group="command") - cfg.CONF.set_default("dry_run", False, group="command") - - dbmanage.DBCommand.purge() - - self.assertEqual(0, m_purge_cls.call_count) - self.assertEqual(0, m_purge.execute.call_count) - self.assertEqual(0, m_purge.do_delete.call_count) - self.assertEqual(1, m_exit.call_count) - - @mock.patch.object(sys, "exit") - @mock.patch.object(purge, "PurgeCommand") - def test_run_db_purge_dry_run(self, m_purge_cls, m_exit): - m_purge = mock.Mock() - m_purge_cls.return_value = m_purge - m_purge_cls.get_goal_uuid.return_value = 'Some UUID' - cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") - cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") - 
cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") - cfg.CONF.set_default("age_in_days", None, group="command") - cfg.CONF.set_default("max_number", None, group="command") - cfg.CONF.set_default("goal", None, group="command") - cfg.CONF.set_default("exclude_orphans", True, group="command") - cfg.CONF.set_default("dry_run", True, group="command") - - dbmanage.DBCommand.purge() - - m_purge_cls.assert_called_once_with( - None, None, 'Some UUID', True, True) - self.assertEqual(1, m_purge.execute.call_count) - self.assertEqual(0, m_purge.do_delete.call_count) - self.assertEqual(0, m_exit.call_count) diff --git a/watcher/tests/cmd/test_decision_engine.py b/watcher/tests/cmd/test_decision_engine.py deleted file mode 100644 index 3f0380b..0000000 --- a/watcher/tests/cmd/test_decision_engine.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import types - -import mock -from oslo_config import cfg -from oslo_service import service - -from watcher.cmd import decisionengine -from watcher.common import service as watcher_service -from watcher.decision_engine.audit import continuous -from watcher.decision_engine import sync -from watcher.tests import base - - -class TestDecisionEngine(base.BaseTestCase): - - def setUp(self): - super(TestDecisionEngine, self).setUp() - - self.conf = cfg.CONF - self._parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parse(self, args=[]): - return cfg.ConfigOpts._parse_cli_opts(self, []) - - _fake_parse_method = types.MethodType(_fake_parse, self.conf) - self.conf._parse_cli_opts = _fake_parse_method - - p_heartbeat = mock.patch.object( - watcher_service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - p_continuoushandler = mock.patch.object( - continuous.ContinuousAuditHandler, "start") - self.m_continuoushandler = p_continuoushandler.start() - self.addCleanup(p_continuoushandler.stop) - - def tearDown(self): - super(TestDecisionEngine, self).tearDown() - self.conf._parse_cli_opts = self._parse_cli_opts - - @mock.patch.object(sync.Syncer, "sync", mock.Mock()) - @mock.patch.object(service, "launch") - def test_run_de_app(self, m_launch): - decisionengine.main() - self.assertEqual(1, m_launch.call_count) diff --git a/watcher/tests/common/__init__.py b/watcher/tests/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/common/loader/__init__.py b/watcher/tests/common/loader/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/common/loader/test_loader.py b/watcher/tests/common/loader/test_loader.py deleted file mode 100644 index 6eef2bd..0000000 --- a/watcher/tests/common/loader/test_loader.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- encoding: utf-8 -*- -# 
Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import mock - -from oslo_config import cfg -from stevedore import driver as drivermanager -from stevedore import extension as stevedore_extension - -from watcher.common import exception -from watcher.common.loader import default -from watcher.common.loader import loadable -from watcher.tests import base - - -class FakeLoadable(loadable.Loadable): - - @classmethod - def get_config_opts(cls): - return [] - - -class FakeLoadableWithOpts(loadable.Loadable): - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt("test_opt", default="fake_with_opts"), - ] - - -class TestLoader(base.TestCase): - - def setUp(self): - super(TestLoader, self).setUp() - - def _fake_parse(self, *args, **kw): - return cfg.ConfigOpts._parse_cli_opts(cfg.CONF, []) - - cfg.CONF._parse_cli_opts = _fake_parse - - def test_load_loadable_no_opt(self): - fake_driver = drivermanager.DriverManager.make_test_instance( - extension=stevedore_extension.Extension( - name="fake", - entry_point="%s:%s" % (FakeLoadable.__module__, - FakeLoadable.__name__), - plugin=FakeLoadable, - obj=None), - namespace="TESTING") - - loader_manager = default.DefaultLoader(namespace='TESTING') - with mock.patch.object(drivermanager, - "DriverManager") as m_driver_manager: - m_driver_manager.return_value = fake_driver - loaded_driver = loader_manager.load(name='fake') - - 
self.assertIsInstance(loaded_driver, FakeLoadable) - - @mock.patch("watcher.common.loader.default.drivermanager.DriverManager") - def test_load_loadable_bad_plugin(self, m_driver_manager): - m_driver_manager.side_effect = Exception() - - loader_manager = default.DefaultLoader(namespace='TESTING') - self.assertRaises(exception.LoadingError, loader_manager.load, - name='bad_driver') - - def test_load_loadable_with_opts(self): - fake_driver = drivermanager.DriverManager.make_test_instance( - extension=stevedore_extension.Extension( - name="fake", - entry_point="%s:%s" % (FakeLoadableWithOpts.__module__, - FakeLoadableWithOpts.__name__), - plugin=FakeLoadableWithOpts, - obj=None), - namespace="TESTING") - - loader_manager = default.DefaultLoader(namespace='TESTING') - with mock.patch.object(drivermanager, - "DriverManager") as m_driver_manager: - m_driver_manager.return_value = fake_driver - loaded_driver = loader_manager.load(name='fake') - - self.assertIsInstance(loaded_driver, FakeLoadableWithOpts) - - self.assertEqual( - "fake_with_opts", loaded_driver.config.get("test_opt")) - - self.assertEqual( - "fake_with_opts", loaded_driver.config.test_opt) diff --git a/watcher/tests/common/test_cinder_helper.py b/watcher/tests/common/test_cinder_helper.py deleted file mode 100644 index 9c5991c..0000000 --- a/watcher/tests/common/test_cinder_helper.py +++ /dev/null @@ -1,126 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import mock - -from watcher.common import cinder_helper -from watcher.common import clients -from watcher.common import exception -from watcher.tests import base - - -@mock.patch.object(clients.OpenStackClients, 'cinder') -class TestCinderHelper(base.TestCase): - - def setUp(self): - super(TestCinderHelper, self).setUp() - - @staticmethod - def fake_storage_node(**kwargs): - node = mock.MagicMock() - node.binary = kwargs.get('binary', 'cinder-volume') - node.host = kwargs.get('name', 'host@backend') - - return node - - def test_get_storage_node_list(self, mock_cinder): - node1 = self.fake_storage_node() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [node1] - cinder_util.get_storage_node_list() - cinder_util.cinder.services.list.assert_called_once_with( - binary='cinder-volume') - - def test_get_storage_node_by_name_success(self, mock_cinder): - node1 = self.fake_storage_node() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [node1] - node = cinder_util.get_storage_node_by_name('host@backend') - - self.assertEqual(node, node1) - - def test_get_storage_node_by_name_failure(self, mock_cinder): - node1 = self.fake_storage_node() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [node1] - self.assertRaisesRegex( - exception.StorageNodeNotFound, - "The storage node failure could not be found", - cinder_util.get_storage_node_by_name, 'failure') - - @staticmethod - def fake_pool(**kwargs): - pool = mock.MagicMock() - pool.name = kwargs.get('name', 'host@backend#pool') - - return pool - - def test_get_storage_pool_list(self, mock_cinder): - pool = self.fake_pool() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.pools.list.return_value = [pool] - cinder_util.get_storage_pool_list() - cinder_util.cinder.pools.list.assert_called_once_with(detailed=True) - - def test_get_storage_pool_by_name_success(self, 
mock_cinder): - pool1 = self.fake_pool() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.pools.list.return_value = [pool1] - pool = cinder_util.get_storage_pool_by_name('host@backend#pool') - - self.assertEqual(pool, pool1) - - def test_get_storage_pool_by_name_failure(self, mock_cinder): - pool1 = self.fake_pool() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [pool1] - self.assertRaisesRegex( - exception.PoolNotFound, - "The pool failure could not be found", - cinder_util.get_storage_pool_by_name, 'failure') - - @staticmethod - def fake_volume_type(**kwargs): - volume_type = mock.MagicMock() - volume_type.name = kwargs.get('name', 'fake_type') - extra_specs = {'volume_backend_name': 'backend'} - volume_type.extra_specs = kwargs.get('extra_specs', extra_specs) - return volume_type - - def test_get_volume_type_list(self, mock_cinder): - volume_type1 = self.fake_volume_type() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.volume_types.list.return_value = [volume_type1] - cinder_util.get_volume_type_list() - cinder_util.cinder.volume_types.list.assert_called_once_with() - - def test_get_volume_type_by_backendname_with_backend_exist( - self, mock_cinder): - volume_type1 = self.fake_volume_type() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.volume_types.list.return_value = [volume_type1] - volume_type_name = cinder_util.get_volume_type_by_backendname( - 'backend') - - self.assertEqual(volume_type_name, volume_type1.name) - - def test_get_volume_type_by_backendname_with_no_backend_exist( - self, mock_cinder): - volume_type1 = self.fake_volume_type() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.volume_types.list.return_value = [volume_type1] - volume_type_name = cinder_util.get_volume_type_by_backendname( - 'nobackend') - - self.assertEqual("", volume_type_name) diff --git a/watcher/tests/common/test_clients.py 
b/watcher/tests/common/test_clients.py deleted file mode 100755 index 32ab071..0000000 --- a/watcher/tests/common/test_clients.py +++ /dev/null @@ -1,432 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometerclient import client as ceclient -import ceilometerclient.v2.client as ceclient_v2 -from cinderclient import client as ciclient -from cinderclient.v1 import client as ciclient_v1 -from glanceclient import client as glclient -from gnocchiclient import client as gnclient -from gnocchiclient.v1 import client as gnclient_v1 -from ironicclient import client as irclient -from ironicclient.v1 import client as irclient_v1 -from keystoneauth1 import loading as ka_loading -import mock -from monascaclient import client as monclient -from monascaclient.v2_0 import client as monclient_v2 -from neutronclient.neutron import client as netclient -from neutronclient.v2_0 import client as netclient_v2 -from novaclient import client as nvclient - -from watcher.common import clients -from watcher import conf -from watcher.tests import base - -CONF = conf.CONF - - -class TestClients(base.TestCase): - - def _register_watcher_clients_auth_opts(self): - _AUTH_CONF_GROUP = 'watcher_clients_auth' - ka_loading.register_auth_conf_options(CONF, _AUTH_CONF_GROUP) - ka_loading.register_session_conf_options(CONF, _AUTH_CONF_GROUP) - CONF.set_override('auth_type', 'password', group=_AUTH_CONF_GROUP) - - # ka_loading.load_auth_from_conf_options(CONF, _AUTH_CONF_GROUP) - # 
ka_loading.load_session_from_conf_options(CONF, _AUTH_CONF_GROUP) - # CONF.set_override( - # 'auth-url', 'http://server.ip:35357', group=_AUTH_CONF_GROUP) - - # If we don't clean up the _AUTH_CONF_GROUP conf options, then other - # tests that run after this one will fail, complaining about required - # options that _AUTH_CONF_GROUP wants. - def cleanup_conf_from_loading(): - # oslo_config doesn't seem to allow unregistering groups through a - # single method, so we do this instead - CONF.reset() - del CONF._groups[_AUTH_CONF_GROUP] - - self.addCleanup(cleanup_conf_from_loading) - - def reset_register_opts_mock(conf_obj, original_method): - conf_obj.register_opts = original_method - - original_register_opts = CONF.register_opts - self.addCleanup(reset_register_opts_mock, - CONF, - original_register_opts) - - expected = {'username': 'foousername', - 'password': 'foopassword', - 'auth_url': 'http://server.ip:35357', - 'cafile': None, - 'certfile': None, - 'keyfile': None, - 'insecure': False, - 'user_domain_id': 'foouserdomainid', - 'project_domain_id': 'fooprojdomainid'} - - # Because some of the conf options for auth plugins are not registered - # until right before they are loaded, and because the method that does - # the actual loading of the conf option values is an anonymous method - # (see _getter method of load_from_conf_options in - # keystoneauth1.loading.conf.py), we need to manually monkey patch - # the register opts method so that we can override the conf values to - # our custom values. 
- def mock_register_opts(*args, **kwargs): - ret = original_register_opts(*args, **kwargs) - if 'group' in kwargs and kwargs['group'] == _AUTH_CONF_GROUP: - for key, value in expected.items(): - CONF.set_override(key, value, group=_AUTH_CONF_GROUP) - return ret - - CONF.register_opts = mock_register_opts - - def test_get_keystone_session(self): - self._register_watcher_clients_auth_opts() - - osc = clients.OpenStackClients() - - expected = {'username': 'foousername', - 'password': 'foopassword', - 'auth_url': 'http://server.ip:35357', - 'user_domain_id': 'foouserdomainid', - 'project_domain_id': 'fooprojdomainid'} - - sess = osc.session - self.assertEqual(expected['auth_url'], sess.auth.auth_url) - self.assertEqual(expected['username'], sess.auth._username) - self.assertEqual(expected['password'], sess.auth._password) - self.assertEqual(expected['user_domain_id'], sess.auth._user_domain_id) - self.assertEqual(expected['project_domain_id'], - sess.auth._project_domain_id) - - @mock.patch.object(nvclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._nova = None - osc.nova() - mock_call.assert_called_once_with( - CONF.nova_client.api_version, - endpoint_type=CONF.nova_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova_diff_vers(self, mock_session): - CONF.set_override('api_version', '2.3', group='nova_client') - osc = clients.OpenStackClients() - osc._nova = None - osc.nova() - self.assertEqual('2.3', osc.nova().api_version.get_string()) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', 'publicURL', group='nova_client') - osc = clients.OpenStackClients() - osc._nova = None - osc.nova() - self.assertEqual('publicURL', osc.nova().client.interface) - - 
@mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._nova = None - nova = osc.nova() - nova_cached = osc.nova() - self.assertEqual(nova, nova_cached) - - @mock.patch.object(glclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._glance = None - osc.glance() - mock_call.assert_called_once_with( - CONF.glance_client.api_version, - interface=CONF.glance_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance_diff_vers(self, mock_session): - CONF.set_override('api_version', '1', group='glance_client') - osc = clients.OpenStackClients() - osc._glance = None - osc.glance() - self.assertEqual(1.0, osc.glance().version) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', - 'internalURL', group='glance_client') - osc = clients.OpenStackClients() - osc._glance = None - osc.glance() - self.assertEqual('internalURL', osc.glance().http_client.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._glance = None - glance = osc.glance() - glance_cached = osc.glance() - self.assertEqual(glance, glance_cached) - - @mock.patch.object(gnclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._gnocchi = None - osc.gnocchi() - mock_call.assert_called_once_with( - CONF.gnocchi_client.api_version, - interface=CONF.gnocchi_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi_diff_vers(self, 
mock_session): - # gnocchiclient currently only has one version (v1) - CONF.set_override('api_version', '1', group='gnocchi_client') - osc = clients.OpenStackClients() - osc._gnocchi = None - osc.gnocchi() - self.assertEqual(gnclient_v1.Client, type(osc.gnocchi())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi_diff_endpoint(self, mock_session): - # gnocchiclient currently only has one version (v1) - CONF.set_override('endpoint_type', 'publicURL', group='gnocchi_client') - osc = clients.OpenStackClients() - osc._gnocchi = None - osc.gnocchi() - self.assertEqual('publicURL', osc.gnocchi().api.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._gnocchi = None - gnocchi = osc.gnocchi() - gnocchi_cached = osc.gnocchi() - self.assertEqual(gnocchi, gnocchi_cached) - - @mock.patch.object(ciclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_cinder(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._cinder = None - osc.cinder() - mock_call.assert_called_once_with( - CONF.cinder_client.api_version, - endpoint_type=CONF.cinder_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_cinder_diff_vers(self, mock_session): - CONF.set_override('api_version', '1', group='cinder_client') - osc = clients.OpenStackClients() - osc._cinder = None - osc.cinder() - self.assertEqual(ciclient_v1.Client, type(osc.cinder())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_cinder_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', - 'internalURL', group='cinder_client') - osc = clients.OpenStackClients() - osc._cinder = None - osc.cinder() - self.assertEqual('internalURL', osc.cinder().client.interface) - - @mock.patch.object(clients.OpenStackClients, 
'session') - def test_clients_cinder_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._cinder = None - cinder = osc.cinder() - cinder_cached = osc.cinder() - self.assertEqual(cinder, cinder_cached) - - @mock.patch.object(ceclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ceilometer(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._ceilometer = None - osc.ceilometer() - mock_call.assert_called_once_with( - CONF.ceilometer_client.api_version, - None, - endpoint_type=CONF.ceilometer_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') - def test_clients_ceilometer_diff_vers(self, mock_get_redirect_client, - mock_session): - '''ceilometerclient currently only has one version (v2)''' - mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] - CONF.set_override('api_version', '2', - group='ceilometer_client') - osc = clients.OpenStackClients() - osc._ceilometer = None - osc.ceilometer() - self.assertEqual(ceclient_v2.Client, - type(osc.ceilometer())) - - @mock.patch.object(clients.OpenStackClients, 'session') - @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') - def test_clients_ceilometer_diff_endpoint(self, mock_get_redirect_client, - mock_session): - mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] - CONF.set_override('endpoint_type', 'publicURL', - group='ceilometer_client') - osc = clients.OpenStackClients() - osc._ceilometer = None - osc.ceilometer() - self.assertEqual('publicURL', osc.ceilometer().http_client.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') - def test_clients_ceilometer_cached(self, mock_get_redirect_client, - mock_session): - mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] - osc = 
clients.OpenStackClients() - osc._ceilometer = None - ceilometer = osc.ceilometer() - ceilometer_cached = osc.ceilometer() - self.assertEqual(ceilometer, ceilometer_cached) - - @mock.patch.object(netclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._neutron = None - osc.neutron() - mock_call.assert_called_once_with( - CONF.neutron_client.api_version, - endpoint_type=CONF.neutron_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron_diff_vers(self, mock_session): - '''neutronclient currently only has one version (v2)''' - CONF.set_override('api_version', '2.0', - group='neutron_client') - osc = clients.OpenStackClients() - osc._neutron = None - osc.neutron() - self.assertEqual(netclient_v2.Client, - type(osc.neutron())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron_diff_endpoint(self, mock_session): - '''neutronclient currently only has one version (v2)''' - CONF.set_override('endpoint_type', 'internalURL', - group='neutron_client') - osc = clients.OpenStackClients() - osc._neutron = None - osc.neutron() - self.assertEqual('internalURL', osc.neutron().httpclient.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._neutron = None - neutron = osc.neutron() - neutron_cached = osc.neutron() - self.assertEqual(neutron, neutron_cached) - - @mock.patch.object(monclient, 'Client') - @mock.patch.object(ka_loading, 'load_session_from_conf_options') - def test_clients_monasca(self, mock_session, mock_call): - mock_session.return_value = mock.Mock( - get_endpoint=mock.Mock(return_value='test_endpoint'), - get_token=mock.Mock(return_value='test_token'),) - - self._register_watcher_clients_auth_opts() - - osc = 
clients.OpenStackClients() - osc._monasca = None - osc.monasca() - mock_call.assert_called_once_with( - CONF.monasca_client.api_version, - 'test_endpoint', - auth_url='http://server.ip:35357', cert_file=None, insecure=False, - key_file=None, keystone_timeout=None, os_cacert=None, - password='foopassword', service_type='monitoring', - token='test_token', username='foousername') - - @mock.patch.object(ka_loading, 'load_session_from_conf_options') - def test_clients_monasca_diff_vers(self, mock_session): - mock_session.return_value = mock.Mock( - get_endpoint=mock.Mock(return_value='test_endpoint'), - get_token=mock.Mock(return_value='test_token'),) - - self._register_watcher_clients_auth_opts() - - CONF.set_override('api_version', '2_0', group='monasca_client') - osc = clients.OpenStackClients() - osc._monasca = None - osc.monasca() - self.assertEqual(monclient_v2.Client, type(osc.monasca())) - - @mock.patch.object(ka_loading, 'load_session_from_conf_options') - def test_clients_monasca_cached(self, mock_session): - mock_session.return_value = mock.Mock( - get_endpoint=mock.Mock(return_value='test_endpoint'), - get_token=mock.Mock(return_value='test_token'),) - - self._register_watcher_clients_auth_opts() - - osc = clients.OpenStackClients() - osc._monasca = None - monasca = osc.monasca() - monasca_cached = osc.monasca() - self.assertEqual(monasca, monasca_cached) - - @mock.patch.object(irclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._ironic = None - osc.ironic() - mock_call.assert_called_once_with( - CONF.ironic_client.api_version, - CONF.ironic_client.endpoint_type, - max_retries=None, - os_ironic_api_version=None, - retry_interval=None, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic_diff_vers(self, mock_session): - CONF.set_override('api_version', '1', group='ironic_client') 
- osc = clients.OpenStackClients() - osc._ironic = None - osc.ironic() - self.assertEqual(irclient_v1.Client, type(osc.ironic())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', 'internalURL', - group='ironic_client') - osc = clients.OpenStackClients() - osc._ironic = None - osc.ironic() - self.assertEqual('internalURL', osc.ironic().http_client.endpoint) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._ironic = None - ironic = osc.ironic() - ironic_cached = osc.ironic() - self.assertEqual(ironic, ironic_cached) diff --git a/watcher/tests/common/test_nova_helper.py b/watcher/tests/common/test_nova_helper.py deleted file mode 100644 index 06daf6f..0000000 --- a/watcher/tests/common/test_nova_helper.py +++ /dev/null @@ -1,365 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import time - -import mock - -from watcher.common import clients -from watcher.common import nova_helper -from watcher.common import utils -from watcher.tests import base - - -@mock.patch.object(clients.OpenStackClients, 'nova') -@mock.patch.object(clients.OpenStackClients, 'neutron') -@mock.patch.object(clients.OpenStackClients, 'cinder') -@mock.patch.object(clients.OpenStackClients, 'glance') -class TestNovaHelper(base.TestCase): - - def setUp(self): - super(TestNovaHelper, self).setUp() - self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe" - self.source_node = "ldev-indeedsrv005" - self.destination_node = "ldev-indeedsrv006" - self.flavor_name = "x1" - - @staticmethod - def fake_server(*args, **kwargs): - server = mock.MagicMock() - server.id = args[0] - server.status = 'ACTIVE' - - return server - - @staticmethod - def fake_migration(*args, **kwargs): - migration = mock.MagicMock() - migration.id = args[0] - return migration - - @staticmethod - def fake_nova_find_list(nova_util, find=None, list=None): - nova_util.nova.servers.get.return_value = find - if list is None: - nova_util.nova.servers.list.return_value = [] - else: - nova_util.nova.servers.list.return_value = [list] - - @staticmethod - def fake_nova_migration_list(nova_util, list=None): - if list is None: - nova_util.nova.server_migrations.list.return_value = [] - else: - nova_util.nova.server_migration.list.return_value = [list] - - @staticmethod - def fake_live_migrate(server, *args, **kwargs): - - def side_effect(*args, **kwargs): - setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2") - - server.live_migrate.side_effect = side_effect - - @staticmethod - def fake_confirm_resize(server, *args, **kwargs): - - def side_effect(*args, **kwargs): - setattr(server, 'status', 'ACTIVE') - - server.confirm_resize.side_effect = side_effect - - @staticmethod - def fake_cold_migrate(server, *args, **kwargs): - - def side_effect(*args, **kwargs): - setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2") 
- setattr(server, 'status', 'VERIFY_RESIZE') - - server.migrate.side_effect = side_effect - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_stop_instance(self, mock_glance, mock_cinder, mock_neutron, - mock_nova): - nova_util = nova_helper.NovaHelper() - instance_id = utils.generate_uuid() - server = self.fake_server(instance_id) - setattr(server, 'OS-EXT-STS:vm_state', 'stopped') - self.fake_nova_find_list(nova_util, find=server, list=server) - - result = nova_util.stop_instance(instance_id) - self.assertTrue(result) - - setattr(server, 'OS-EXT-STS:vm_state', 'active') - result = nova_util.stop_instance(instance_id) - self.assertFalse(result) - - self.fake_nova_find_list(nova_util, find=server, list=None) - - result = nova_util.stop_instance(instance_id) - self.assertFalse(result) - - def test_set_host_offline(self, mock_glance, mock_cinder, mock_neutron, - mock_nova): - host = mock.MagicMock() - nova_util = nova_helper.NovaHelper() - nova_util.nova.hosts = mock.MagicMock() - nova_util.nova.hosts.get.return_value = host - result = nova_util.set_host_offline("rennes") - self.assertTrue(result) - - nova_util.nova.hosts.get.return_value = None - result = nova_util.set_host_offline("rennes") - self.assertFalse(result) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_resize_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'status', 'VERIFY_RESIZE') - self.fake_nova_find_list(nova_util, find=server, list=server) - is_success = nova_util.resize_instance(self.instance_uuid, - self.flavor_name) - self.assertTrue(is_success) - - setattr(server, 'status', 'SOMETHING_ELSE') - is_success = nova_util.resize_instance(self.instance_uuid, - self.flavor_name) - self.assertFalse(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_live_migrate_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - 
nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.destination_node) - self.fake_nova_find_list(nova_util, find=server, list=server) - is_success = nova_util.live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertTrue(is_success) - - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.source_node) - self.fake_nova_find_list(nova_util, find=server, list=None) - is_success = nova_util.live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertFalse(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_live_migrate_instance_no_destination_node( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - self.destination_node = None - self.fake_nova_find_list(nova_util, find=server, list=server) - self.fake_live_migrate(server) - is_success = nova_util.live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertTrue(is_success) - - def test_watcher_non_live_migrate_instance_not_found( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - self.fake_nova_find_list(nova_util, find=None, list=None) - - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, - self.destination_node) - - self.assertFalse(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_watcher_non_live_migrate_instance_volume( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_servers = nova_util.nova.servers - instance = self.fake_server(self.instance_uuid) - setattr(instance, 'OS-EXT-SRV-ATTR:host', - self.source_node) - setattr(instance, 'OS-EXT-STS:vm_state', "stopped") - attached_volumes = [{'id': str(utils.generate_uuid())}] - setattr(instance, "os-extended-volumes:volumes_attached", - 
attached_volumes) - self.fake_nova_find_list(nova_util, find=instance, list=instance) - nova_servers.create_image.return_value = utils.generate_uuid() - nova_util.glance.images.get.return_value = mock.MagicMock( - status='active') - nova_util.cinder.volumes.get.return_value = mock.MagicMock( - status='available') - - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, - self.destination_node) - self.assertTrue(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_watcher_non_live_migrate_keep_image( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_servers = nova_util.nova.servers - instance = self.fake_server(self.instance_uuid) - setattr(instance, 'OS-EXT-SRV-ATTR:host', - self.source_node) - setattr(instance, 'OS-EXT-STS:vm_state', "stopped") - addresses = mock.MagicMock() - network_type = mock.MagicMock() - networks = [] - networks.append(("lan", network_type)) - addresses.items.return_value = networks - attached_volumes = mock.MagicMock() - setattr(instance, 'addresses', addresses) - setattr(instance, "os-extended-volumes:volumes_attached", - attached_volumes) - self.fake_nova_find_list(nova_util, find=instance, list=instance) - nova_servers.create_image.return_value = utils.generate_uuid() - nova_util.glance.images.get.return_value = mock.MagicMock( - status='active') - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, - self.destination_node, keep_original_image_name=False) - self.assertTrue(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_abort_live_migrate_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.source_node) - setattr(server, 'OS-EXT-STS:task_state', None) - migration = self.fake_migration(2) - self.fake_nova_migration_list(nova_util, 
list=migration) - - self.fake_nova_find_list(nova_util, find=server, list=server) - - self.assertTrue(nova_util.abort_live_migrate( - self.instance_uuid, self.source_node, self.destination_node)) - - setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) - - self.assertFalse(nova_util.abort_live_migrate( - self.instance_uuid, self.source_node, self.destination_node)) - - setattr(server, 'status', 'ERROR') - self.assertRaises(Exception, nova_util.abort_live_migrate, - (self.instance_uuid, self.source_node, - self.destination_node)) - - def test_non_live_migrate_instance_no_destination_node( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.source_node) - self.destination_node = None - self.fake_nova_find_list(nova_util, find=server, list=server) - self.fake_cold_migrate(server) - self.fake_confirm_resize(server) - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertTrue(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_create_image_from_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - instance = self.fake_server(self.instance_uuid) - image = mock.MagicMock() - setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node) - setattr(instance, 'OS-EXT-STS:vm_state', "stopped") - self.fake_nova_find_list(nova_util, find=instance, list=instance) - image_uuid = 'fake-image-uuid' - nova_util.nova.servers.create_image.return_value = image - - glance_client = mock.MagicMock() - mock_glance.return_value = glance_client - - glance_client.images = {image_uuid: image} - instance = nova_util.create_image_from_instance( - self.instance_uuid, "Cirros" - ) - self.assertIsNotNone(instance) - - nova_util.glance.images.get.return_value = None - instance = 
nova_util.create_image_from_instance( - self.instance_uuid, "Cirros" - ) - self.assertIsNone(instance) - - def test_enable_service_nova_compute(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_services = nova_util.nova.services - nova_services.enable.return_value = mock.MagicMock( - status='enabled') - - result = nova_util.enable_service_nova_compute('nanjing') - self.assertTrue(result) - - nova_services.enable.return_value = mock.MagicMock( - status='disabled') - - result = nova_util.enable_service_nova_compute('nanjing') - self.assertFalse(result) - - def test_disable_service_nova_compute(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_services = nova_util.nova.services - nova_services.disable.return_value = mock.MagicMock( - status='enabled') - - result = nova_util.disable_service_nova_compute('nanjing') - self.assertFalse(result) - - nova_services.disable.return_value = mock.MagicMock( - status='disabled') - - result = nova_util.disable_service_nova_compute('nanjing') - self.assertTrue(result) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_create_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - instance = self.fake_server(self.instance_uuid) - nova_util.nova.services.create.return_value = instance - nova_util.nova.services.get.return_value = instance - - instance = nova_util.create_instance(self.source_node) - self.assertIsNotNone(instance) - - def test_get_flavor_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - instance = self.fake_server(self.instance_uuid) - flavor = {'id': 1, 'name': 'm1.tiny', 'ram': 512, 'vcpus': 1, - 'disk': 0, 'ephemeral': 0} - instance.flavor = flavor - nova_util.nova.flavors.get.return_value = flavor - cache = flavor - - nova_util.get_flavor_instance(instance, cache) - 
self.assertEqual(instance.flavor['name'], cache['name']) diff --git a/watcher/tests/common/test_service.py b/watcher/tests/common/test_service.py deleted file mode 100644 index 71f4f37..0000000 --- a/watcher/tests/common/test_service.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import mock - -from oslo_config import cfg - -import oslo_messaging as om -from watcher.common import rpc -from watcher.common import service -from watcher import objects -from watcher.tests import base - -CONF = cfg.CONF - - -class DummyManager(object): - - API_VERSION = '1.0' - - conductor_endpoints = [mock.Mock()] - notification_endpoints = [mock.Mock()] - - def __init__(self): - self.publisher_id = "pub_id" - self.conductor_topic = "conductor_topic" - self.notification_topics = [] - self.api_version = self.API_VERSION - self.service_name = None - - -class TestServiceHeartbeat(base.TestCase): - - def setUp(self): - super(TestServiceHeartbeat, self).setUp() - - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(objects.Service, 'create') - def test_send_beat_with_creating_service(self, mock_create, - mock_list): - CONF.set_default('host', 'fake-fqdn') - - mock_list.return_value = [] - service.ServiceHeartbeat(service_name='watcher-service') - mock_list.assert_called_once_with(mock.ANY, - filters={'name': 'watcher-service', - 'host': 'fake-fqdn'}) - self.assertEqual(1, 
mock_create.call_count) - - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(objects.Service, 'save') - def test_send_beat_without_creating_service(self, mock_save, mock_list): - - mock_list.return_value = [objects.Service(mock.Mock(), - name='watcher-service', - host='controller')] - service.ServiceHeartbeat(service_name='watcher-service') - self.assertEqual(1, mock_save.call_count) - - -class TestService(base.TestCase): - - def setUp(self): - super(TestService, self).setUp() - - @mock.patch.object(om.rpc.server, "RPCServer") - def test_start(self, m_handler): - dummy_service = service.Service(DummyManager) - dummy_service.start() - self.assertEqual(1, m_handler.call_count) - - @mock.patch.object(om.rpc.server, "RPCServer") - def test_stop(self, m_handler): - dummy_service = service.Service(DummyManager) - dummy_service.stop() - self.assertEqual(1, m_handler.call_count) - - def test_build_topic_handler(self): - topic_name = "mytopic" - dummy_service = service.Service(DummyManager) - handler = dummy_service.build_topic_handler(topic_name) - self.assertIsNotNone(handler) - self.assertIsInstance(handler, om.rpc.server.RPCServer) - self.assertEqual("mytopic", handler._target.topic) - - def test_init_service(self): - dummy_service = service.Service(DummyManager) - self.assertIsInstance(dummy_service.serializer, - rpc.RequestContextSerializer) - self.assertIsInstance( - dummy_service.conductor_topic_handler, - om.rpc.server.RPCServer) diff --git a/watcher/tests/conf/__init__.py b/watcher/tests/conf/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/conf/test_list_opts.py b/watcher/tests/conf/test_list_opts.py deleted file mode 100755 index ef7f4f1..0000000 --- a/watcher/tests/conf/test_list_opts.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file 
except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from stevedore import extension - -from watcher.conf import opts -from watcher.conf import plugins -from watcher.tests import base -from watcher.tests.decision_engine import fake_strategies - - -class TestListOpts(base.TestCase): - def setUp(self): - super(TestListOpts, self).setUp() - self.base_sections = [ - 'DEFAULT', 'api', 'database', 'watcher_decision_engine', - 'watcher_applier', 'watcher_planner', 'nova_client', - 'glance_client', 'gnocchi_client', 'cinder_client', - 'ceilometer_client', 'monasca_client', 'ironic_client', - 'neutron_client', 'watcher_clients_auth'] - self.opt_sections = list(dict(opts.list_opts()).keys()) - - def test_run_list_opts(self): - expected_sections = self.opt_sections - - result = opts.list_opts() - - self.assertIsNotNone(result) - for section_name, options in result: - self.assertIn(section_name, expected_sections) - self.assertTrue(len(options)) - - def test_list_opts_no_opts(self): - expected_sections = self.base_sections - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=fake_strategies.FakeDummy1Strategy2.get_name(), - entry_point="%s:%s" % ( - fake_strategies.FakeDummy1Strategy2.__module__, - fake_strategies.FakeDummy1Strategy2.__name__), - plugin=fake_strategies.FakeDummy1Strategy2, - obj=None, - )], - namespace="watcher_strategies", - ) - - def m_list_available(namespace): - if namespace == "watcher_strategies": - return fake_extmanager_call - else: - 
return extension.ExtensionManager.make_test_instance( - extensions=[], namespace=namespace) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.side_effect = m_list_available - result = opts.list_opts() - - self.assertIsNotNone(result) - for section_name, options in result: - self.assertIn(section_name, expected_sections) - self.assertTrue(len(options)) - - def test_list_opts_with_opts(self): - expected_sections = self.base_sections + [ - 'watcher_strategies.strategy_1'] - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=fake_strategies.FakeDummy1Strategy1.get_name(), - entry_point="%s:%s" % ( - fake_strategies.FakeDummy1Strategy1.__module__, - fake_strategies.FakeDummy1Strategy1.__name__), - plugin=fake_strategies.FakeDummy1Strategy1, - obj=None, - )], - namespace="watcher_strategies", - ) - - def m_list_available(namespace): - if namespace == "watcher_strategies": - return fake_extmanager_call - else: - return extension.ExtensionManager.make_test_instance( - extensions=[], namespace=namespace) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.side_effect = m_list_available - result = opts.list_opts() - - self.assertIsNotNone(result) - for section_name, options in result: - self.assertIn(section_name, expected_sections) - self.assertTrue(len(options)) - - result_map = dict(result) - strategy_opts = result_map['watcher_strategies.strategy_1'] - self.assertEqual(['test_opt'], [opt.name for opt in strategy_opts]) - - -class TestPlugins(base.TestCase): - - def test_show_plugins(self): - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=fake_strategies.FakeDummy1Strategy1.get_name(), - entry_point="%s:%s" % ( - fake_strategies.FakeDummy1Strategy1.__module__, - 
fake_strategies.FakeDummy1Strategy1.__name__), - plugin=fake_strategies.FakeDummy1Strategy1, - obj=None, - )], - namespace="watcher_strategies", - ) - - def m_list_available(namespace): - if namespace == "watcher_strategies": - return fake_extmanager_call - else: - return extension.ExtensionManager.make_test_instance( - extensions=[], namespace=namespace) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - with mock.patch.object( - plugins, "_show_plugins_ascii_table" - ) as m_show: - m_ext_manager.side_effect = m_list_available - plugins.show_plugins() - m_show.assert_called_once_with( - [('watcher_strategies.strategy_1', 'strategy_1', - 'watcher.tests.decision_engine.' - 'fake_strategies.FakeDummy1Strategy1')]) diff --git a/watcher/tests/conf_fixture.py b/watcher/tests/conf_fixture.py deleted file mode 100644 index 3e197b9..0000000 --- a/watcher/tests/conf_fixture.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import fixtures -from oslo_config import cfg - -from watcher.common import config - -CONF = cfg.CONF -CONF.import_opt('host', 'watcher.conf.service') -CONF.import_opt('connection', 'oslo_db.options', group='database') -CONF.import_opt('sqlite_synchronous', 'oslo_db.options', group='database') - - -class ConfFixture(fixtures.Fixture): - """Fixture to manage conf settings.""" - - def __init__(self, conf=cfg.CONF): - self.conf = conf - - def setUp(self): - super(ConfFixture, self).setUp() - - self.conf.set_default('connection', "sqlite://", group='database') - self.conf.set_default('sqlite_synchronous', False, group='database') - config.parse_args([], default_config_files=[]) - self.addCleanup(self.conf.reset) - - -class ConfReloadFixture(ConfFixture): - """Fixture to manage reloads of conf settings.""" - - def __init__(self, conf=cfg.CONF): - self.conf = conf - self._original_parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parser(self, *args, **kw): - return cfg.ConfigOpts._parse_cli_opts(self.conf, []) - - def _restore_parser(self): - self.conf._parse_cli_opts = self._original_parse_cli_opts - - def setUp(self): - super(ConfReloadFixture, self).setUp() - self.conf._parse_cli_opts = self._fake_parser - self.addCleanup(self._restore_parser) diff --git a/watcher/tests/config.py b/watcher/tests/config.py deleted file mode 100644 index 7b8745b..0000000 --- a/watcher/tests/config.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.api import hooks - -# Server Specific Configurations -server = { - 'port': '9322', - 'host': '0.0.0.0' -} - -# Pecan Application Configurations -app = { - 'root': 'watcher.api.controllers.root.RootController', - 'modules': ['watcher.api'], - 'hooks': [ - hooks.ContextHook(), - ], - 'acl_public_routes': [ - '/' - ], -} - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} -# -# All configurations are accessible at:: -# pecan.conf diff --git a/watcher/tests/datasource/__init__.py b/watcher/tests/datasource/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/datasource/test_ceilometer_helper.py b/watcher/tests/datasource/test_ceilometer_helper.py deleted file mode 100644 index fd05181..0000000 --- a/watcher/tests/datasource/test_ceilometer_helper.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import mock - -from watcher.common import clients -from watcher.datasource import ceilometer as ceilometer_helper -from watcher.tests import base - - -@mock.patch.object(clients.OpenStackClients, 'ceilometer') -class TestCeilometerHelper(base.BaseTestCase): - - def test_build_query(self, mock_ceilometer): - mock_ceilometer.return_value = mock.MagicMock() - cm = ceilometer_helper.CeilometerHelper() - expected = [{'field': 'user_id', 'op': 'eq', 'value': u'user_id'}, - {'field': 'project_id', 'op': 'eq', 'value': u'tenant_id'}, - {'field': 'resource_id', 'op': 'eq', - 'value': u'resource_id'}] - - query = cm.build_query(user_id="user_id", - tenant_id="tenant_id", - resource_id="resource_id", - user_ids=["user_ids"], - tenant_ids=["tenant_ids"], - resource_ids=["resource_ids"]) - self.assertEqual(expected, query) - - def test_statistic_aggregation(self, mock_ceilometer): - cm = ceilometer_helper.CeilometerHelper() - ceilometer = mock.MagicMock() - statistic = mock.MagicMock() - expected_result = 100 - statistic[-1]._info = {'aggregate': {'avg': expected_result}} - ceilometer.statistics.list.return_value = statistic - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.statistic_aggregation( - resource_id="INSTANCE_ID", - meter_name="cpu_util", - period="7300" - ) - self.assertEqual(expected_result, val) - - def test_get_last_sample(self, mock_ceilometer): - ceilometer = mock.MagicMock() - statistic = mock.MagicMock() - expected_result = 100 - statistic[-1]._info = {'counter_volume': expected_result} - ceilometer.samples.list.return_value = statistic - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.get_last_sample_value( - resource_id="id", - meter_name="compute.node.percent" - ) - self.assertEqual(expected_result, val) - - def test_get_last_sample_none(self, mock_ceilometer): - ceilometer = 
mock.MagicMock() - expected = [] - ceilometer.samples.list.return_value = expected - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.get_last_sample_values( - resource_id="id", - meter_name="compute.node.percent" - ) - self.assertEqual(expected, val) - - def test_statistic_list(self, mock_ceilometer): - ceilometer = mock.MagicMock() - expected_value = [] - ceilometer.statistics.list.return_value = expected_value - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.statistic_list(meter_name="cpu_util") - self.assertEqual(expected_value, val) diff --git a/watcher/tests/datasource/test_gnocchi_helper.py b/watcher/tests/datasource/test_gnocchi_helper.py deleted file mode 100644 index 8b481a3..0000000 --- a/watcher/tests/datasource/test_gnocchi_helper.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock -from oslo_config import cfg -from oslo_utils import timeutils - -from watcher.common import clients -from watcher.common import exception -from watcher.datasource import gnocchi as gnocchi_helper -from watcher.tests import base - -CONF = cfg.CONF - - -@mock.patch.object(clients.OpenStackClients, 'gnocchi') -class TestGnocchiHelper(base.BaseTestCase): - - def test_gnocchi_statistic_aggregation(self, mock_gnocchi): - gnocchi = mock.MagicMock() - expected_result = 5.5 - - expected_measures = [["2017-02-02T09:00:00.000000", 360, 5.5]] - - gnocchi.metric.get_measures.return_value = expected_measures - mock_gnocchi.return_value = gnocchi - - helper = gnocchi_helper.GnocchiHelper() - result = helper.statistic_aggregation( - resource_id='16a86790-327a-45f9-bc82-45839f062fdc', - metric='cpu_util', - granularity=360, - start_time=timeutils.parse_isotime("2017-02-02T09:00:00.000000"), - stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"), - aggregation='mean' - ) - self.assertEqual(expected_result, result) - - def test_gnocchi_wrong_datetime(self, mock_gnocchi): - gnocchi = mock.MagicMock() - - expected_measures = [["2017-02-02T09:00:00.000000", 360, 5.5]] - - gnocchi.metric.get_measures.return_value = expected_measures - mock_gnocchi.return_value = gnocchi - - helper = gnocchi_helper.GnocchiHelper() - self.assertRaises( - exception.InvalidParameter, helper.statistic_aggregation, - resource_id='16a86790-327a-45f9-bc82-45839f062fdc', - metric='cpu_util', - granularity=360, - start_time="2017-02-02T09:00:00.000000", - stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"), - aggregation='mean') diff --git a/watcher/tests/datasource/test_monasca_helper.py b/watcher/tests/datasource/test_monasca_helper.py deleted file mode 100644 index 5c49af6..0000000 --- a/watcher/tests/datasource/test_monasca_helper.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 
(the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from monascaclient import exc -from oslo_config import cfg -from oslo_utils import timeutils - -from watcher.common import clients -from watcher.datasource import monasca as monasca_helper -from watcher.tests import base - -CONF = cfg.CONF - - -@mock.patch.object(clients.OpenStackClients, 'monasca') -class TestMonascaHelper(base.BaseTestCase): - - def test_monasca_statistic_aggregation(self, mock_monasca): - monasca = mock.MagicMock() - expected_result = [{ - 'columns': ['timestamp', 'avg'], - 'dimensions': { - 'hostname': 'rdev-indeedsrv001', - 'service': 'monasca'}, - 'id': '0', - 'name': 'cpu.percent', - 'statistics': [ - ['2016-07-29T12:45:00Z', 0.0], - ['2016-07-29T12:50:00Z', 0.9100000000000001], - ['2016-07-29T12:55:00Z', 0.9111111111111112]]}] - - monasca.metrics.list_statistics.return_value = expected_result - mock_monasca.return_value = monasca - - helper = monasca_helper.MonascaHelper() - result = helper.statistic_aggregation( - meter_name='cpu.percent', - dimensions={'hostname': 'NODE_UUID'}, - start_time=timeutils.parse_isotime("2016-06-06T10:33:22.063176"), - end_time=None, - period=7200, - aggregate='avg', - group_by='*', - ) - self.assertEqual(expected_result, result) - - def test_monasca_statistic_list(self, mock_monasca): - monasca = mock.MagicMock() - expected_result = [{ - 'columns': ['timestamp', 'value', 'value_meta'], - 'dimensions': { - 'hostname': 'rdev-indeedsrv001', - 'service': 'monasca'}, - 'id': '0', - 'measurements': [ - 
['2016-07-29T12:54:06.000Z', 0.9, {}], - ['2016-07-29T12:54:36.000Z', 0.9, {}], - ['2016-07-29T12:55:06.000Z', 0.9, {}], - ['2016-07-29T12:55:36.000Z', 0.8, {}]], - 'name': 'cpu.percent'}] - - monasca.metrics.list_measurements.return_value = expected_result - mock_monasca.return_value = monasca - helper = monasca_helper.MonascaHelper() - val = helper.statistics_list(meter_name="cpu.percent", dimensions={}) - self.assertEqual(expected_result, val) - - def test_monasca_statistic_list_query_retry(self, mock_monasca): - monasca = mock.MagicMock() - expected_result = [{ - 'columns': ['timestamp', 'value', 'value_meta'], - 'dimensions': { - 'hostname': 'rdev-indeedsrv001', - 'service': 'monasca'}, - 'id': '0', - 'measurements': [ - ['2016-07-29T12:54:06.000Z', 0.9, {}], - ['2016-07-29T12:54:36.000Z', 0.9, {}], - ['2016-07-29T12:55:06.000Z', 0.9, {}], - ['2016-07-29T12:55:36.000Z', 0.8, {}]], - 'name': 'cpu.percent'}] - - monasca.metrics.list_measurements.side_effect = [ - exc.HTTPUnauthorized, expected_result] - mock_monasca.return_value = monasca - helper = monasca_helper.MonascaHelper() - val = helper.statistics_list(meter_name="cpu.percent", dimensions={}) - self.assertEqual(expected_result, val) diff --git a/watcher/tests/db/__init__.py b/watcher/tests/db/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/db/base.py b/watcher/tests/db/base.py deleted file mode 100644 index 57d7a61..0000000 --- a/watcher/tests/db/base.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2012 NTT DOCOMO, INC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Watcher DB test base class.""" - -import fixtures -from oslo_config import cfg - -from watcher.db import api as dbapi -from watcher.db.sqlalchemy import api as sqla_api -from watcher.db.sqlalchemy import migration -from watcher.db.sqlalchemy import models -from watcher.tests import base -from watcher.tests.db import utils - - -CONF = cfg.CONF - -CONF.import_opt('enable_authentication', 'watcher.api.acl') - -_DB_CACHE = None - - -class Database(fixtures.Fixture): - - def __init__(self, db_api, db_migrate, sql_connection): - self.sql_connection = sql_connection - - self.engine = db_api.get_engine() - self.engine.dispose() - conn = self.engine.connect() - self.setup_sqlite(db_migrate) - self.post_migrations() - - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - - def setup_sqlite(self, db_migrate): - if db_migrate.version(): - return - models.Base.metadata.create_all(self.engine) - db_migrate.stamp('head') - - def setUp(self): - super(Database, self).setUp() - - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - - def post_migrations(self): - """Any addition steps that are needed outside of the migrations.""" - - -class DbTestCase(base.TestCase): - - def get_next_id(self): - return next(self._id_gen) - - def setUp(self): - cfg.CONF.set_override("enable_authentication", False) - # To use in-memory SQLite DB - cfg.CONF.set_override("connection", "sqlite://", group="database") - - super(DbTestCase, self).setUp() - - self.dbapi = dbapi.get_instance() - - 
global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(sqla_api, migration, - sql_connection=CONF.database.connection) - self.useFixture(_DB_CACHE) - self._id_gen = utils.id_generator() diff --git a/watcher/tests/db/test_action.py b/watcher/tests/db/test_action.py deleted file mode 100644 index d98d854..0000000 --- a/watcher/tests/db/test_action.py +++ /dev/null @@ -1,394 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for manipulating Action via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbActionFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbActionFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - self.audit = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - self.action_plan = utils.create_test_action_plan( - audit_id=self.audit.id, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.action1 = utils.create_test_action( - action_plan_id=self.action_plan.id, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.action2 = utils.create_test_action( - action_plan_id=self.action_plan.id, id=2, uuid=None) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.action3 = utils.create_test_action( - action_plan_id=self.action_plan.id, id=3, uuid=None) - - def _soft_delete_actions(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action(self.action1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_action(self.action2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_action(self.action3.uuid) - - def _update_actions(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_action( - self.action1.uuid, - values={"state": objects.action_plan.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - 
self.dbapi.update_action( - self.action2.uuid, - values={"state": objects.action_plan.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_action( - self.action3.uuid, - values={"state": objects.action_plan.State.SUCCEEDED}) - - def test_get_action_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action(self.action1.uuid) - - res = self.dbapi.get_action_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action(self.action1.uuid) - - res = self.dbapi.get_action_list( - self.context, filters={'deleted': False}) - - self.assertEqual([self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_deleted_at_eq(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_deleted_at_lt(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_deleted_at_lte(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_deleted_at_gt(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_deleted_at_gte(self): - self._soft_delete_actions() - - 
res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action1['id'], self.action2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_action_filter_created_at_eq(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_created_at_lt(self): - with freezegun.freeze_time(self.FAKE_TODAY): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_created_at_lte(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_created_at_gt(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_created_at_gte(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action1['id'], self.action2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_action_filter_updated_at_eq(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_updated_at_lt(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_updated_at_lte(self): - 
self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_updated_at_gt(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_updated_at_gte(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action1['id'], self.action2['id']], - [r.id for r in res]) - - -class DbActionTestCase(base.DbTestCase): - - def _create_test_action(self, **kwargs): - action = utils.get_test_action(**kwargs) - self.dbapi.create_action(action) - return action - - def _create_test_action_plan(self, **kwargs): - action_plan = utils.get_test_action_plan(**kwargs) - self.dbapi.create_action_plan(action_plan) - return action_plan - - def test_get_action_list(self): - uuids = [] - for _ in range(1, 4): - action = utils.create_test_action(uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(action['uuid'])) - actions = self.dbapi.get_action_list(self.context) - action_uuids = [a.uuid for a in actions] - self.assertEqual(3, len(action_uuids)) - self.assertEqual(sorted(uuids), sorted(action_uuids)) - for action in actions: - self.assertIsNone(action.action_plan) - - def test_get_action_list_eager(self): - _action_plan = utils.get_test_action_plan() - action_plan = self.dbapi.create_action_plan(_action_plan) - - uuids = [] - for i in range(1, 4): - action = utils.create_test_action( - id=i, uuid=w_utils.generate_uuid(), - action_plan_id=action_plan.id) - uuids.append(six.text_type(action['uuid'])) - actions = self.dbapi.get_action_list(self.context, eager=True) - action_map = {a.uuid: a for a in actions} - 
self.assertEqual(sorted(uuids), sorted(action_map.keys())) - eager_action = action_map[action.uuid] - self.assertEqual( - action_plan.as_dict(), eager_action.action_plan.as_dict()) - - def test_get_action_list_with_filters(self): - audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) - action_plan = self._create_test_action_plan( - id=1, - uuid=w_utils.generate_uuid(), - audit_id=audit.id, - parents=None, - state=objects.action_plan.State.RECOMMENDED) - action1 = self._create_test_action( - id=1, - action_plan_id=1, - description='description action 1', - uuid=w_utils.generate_uuid(), - parents=None, - state=objects.action_plan.State.PENDING) - action2 = self._create_test_action( - id=2, - action_plan_id=2, - description='description action 2', - uuid=w_utils.generate_uuid(), - parents=[action1['uuid']], - state=objects.action_plan.State.PENDING) - action3 = self._create_test_action( - id=3, - action_plan_id=1, - description='description action 3', - uuid=w_utils.generate_uuid(), - parents=[action2['uuid']], - state=objects.action_plan.State.ONGOING) - res = self.dbapi.get_action_list( - self.context, - filters={'state': objects.action_plan.State.ONGOING}) - self.assertEqual([action3['id']], [r.id for r in res]) - - res = self.dbapi.get_action_list(self.context, - filters={'state': 'bad-state'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_action_list( - self.context, - filters={'action_plan_id': 2}) - self.assertEqual([action2['id']], [r.id for r in res]) - - res = self.dbapi.get_action_list( - self.context, - filters={'action_plan_uuid': action_plan['uuid']}) - self.assertEqual( - sorted([action1['id'], action3['id']]), - sorted([r.id for r in res])) - - res = self.dbapi.get_action_list( - self.context, - filters={'audit_uuid': audit.uuid}) - for action in res: - self.assertEqual(action_plan['id'], action.action_plan_id) - - def test_get_action_list_with_filter_by_uuid(self): - action = self._create_test_action() - res = 
self.dbapi.get_action_list( - self.context, filters={'uuid': action["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(action['uuid'], res[0].uuid) - - def test_get_action_by_id(self): - action = self._create_test_action() - action = self.dbapi.get_action_by_id(self.context, action['id']) - self.assertEqual(action['uuid'], action.uuid) - - def test_get_action_by_uuid(self): - action = self._create_test_action() - action = self.dbapi.get_action_by_uuid(self.context, action['uuid']) - self.assertEqual(action['id'], action.id) - - def test_get_action_that_does_not_exist(self): - self.assertRaises(exception.ActionNotFound, - self.dbapi.get_action_by_id, self.context, 1234) - - def test_update_action(self): - action = self._create_test_action() - res = self.dbapi.update_action( - action['id'], {'state': objects.action_plan.State.CANCELLED}) - self.assertEqual(objects.action_plan.State.CANCELLED, res.state) - - def test_update_action_that_does_not_exist(self): - self.assertRaises(exception.ActionNotFound, - self.dbapi.update_action, 1234, {'state': ''}) - - def test_update_action_uuid(self): - action = self._create_test_action() - self.assertRaises(exception.Invalid, - self.dbapi.update_action, action['id'], - {'uuid': 'hello'}) - - def test_destroy_action(self): - action = self._create_test_action() - self.dbapi.destroy_action(action['id']) - self.assertRaises(exception.ActionNotFound, - self.dbapi.get_action_by_id, - self.context, action['id']) - - def test_destroy_action_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_action(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_action_by_uuid(self.context, - uuid)) - self.dbapi.destroy_action(uuid) - self.assertRaises(exception.ActionNotFound, - self.dbapi.get_action_by_uuid, self.context, uuid) - - def test_destroy_action_that_does_not_exist(self): - self.assertRaises(exception.ActionNotFound, - self.dbapi.destroy_action, 1234) - - def test_create_action_already_exists(self): - uuid = 
w_utils.generate_uuid() - self._create_test_action(id=1, uuid=uuid) - self.assertRaises(exception.ActionAlreadyExists, - self._create_test_action, - id=2, uuid=uuid) diff --git a/watcher/tests/db/test_action_plan.py b/watcher/tests/db/test_action_plan.py deleted file mode 100644 index 94e5757..0000000 --- a/watcher/tests/db/test_action_plan.py +++ /dev/null @@ -1,391 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating ActionPlan via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.objects import action_plan as ap_objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbActionPlanFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbActionPlanFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - self.audit = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.action_plan1 = 
utils.create_test_action_plan( - audit_id=self.audit.id, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.action_plan2 = utils.create_test_action_plan( - audit_id=self.audit.id, id=2, uuid=None) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.action_plan3 = utils.create_test_action_plan( - audit_id=self.audit.id, id=3, uuid=None) - - def _soft_delete_action_plans(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_action_plan(self.action_plan2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_action_plan(self.action_plan3.uuid) - - def _update_action_plans(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_action_plan( - self.action_plan1.uuid, - values={"state": ap_objects.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_action_plan( - self.action_plan2.uuid, - values={"state": ap_objects.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_action_plan( - self.action_plan3.uuid, - values={"state": ap_objects.State.SUCCEEDED}) - - def test_get_action_plan_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted': False}) - - self.assertEqual([self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_eq(self): - 
self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_lt(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_lte(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_gt(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_gte(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan1['id'], self.action_plan2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_action_plan_list_filter_created_at_eq(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_lt(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_lte(self): - res = 
self.dbapi.get_action_plan_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_gt(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_gte(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan1['id'], self.action_plan2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_action_plan_list_filter_updated_at_eq(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_lt(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_lte(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_gt(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_gte(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, 
filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan1['id'], self.action_plan2['id']], - [r.id for r in res]) - - -class DbActionPlanTestCase(base.DbTestCase): - - def _create_test_audit(self, **kwargs): - audit = utils.get_test_audit(**kwargs) - self.dbapi.create_audit(audit) - return audit - - def _create_test_action_plan(self, **kwargs): - action_plan = utils.get_test_action_plan(**kwargs) - self.dbapi.create_action_plan(action_plan) - return action_plan - - def test_get_action_plan_list(self): - uuids = [] - for _ in range(1, 4): - action_plan = utils.create_test_action_plan( - uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(action_plan['uuid'])) - action_plans = self.dbapi.get_action_plan_list(self.context) - action_plan_uuids = [ap.uuid for ap in action_plans] - self.assertEqual(sorted(uuids), sorted(action_plan_uuids)) - for action_plan in action_plans: - self.assertIsNone(action_plan.audit) - self.assertIsNone(action_plan.strategy) - - def test_get_action_plan_list_eager(self): - _strategy = utils.get_test_strategy() - strategy = self.dbapi.create_strategy(_strategy) - _audit = utils.get_test_audit() - audit = self.dbapi.create_audit(_audit) - - uuids = [] - for _ in range(1, 4): - action_plan = utils.create_test_action_plan( - uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(action_plan['uuid'])) - action_plans = self.dbapi.get_action_plan_list( - self.context, eager=True) - action_plan_map = {a.uuid: a for a in action_plans} - self.assertEqual(sorted(uuids), sorted(action_plan_map.keys())) - eager_action_plan = action_plan_map[action_plan.uuid] - self.assertEqual( - strategy.as_dict(), eager_action_plan.strategy.as_dict()) - self.assertEqual(audit.as_dict(), eager_action_plan.audit.as_dict()) - - def test_get_action_plan_list_with_filters(self): - audit = self._create_test_audit( - id=2, - audit_type='ONESHOT', - uuid=w_utils.generate_uuid(), - state=ap_objects.State.ONGOING) - action_plan1 = 
self._create_test_action_plan( - id=1, - uuid=w_utils.generate_uuid(), - audit_id=audit['id'], - state=ap_objects.State.RECOMMENDED) - action_plan2 = self._create_test_action_plan( - id=2, - uuid=w_utils.generate_uuid(), - audit_id=audit['id'], - state=ap_objects.State.ONGOING) - - res = self.dbapi.get_action_plan_list( - self.context, - filters={'state': ap_objects.State.RECOMMENDED}) - self.assertEqual([action_plan1['id']], [r.id for r in res]) - - res = self.dbapi.get_action_plan_list( - self.context, - filters={'state': ap_objects.State.ONGOING}) - self.assertEqual([action_plan2['id']], [r.id for r in res]) - - res = self.dbapi.get_action_plan_list( - self.context, - filters={'audit_uuid': audit['uuid']}) - - for r in res: - self.assertEqual(audit['id'], r.audit_id) - - self.dbapi.soft_delete_action_plan(action_plan1['uuid']) - res = self.dbapi.get_action_plan_list( - self.context, - filters={'audit_uuid': audit['uuid']}) - - self.assertEqual([action_plan2['id']], [r.id for r in res]) - self.assertNotEqual([action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_with_filter_by_uuid(self): - action_plan = self._create_test_action_plan() - res = self.dbapi.get_action_plan_list( - self.context, filters={'uuid': action_plan["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(action_plan['uuid'], res[0].uuid) - - def test_get_action_plan_by_id(self): - action_plan = self._create_test_action_plan() - action_plan = self.dbapi.get_action_plan_by_id( - self.context, action_plan['id']) - self.assertEqual(action_plan['uuid'], action_plan.uuid) - - def test_get_action_plan_by_uuid(self): - action_plan = self._create_test_action_plan() - action_plan = self.dbapi.get_action_plan_by_uuid( - self.context, action_plan['uuid']) - self.assertEqual(action_plan['id'], action_plan.id) - - def test_get_action_plan_that_does_not_exist(self): - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.get_action_plan_by_id, self.context, 1234) - - def 
test_update_action_plan(self): - action_plan = self._create_test_action_plan() - res = self.dbapi.update_action_plan( - action_plan['id'], {'name': 'updated-model'}) - self.assertEqual('updated-model', res.name) - - def test_update_action_plan_that_does_not_exist(self): - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.update_action_plan, 1234, {'name': ''}) - - def test_update_action_plan_uuid(self): - action_plan = self._create_test_action_plan() - self.assertRaises(exception.Invalid, - self.dbapi.update_action_plan, action_plan['id'], - {'uuid': 'hello'}) - - def test_destroy_action_plan(self): - action_plan = self._create_test_action_plan() - self.dbapi.destroy_action_plan(action_plan['id']) - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.get_action_plan_by_id, - self.context, action_plan['id']) - - def test_destroy_action_plan_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_action_plan(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_action_plan_by_uuid( - self.context, uuid)) - self.dbapi.destroy_action_plan(uuid) - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.get_action_plan_by_uuid, - self.context, uuid) - - def test_destroy_action_plan_that_does_not_exist(self): - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.destroy_action_plan, 1234) - - def test_destroy_action_plan_that_referenced_by_actions(self): - action_plan = self._create_test_action_plan() - action = utils.create_test_action(action_plan_id=action_plan['id']) - self.assertEqual(action_plan['id'], action.action_plan_id) - self.assertRaises(exception.ActionPlanReferenced, - self.dbapi.destroy_action_plan, action_plan['id']) - - def test_create_action_plan_already_exists(self): - uuid = w_utils.generate_uuid() - self._create_test_action_plan(id=1, uuid=uuid) - self.assertRaises(exception.ActionPlanAlreadyExists, - self._create_test_action_plan, - id=2, uuid=uuid) diff --git a/watcher/tests/db/test_audit.py 
b/watcher/tests/db/test_audit.py deleted file mode 100644 index 1b467ba..0000000 --- a/watcher/tests/db/test_audit.py +++ /dev/null @@ -1,391 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating Audit via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbAuditFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbAuditFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.audit1 = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.audit2 = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=2, uuid=None, - state=objects.audit.State.FAILED) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.audit3 = utils.create_test_audit( - 
audit_template_id=self.audit_template.id, id=3, uuid=None, - state=objects.audit.State.CANCELLED) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.audit4 = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=4, uuid=None, - state=objects.audit.State.SUSPENDED) - - def _soft_delete_audits(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit(self.audit1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_audit(self.audit2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_audit(self.audit3.uuid) - - def _update_audits(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_audit( - self.audit1.uuid, - values={"state": objects.audit.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_audit( - self.audit2.uuid, - values={"state": objects.audit.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_audit( - self.audit3.uuid, - values={"state": objects.audit.State.SUCCEEDED}) - - def test_get_audit_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit(self.audit1.uuid) - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit(self.audit1.uuid) - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_eq(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def 
test_get_audit_list_filter_deleted_at_lt(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_lte(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_gt(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_gte(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit1['id'], self.audit2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_audit_list_filter_created_at_eq(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_created_at_lt(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_created_at_lte(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_created_at_gt(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - 
self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_created_at_gte(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit1['id'], self.audit2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_audit_list_filter_updated_at_eq(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_lt(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_lte(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_gt(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_gte(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit1['id'], self.audit2['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_state_in(self): - res = self.dbapi.get_audit_list( - self.context, - filters={ - 'state__in': - objects.audit.AuditStateTransitionManager.INACTIVE_STATES - }) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_state_notin(self): - res = 
self.dbapi.get_audit_list( - self.context, - filters={ - 'state__notin': - objects.audit.AuditStateTransitionManager.INACTIVE_STATES - }) - - self.assertEqual( - [self.audit1['id']], - [r.id for r in res]) - - -class DbAuditTestCase(base.DbTestCase): - - def _create_test_audit(self, **kwargs): - audit = utils.get_test_audit(**kwargs) - self.dbapi.create_audit(audit) - return audit - - def test_get_audit_list(self): - uuids = [] - for _ in range(1, 4): - audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(audit['uuid'])) - audits = self.dbapi.get_audit_list(self.context) - audit_uuids = [a.uuid for a in audits] - self.assertEqual(sorted(uuids), sorted(audit_uuids)) - for audit in audits: - self.assertIsNone(audit.goal) - self.assertIsNone(audit.strategy) - - def test_get_audit_list_eager(self): - _goal = utils.get_test_goal() - goal = self.dbapi.create_goal(_goal) - _strategy = utils.get_test_strategy() - strategy = self.dbapi.create_strategy(_strategy) - - uuids = [] - for i in range(1, 4): - audit = utils.create_test_audit( - id=i, uuid=w_utils.generate_uuid(), - goal_id=goal.id, strategy_id=strategy.id) - uuids.append(six.text_type(audit['uuid'])) - audits = self.dbapi.get_audit_list(self.context, eager=True) - audit_map = {a.uuid: a for a in audits} - self.assertEqual(sorted(uuids), sorted(audit_map.keys())) - eager_audit = audit_map[audit.uuid] - self.assertEqual(goal.as_dict(), eager_audit.goal.as_dict()) - self.assertEqual(strategy.as_dict(), eager_audit.strategy.as_dict()) - - def test_get_audit_list_with_filters(self): - audit1 = self._create_test_audit( - id=1, - audit_type=objects.audit.AuditType.ONESHOT.value, - uuid=w_utils.generate_uuid(), - state=objects.audit.State.ONGOING) - audit2 = self._create_test_audit( - id=2, - audit_type='CONTINUOUS', - uuid=w_utils.generate_uuid(), - state=objects.audit.State.PENDING) - - res = self.dbapi.get_audit_list( - self.context, - filters={'audit_type': 
objects.audit.AuditType.ONESHOT.value}) - self.assertEqual([audit1['id']], [r.id for r in res]) - - res = self.dbapi.get_audit_list(self.context, - filters={'audit_type': 'bad-type'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_audit_list( - self.context, - filters={'state': objects.audit.State.ONGOING}) - self.assertEqual([audit1['id']], [r.id for r in res]) - - res = self.dbapi.get_audit_list( - self.context, - filters={'state': objects.audit.State.PENDING}) - self.assertEqual([audit2['id']], [r.id for r in res]) - - def test_get_audit_list_with_filter_by_uuid(self): - audit = self._create_test_audit() - res = self.dbapi.get_audit_list( - self.context, filters={'uuid': audit["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(audit['uuid'], res[0].uuid) - - def test_get_audit_by_id(self): - audit = self._create_test_audit() - audit = self.dbapi.get_audit_by_id(self.context, audit['id']) - self.assertEqual(audit['uuid'], audit.uuid) - - def test_get_audit_by_uuid(self): - audit = self._create_test_audit() - audit = self.dbapi.get_audit_by_uuid(self.context, audit['uuid']) - self.assertEqual(audit['id'], audit.id) - - def test_get_audit_that_does_not_exist(self): - self.assertRaises(exception.AuditNotFound, - self.dbapi.get_audit_by_id, self.context, 1234) - - def test_update_audit(self): - audit = self._create_test_audit() - res = self.dbapi.update_audit(audit['id'], {'name': 'updated-model'}) - self.assertEqual('updated-model', res.name) - - def test_update_audit_that_does_not_exist(self): - self.assertRaises(exception.AuditNotFound, - self.dbapi.update_audit, 1234, {'name': ''}) - - def test_update_audit_uuid(self): - audit = self._create_test_audit() - self.assertRaises(exception.Invalid, - self.dbapi.update_audit, audit['id'], - {'uuid': 'hello'}) - - def test_destroy_audit(self): - audit = self._create_test_audit() - self.dbapi.destroy_audit(audit['id']) - self.assertRaises(exception.AuditNotFound, - 
self.dbapi.get_audit_by_id, - self.context, audit['id']) - - def test_destroy_audit_by_uuid(self): - audit = self._create_test_audit() - self.assertIsNotNone(self.dbapi.get_audit_by_uuid(self.context, - audit['uuid'])) - self.dbapi.destroy_audit(audit['uuid']) - self.assertRaises(exception.AuditNotFound, - self.dbapi.get_audit_by_uuid, self.context, - audit['uuid']) - - def test_destroy_audit_that_does_not_exist(self): - self.assertRaises(exception.AuditNotFound, - self.dbapi.destroy_audit, 1234) - - def test_destroy_audit_that_referenced_by_action_plans(self): - audit = self._create_test_audit() - action_plan = utils.create_test_action_plan(audit_id=audit['id']) - self.assertEqual(audit['id'], action_plan.audit_id) - self.assertRaises(exception.AuditReferenced, - self.dbapi.destroy_audit, audit['id']) diff --git a/watcher/tests/db/test_audit_template.py b/watcher/tests/db/test_audit_template.py deleted file mode 100644 index a885be4..0000000 --- a/watcher/tests/db/test_audit_template.py +++ /dev/null @@ -1,388 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for manipulating AuditTemplate via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbAuditTemplateFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbAuditTemplateFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - gen_name = lambda: "Audit Template %s" % w_utils.generate_uuid() - self.audit_template1_name = gen_name() - self.audit_template2_name = gen_name() - self.audit_template3_name = gen_name() - - with freezegun.freeze_time(self.FAKE_TODAY): - self.audit_template1 = utils.create_test_audit_template( - name=self.audit_template1_name, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.audit_template2 = utils.create_test_audit_template( - name=self.audit_template2_name, id=2, uuid=None) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.audit_template3 = utils.create_test_audit_template( - name=self.audit_template3_name, id=3, uuid=None) - - def _soft_delete_audit_templates(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_audit_template(self.audit_template2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_audit_template(self.audit_template3.uuid) - - def _update_audit_templates(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_audit_template( - self.audit_template1.uuid, values={"name": "audit_template1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_audit_template( - self.audit_template2.uuid, values={"name": 
"audit_template2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_audit_template( - self.audit_template3.uuid, values={"name": "audit_template3"}) - - def test_get_audit_template_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_eq(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_lt(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_lte(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_gt(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__gt': 
self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_gte(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template1['id'], self.audit_template2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_audit_template_list_filter_created_at_eq(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_lt(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_lte(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_gt(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_gte(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template1['id'], self.audit_template2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_audit_template_list_filter_updated_at_eq(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - 
self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_lt(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_lte(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_gt(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_gte(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template1['id'], self.audit_template2['id']], - [r.id for r in res]) - - -class DbAuditTemplateTestCase(base.DbTestCase): - - def _create_test_goal(self, **kwargs): - goal = utils.get_test_goal(**kwargs) - self.dbapi.create_goal(goal) - return goal - - def _create_test_audit_template(self, **kwargs): - audit_template = utils.get_test_audit_template(**kwargs) - self.dbapi.create_audit_template(audit_template) - return audit_template - - def test_get_audit_template_list(self): - uuids = [] - for i in range(1, 4): - audit_template = utils.create_test_audit_template( - id=i, - uuid=w_utils.generate_uuid(), - name='My Audit Template {0}'.format(i)) - uuids.append(six.text_type(audit_template['uuid'])) - audit_templates = self.dbapi.get_audit_template_list(self.context) - 
audit_template_uuids = [at.uuid for at in audit_templates] - self.assertEqual(sorted(uuids), sorted(audit_template_uuids)) - for audit_template in audit_templates: - self.assertIsNone(audit_template.goal) - self.assertIsNone(audit_template.strategy) - - def test_get_audit_template_list_eager(self): - _goal = utils.get_test_goal() - goal = self.dbapi.create_goal(_goal) - _strategy = utils.get_test_strategy() - strategy = self.dbapi.create_strategy(_strategy) - - uuids = [] - for i in range(1, 4): - audit_template = utils.create_test_audit_template( - id=i, uuid=w_utils.generate_uuid(), - name='My Audit Template {0}'.format(i), - goal_id=goal.id, strategy_id=strategy.id) - uuids.append(six.text_type(audit_template['uuid'])) - audit_templates = self.dbapi.get_audit_template_list( - self.context, eager=True) - audit_template_map = {a.uuid: a for a in audit_templates} - self.assertEqual(sorted(uuids), sorted(audit_template_map.keys())) - eager_audit_template = audit_template_map[audit_template.uuid] - self.assertEqual(goal.as_dict(), eager_audit_template.goal.as_dict()) - self.assertEqual( - strategy.as_dict(), eager_audit_template.strategy.as_dict()) - - def test_get_audit_template_list_with_filters(self): - goal = self._create_test_goal(name='DUMMY') - audit_template1 = self._create_test_audit_template( - id=1, - uuid=w_utils.generate_uuid(), - name='My Audit Template 1', - description='Description of my audit template 1', - goal_id=goal['id']) - audit_template2 = self._create_test_audit_template( - id=2, - uuid=w_utils.generate_uuid(), - name='My Audit Template 2', - description='Description of my audit template 2', - goal_id=goal['id']) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'name': 'My Audit Template 1'}) - self.assertEqual([audit_template1['id']], [r.id for r in res]) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'name': 'Does not exist'}) - self.assertEqual([], [r.id for r in res]) - - res = 
self.dbapi.get_audit_template_list( - self.context, - filters={'goal': 'DUMMY'}) - self.assertEqual([audit_template1['id'], audit_template2['id']], - [r.id for r in res]) - - res = self.dbapi.get_audit_template_list( - self.context, - filters={'name': 'My Audit Template 2'}) - self.assertEqual([audit_template2['id']], [r.id for r in res]) - - def test_get_audit_template_list_with_filter_by_uuid(self): - audit_template = self._create_test_audit_template() - res = self.dbapi.get_audit_template_list( - self.context, filters={'uuid': audit_template["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(audit_template['uuid'], res[0].uuid) - - def test_get_audit_template_by_id(self): - audit_template = self._create_test_audit_template() - audit_template = self.dbapi.get_audit_template_by_id( - self.context, audit_template['id']) - self.assertEqual(audit_template['uuid'], audit_template.uuid) - - def test_get_audit_template_by_uuid(self): - audit_template = self._create_test_audit_template() - audit_template = self.dbapi.get_audit_template_by_uuid( - self.context, audit_template['uuid']) - self.assertEqual(audit_template['id'], audit_template.id) - - def test_get_audit_template_that_does_not_exist(self): - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.get_audit_template_by_id, - self.context, 1234) - - def test_update_audit_template(self): - audit_template = self._create_test_audit_template() - res = self.dbapi.update_audit_template(audit_template['id'], - {'name': 'updated-model'}) - self.assertEqual('updated-model', res.name) - - def test_update_audit_template_that_does_not_exist(self): - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.update_audit_template, 1234, {'name': ''}) - - def test_update_audit_template_uuid(self): - audit_template = self._create_test_audit_template() - self.assertRaises(exception.Invalid, - self.dbapi.update_audit_template, - audit_template['id'], - {'uuid': 'hello'}) - - def 
test_destroy_audit_template(self): - audit_template = self._create_test_audit_template() - self.dbapi.destroy_audit_template(audit_template['id']) - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.get_audit_template_by_id, - self.context, audit_template['id']) - - def test_destroy_audit_template_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_audit_template(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_audit_template_by_uuid( - self.context, uuid)) - self.dbapi.destroy_audit_template(uuid) - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.get_audit_template_by_uuid, - self.context, uuid) - - def test_destroy_audit_template_that_does_not_exist(self): - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.destroy_audit_template, 1234) - - def test_create_audit_template_already_exists(self): - uuid = w_utils.generate_uuid() - self._create_test_audit_template(id=1, uuid=uuid) - self.assertRaises(exception.AuditTemplateAlreadyExists, - self._create_test_audit_template, - id=2, uuid=uuid) - - def test_audit_template_create_same_name(self): - audit_template1 = utils.create_test_audit_template( - uuid=w_utils.generate_uuid(), - name='audit_template_name') - self.assertEqual(audit_template1['uuid'], audit_template1.uuid) - self.assertRaises( - exception.AuditTemplateAlreadyExists, - utils.create_test_audit_template, - uuid=w_utils.generate_uuid(), - name='audit_template_name') diff --git a/watcher/tests/db/test_efficacy_indicator.py b/watcher/tests/db/test_efficacy_indicator.py deleted file mode 100644 index 673678c..0000000 --- a/watcher/tests/db/test_efficacy_indicator.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating EfficacyIndicator via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbEfficacyIndicatorFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbEfficacyIndicatorFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - self.audit = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - self.action_plan = utils.create_test_action_plan( - audit_id=self.audit.id, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.efficacy_indicator1 = utils.create_test_efficacy_indicator( - action_plan_id=self.action_plan.id, id=1, uuid=None, - name="efficacy_indicator1", description="Test Indicator 1") - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.efficacy_indicator2 = utils.create_test_efficacy_indicator( - action_plan_id=self.action_plan.id, id=2, uuid=None, - name="efficacy_indicator2", description="Test Indicator 2") - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.efficacy_indicator3 = utils.create_test_efficacy_indicator( - 
action_plan_id=self.action_plan.id, id=3, uuid=None, - name="efficacy_indicator3", description="Test Indicator 3") - - def _soft_delete_efficacy_indicators(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator3.uuid) - - def _update_efficacy_indicators(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_efficacy_indicator( - self.efficacy_indicator1.uuid, - values={"description": "New description 1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_efficacy_indicator( - self.efficacy_indicator2.uuid, - values={"description": "New description 2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_efficacy_indicator( - self.efficacy_indicator3.uuid, - values={"description": "New description 3"}) - - def test_get_efficacy_indicator_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator1.uuid) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator1.uuid) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted': False}) - - self.assertEqual([self.efficacy_indicator2['id'], - self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_eq(self): - self._soft_delete_efficacy_indicators() - - res = 
self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_lt(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_lte(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_gt(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_gte(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_efficacy_indicator_filter_created_at_eq(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_lt(self): - with freezegun.freeze_time(self.FAKE_TODAY): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - 
[self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_lte(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_gt(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_gte(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_efficacy_indicator_filter_updated_at_eq(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_lt(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_lte(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_gt(self): - self._update_efficacy_indicators() - - res = 
self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_gte(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], - [r.id for r in res]) - - -class DbEfficacyIndicatorTestCase(base.DbTestCase): - - def _create_test_efficacy_indicator(self, **kwargs): - efficacy_indicator_dict = utils.get_test_efficacy_indicator(**kwargs) - efficacy_indicator = self.dbapi.create_efficacy_indicator( - efficacy_indicator_dict) - return efficacy_indicator - - def _create_test_action_plan(self, **kwargs): - action_plan_dict = utils.get_test_action_plan(**kwargs) - action_plan = self.dbapi.create_action_plan(action_plan_dict) - return action_plan - - def test_get_efficacy_indicator_list(self): - uuids = [] - action_plan = self._create_test_action_plan() - for id_ in range(1, 4): - efficacy_indicator = utils.create_test_efficacy_indicator( - action_plan_id=action_plan.id, id=id_, uuid=None, - name="efficacy_indicator", description="Test Indicator ") - uuids.append(six.text_type(efficacy_indicator['uuid'])) - efficacy_indicators = self.dbapi.get_efficacy_indicator_list( - self.context) - efficacy_indicator_uuids = [ei.uuid for ei in efficacy_indicators] - self.assertEqual(sorted(uuids), sorted(efficacy_indicator_uuids)) - for efficacy_indicator in efficacy_indicators: - self.assertIsNone(efficacy_indicator.action_plan) - - def test_get_efficacy_indicator_list_eager(self): - _action_plan = utils.get_test_action_plan() - action_plan = self.dbapi.create_action_plan(_action_plan) - - uuids = [] - for i in range(1, 4): - efficacy_indicator = utils.create_test_efficacy_indicator( - id=i, uuid=w_utils.generate_uuid(), - 
action_plan_id=action_plan.id) - uuids.append(six.text_type(efficacy_indicator['uuid'])) - efficacy_indicators = self.dbapi.get_efficacy_indicator_list( - self.context, eager=True) - efficacy_indicator_map = {a.uuid: a for a in efficacy_indicators} - self.assertEqual(sorted(uuids), sorted(efficacy_indicator_map.keys())) - eager_efficacy_indicator = efficacy_indicator_map[ - efficacy_indicator.uuid] - self.assertEqual( - action_plan.as_dict(), - eager_efficacy_indicator.action_plan.as_dict()) - - def test_get_efficacy_indicator_list_with_filters(self): - audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) - action_plan = self._create_test_action_plan( - id=1, - uuid=w_utils.generate_uuid(), - audit_id=audit.id, - first_efficacy_indicator_id=None, - state=objects.action_plan.State.RECOMMENDED) - efficacy_indicator1 = self._create_test_efficacy_indicator( - id=1, - name='indicator_1', - uuid=w_utils.generate_uuid(), - action_plan_id=1, - description='Description efficacy indicator 1', - unit='%') - efficacy_indicator2 = self._create_test_efficacy_indicator( - id=2, - name='indicator_2', - uuid=w_utils.generate_uuid(), - action_plan_id=2, - description='Description efficacy indicator 2', - unit='%') - efficacy_indicator3 = self._create_test_efficacy_indicator( - id=3, - name='indicator_3', - uuid=w_utils.generate_uuid(), - action_plan_id=1, - description='Description efficacy indicator 3', - unit='%') - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'name': 'indicator_3'}) - self.assertEqual([efficacy_indicator3['id']], [r.id for r in res]) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'unit': 'kWh'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, - filters={'action_plan_id': 2}) - self.assertEqual([efficacy_indicator2['id']], [r.id for r in res]) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, - filters={'action_plan_uuid': 
action_plan['uuid']}) - self.assertEqual( - sorted([efficacy_indicator1['id'], efficacy_indicator3['id']]), - sorted([r.id for r in res])) - - def test_get_efficacy_indicator_list_with_filter_by_uuid(self): - efficacy_indicator = self._create_test_efficacy_indicator() - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'uuid': efficacy_indicator.uuid}) - - self.assertEqual(len(res), 1) - self.assertEqual(efficacy_indicator.uuid, res[0].uuid) - - def test_get_efficacy_indicator_by_id(self): - efficacy_indicator = self._create_test_efficacy_indicator() - efficacy_indicator = self.dbapi.get_efficacy_indicator_by_id( - self.context, efficacy_indicator.id) - self.assertEqual(efficacy_indicator.uuid, efficacy_indicator.uuid) - - def test_get_efficacy_indicator_by_uuid(self): - efficacy_indicator = self._create_test_efficacy_indicator() - efficacy_indicator = self.dbapi.get_efficacy_indicator_by_uuid( - self.context, efficacy_indicator.uuid) - self.assertEqual(efficacy_indicator['id'], efficacy_indicator.id) - - def test_get_efficacy_indicator_that_does_not_exist(self): - self.assertRaises( - exception.EfficacyIndicatorNotFound, - self.dbapi.get_efficacy_indicator_by_id, self.context, 1234) - - def test_update_efficacy_indicator(self): - efficacy_indicator = self._create_test_efficacy_indicator() - res = self.dbapi.update_efficacy_indicator( - efficacy_indicator.id, - {'state': objects.action_plan.State.CANCELLED}) - self.assertEqual('CANCELLED', res.state) - - def test_update_efficacy_indicator_that_does_not_exist(self): - self.assertRaises( - exception.EfficacyIndicatorNotFound, - self.dbapi.update_efficacy_indicator, 1234, {'state': ''}) - - def test_update_efficacy_indicator_uuid(self): - efficacy_indicator = self._create_test_efficacy_indicator() - self.assertRaises( - exception.Invalid, - self.dbapi.update_efficacy_indicator, efficacy_indicator.id, - {'uuid': 'hello'}) - - def test_destroy_efficacy_indicator(self): - efficacy_indicator = 
self._create_test_efficacy_indicator() - self.dbapi.destroy_efficacy_indicator(efficacy_indicator['id']) - self.assertRaises(exception.EfficacyIndicatorNotFound, - self.dbapi.get_efficacy_indicator_by_id, - self.context, efficacy_indicator['id']) - - def test_destroy_efficacy_indicator_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_efficacy_indicator(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_efficacy_indicator_by_uuid( - self.context, uuid)) - self.dbapi.destroy_efficacy_indicator(uuid) - self.assertRaises( - exception.EfficacyIndicatorNotFound, - self.dbapi.get_efficacy_indicator_by_uuid, self.context, uuid) - - def test_destroy_efficacy_indicator_that_does_not_exist(self): - self.assertRaises(exception.EfficacyIndicatorNotFound, - self.dbapi.destroy_efficacy_indicator, 1234) - - def test_create_efficacy_indicator_already_exists(self): - uuid = w_utils.generate_uuid() - self._create_test_efficacy_indicator(id=1, uuid=uuid) - self.assertRaises(exception.EfficacyIndicatorAlreadyExists, - self._create_test_efficacy_indicator, - id=2, uuid=uuid) diff --git a/watcher/tests/db/test_goal.py b/watcher/tests/db/test_goal.py deleted file mode 100644 index cae9449..0000000 --- a/watcher/tests/db/test_goal.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for manipulating Goal via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbGoalFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbGoalFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.goal1 = utils.create_test_goal( - id=1, uuid=w_utils.generate_uuid(), name="GOAL_1", - display_name="Goal 1") - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.goal2 = utils.create_test_goal( - id=2, uuid=w_utils.generate_uuid(), - name="GOAL_2", display_name="Goal 2") - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.goal3 = utils.create_test_goal( - id=3, uuid=w_utils.generate_uuid(), - name="GOAL_3", display_name="Goal 3") - - def _soft_delete_goals(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_goal(self.goal1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_goal(self.goal2.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_goal(self.goal3.id) - - def _update_goals(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_goal( - self.goal1.uuid, values={"display_name": "goal1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_goal( - self.goal2.uuid, values={"display_name": "goal2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_goal( - self.goal3.uuid, values={"display_name": "goal3"}) - - def test_get_goal_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_goal(self.goal1.id) - - res = self.dbapi.get_goal_list( - 
self.context, filters={'deleted': True}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_goal(self.goal1.id) - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_deleted_at_eq(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_deleted_at_lt(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_deleted_at_lte(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_deleted_at_gt(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_deleted_at_gte(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal1.uuid, self.goal2.uuid]), - set([r.uuid for r in res])) - - # created_at # - - def test_get_goal_list_filter_created_at_eq(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def 
test_get_goal_list_filter_created_at_lt(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_created_at_lte(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_created_at_gt(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_created_at_gte(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal1.uuid, self.goal2.uuid]), - set([r.uuid for r in res])) - - # updated_at # - - def test_get_goal_list_filter_updated_at_eq(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_updated_at_lt(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_updated_at_lte(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_updated_at_gt(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - 
def test_get_goal_list_filter_updated_at_gte(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal1.uuid, self.goal2.uuid]), - set([r.uuid for r in res])) - - -class DbGoalTestCase(base.DbTestCase): - - def _create_test_goal(self, **kwargs): - goal = utils.get_test_goal(**kwargs) - self.dbapi.create_goal(goal) - return goal - - def test_get_goal_list(self): - uuids = [] - for i in range(1, 4): - goal = utils.create_test_goal( - id=i, - uuid=w_utils.generate_uuid(), - name="GOAL_%s" % i, - display_name='My Goal %s' % i) - uuids.append(six.text_type(goal['uuid'])) - goals = self.dbapi.get_goal_list(self.context) - goal_uuids = [g.uuid for g in goals] - self.assertEqual(sorted(uuids), sorted(goal_uuids)) - - def test_get_goal_list_with_filters(self): - goal1 = self._create_test_goal( - id=1, - uuid=w_utils.generate_uuid(), - name="GOAL_1", - display_name='Goal 1', - ) - goal2 = self._create_test_goal( - id=2, - uuid=w_utils.generate_uuid(), - name="GOAL_2", - display_name='Goal 2', - ) - - res = self.dbapi.get_goal_list(self.context, - filters={'display_name': 'Goal 1'}) - self.assertEqual([goal1['uuid']], [r.uuid for r in res]) - - res = self.dbapi.get_goal_list(self.context, - filters={'display_name': 'Goal 3'}) - self.assertEqual([], [r.uuid for r in res]) - - res = self.dbapi.get_goal_list( - self.context, filters={'name': 'GOAL_1'}) - self.assertEqual([goal1['uuid']], [r.uuid for r in res]) - - res = self.dbapi.get_goal_list( - self.context, - filters={'display_name': 'Goal 2'}) - self.assertEqual([goal2['uuid']], [r.uuid for r in res]) - - def test_get_goal_by_uuid(self): - efficacy_spec = [{"unit": "%", "name": "dummy", - "schema": "Range(min=0, max=100, min_included=True, " - "max_included=True, msg=None)", - "description": "Dummy indicator"}] - created_goal = self._create_test_goal( - efficacy_specification=efficacy_spec) - goal = 
self.dbapi.get_goal_by_uuid(self.context, created_goal['uuid']) - self.assertEqual(goal.uuid, created_goal['uuid']) - - def test_get_goal_that_does_not_exist(self): - random_uuid = w_utils.generate_uuid() - self.assertRaises(exception.GoalNotFound, - self.dbapi.get_goal_by_uuid, - self.context, random_uuid) - - def test_update_goal(self): - goal = self._create_test_goal() - res = self.dbapi.update_goal(goal['uuid'], - {'display_name': 'updated-model'}) - self.assertEqual('updated-model', res.display_name) - - def test_update_goal_id(self): - goal = self._create_test_goal() - self.assertRaises(exception.Invalid, - self.dbapi.update_goal, goal['uuid'], - {'uuid': 'NEW_GOAL'}) - - def test_update_goal_that_does_not_exist(self): - random_uuid = w_utils.generate_uuid() - self.assertRaises(exception.GoalNotFound, - self.dbapi.update_goal, - random_uuid, - {'display_name': ''}) - - def test_destroy_goal(self): - goal = self._create_test_goal() - self.dbapi.destroy_goal(goal['uuid']) - self.assertRaises(exception.GoalNotFound, - self.dbapi.get_goal_by_uuid, - self.context, goal['uuid']) - - def test_destroy_goal_that_does_not_exist(self): - random_uuid = w_utils.generate_uuid() - self.assertRaises(exception.GoalNotFound, - self.dbapi.destroy_goal, random_uuid) - - def test_create_goal_already_exists(self): - goal_uuid = w_utils.generate_uuid() - self._create_test_goal(uuid=goal_uuid) - self.assertRaises(exception.GoalAlreadyExists, - self._create_test_goal, - uuid=goal_uuid) diff --git a/watcher/tests/db/test_purge.py b/watcher/tests/db/test_purge.py deleted file mode 100644 index 5a0dde7..0000000 --- a/watcher/tests/db/test_purge.py +++ /dev/null @@ -1,502 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import uuidutils - -import freezegun -import mock - -from watcher.common import context as watcher_context -from watcher.common import utils -from watcher.db import purge -from watcher.db.sqlalchemy import api as dbapi -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestPurgeCommand(base.DbTestCase): - - def setUp(self): - super(TestPurgeCommand, self).setUp() - self.cmd = purge.PurgeCommand() - token_info = { - 'token': { - 'project': { - 'id': 'fake_project' - }, - 'user': { - 'id': 'fake_user' - } - } - } - self.context = watcher_context.RequestContext( - auth_token_info=token_info, - project_id='fake_project', - user_id='fake_user', - show_deleted=True, - ) - - self.fake_today = '2016-02-24T09:52:05.219414+00:00' - self.expired_date = '2016-01-24T09:52:05.219414+00:00' - - self.m_input = mock.Mock() - p = mock.patch("watcher.db.purge.input", self.m_input) - self.m_input.return_value = 'y' - p.start() - self.addCleanup(p.stop) - - self._id_generator = None - self._data_setup() - - def _generate_id(self): - if self._id_generator is None: - self._id_generator = self._get_id_generator() - return next(self._id_generator) - - def _get_id_generator(self): - seed = 1 - while True: - yield seed - seed += 1 - - def generate_unique_name(self, prefix): - return "%s%s" % (prefix, uuidutils.generate_uuid()) - - def _data_setup(self): - # All the 1's are soft_deleted and are expired - # All the 2's are soft_deleted but are not expired - # All the 3's are *not* soft_deleted - - # Number of days 
we want to keep in DB (no purge for them) - self.cmd.age_in_days = 10 - self.cmd.max_number = None - self.cmd.orphans = True - - goal1_name = "GOAL_1" - goal2_name = "GOAL_2" - goal3_name = "GOAL_3" - - strategy1_name = "strategy_1" - strategy2_name = "strategy_2" - strategy3_name = "strategy_3" - - self.audit_template1_name = self.generate_unique_name( - prefix="Audit Template 1 ") - self.audit_template2_name = self.generate_unique_name( - prefix="Audit Template 2 ") - self.audit_template3_name = self.generate_unique_name( - prefix="Audit Template 3 ") - - with freezegun.freeze_time(self.expired_date): - self.goal1 = obj_utils.create_test_goal( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=goal1_name, display_name=goal1_name.lower()) - self.goal2 = obj_utils.create_test_goal( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=goal2_name, display_name=goal2_name.lower()) - self.goal3 = obj_utils.create_test_goal( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=goal3_name, display_name=goal3_name.lower()) - self.goal1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.strategy1 = obj_utils.create_test_strategy( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=strategy1_name, display_name=strategy1_name.lower(), - goal_id=self.goal1.id) - self.strategy2 = obj_utils.create_test_strategy( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=strategy2_name, display_name=strategy2_name.lower(), - goal_id=self.goal2.id) - self.strategy3 = obj_utils.create_test_strategy( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=strategy3_name, display_name=strategy3_name.lower(), - goal_id=self.goal3.id) - self.strategy1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.audit_template1 = obj_utils.create_test_audit_template( - self.context, name=self.audit_template1_name, - 
id=self._generate_id(), - uuid=utils.generate_uuid(), goal_id=self.goal1.id, - strategy_id=self.strategy1.id) - self.audit_template2 = obj_utils.create_test_audit_template( - self.context, name=self.audit_template2_name, - id=self._generate_id(), - uuid=utils.generate_uuid(), goal_id=self.goal2.id, - strategy_id=self.strategy2.id) - self.audit_template3 = obj_utils.create_test_audit_template( - self.context, name=self.audit_template3_name, - id=self._generate_id(), - uuid=utils.generate_uuid(), goal_id=self.goal3.id, - strategy_id=self.strategy3.id) - self.audit_template1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.audit1 = obj_utils.create_test_audit( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - goal_id=self.goal1.id, strategy_id=self.strategy1.id) - self.audit2 = obj_utils.create_test_audit( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - goal_id=self.goal2.id, strategy_id=self.strategy2.id) - self.audit3 = obj_utils.create_test_audit( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - goal_id=self.goal3.id, strategy_id=self.strategy3.id) - self.audit1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.action_plan1 = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit1.id, - id=self._generate_id(), uuid=utils.generate_uuid(), - strategy_id=self.strategy1.id) - self.action_plan2 = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit2.id, - id=self._generate_id(), - strategy_id=self.strategy2.id, - uuid=utils.generate_uuid()) - self.action_plan3 = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit3.id, - id=self._generate_id(), uuid=utils.generate_uuid(), - strategy_id=self.strategy3.id) - - self.action1 = obj_utils.create_test_action( - self.context, action_plan_id=self.action_plan1.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - self.action2 = obj_utils.create_test_action( - 
self.context, action_plan_id=self.action_plan2.id, - id=self._generate_id(), uuid=utils.generate_uuid()) - self.action3 = obj_utils.create_test_action( - self.context, action_plan_id=self.action_plan3.id, - id=self._generate_id(), uuid=utils.generate_uuid()) - self.action_plan1.soft_delete() - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_execute_max_number_exceeded(self, - m_destroy_goal, - m_destroy_strategy, - m_destroy_audit_template, - m_destroy_audit, - m_destroy_action_plan, - m_destroy_action): - self.cmd.age_in_days = None - self.cmd.max_number = 10 - - with freezegun.freeze_time(self.fake_today): - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - # The 1's and the 2's are purgeable (due to age of day set to 0), - # but max_number = 10, and because of no Db integrity violation, we - # should be able to purge only 6 objects. 
- self.assertEqual(m_destroy_goal.call_count, 1) - self.assertEqual(m_destroy_strategy.call_count, 1) - self.assertEqual(m_destroy_audit_template.call_count, 1) - self.assertEqual(m_destroy_audit.call_count, 1) - self.assertEqual(m_destroy_action_plan.call_count, 1) - self.assertEqual(m_destroy_action.call_count, 1) - - def test_find_deleted_entries(self): - self.cmd.age_in_days = None - - with freezegun.freeze_time(self.fake_today): - objects_map = self.cmd.find_objects_to_delete() - - self.assertEqual(len(objects_map.goals), 1) - self.assertEqual(len(objects_map.strategies), 1) - self.assertEqual(len(objects_map.audit_templates), 1) - self.assertEqual(len(objects_map.audits), 1) - self.assertEqual(len(objects_map.action_plans), 1) - self.assertEqual(len(objects_map.actions), 1) - - def test_find_deleted_and_expired_entries(self): - with freezegun.freeze_time(self.fake_today): - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - with freezegun.freeze_time(self.fake_today): - objects_map = self.cmd.find_objects_to_delete() - - # The 1's are purgeable (due to age of day set to 10) - self.assertEqual(len(objects_map.goals), 1) - self.assertEqual(len(objects_map.strategies), 1) - self.assertEqual(len(objects_map.audit_templates), 1) - self.assertEqual(len(objects_map.audits), 1) - self.assertEqual(len(objects_map.action_plans), 1) - self.assertEqual(len(objects_map.actions), 1) - - def test_find_deleted_and_nonexpired_related_entries(self): - with freezegun.freeze_time(self.fake_today): - # orphan audit template - audit_template4 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal2.id, - name=self.generate_unique_name(prefix="Audit Template 4 "), - strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit4 = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template4.id, - 
strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - action_plan4 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit4.id, strategy_id=self.strategy1.id) - action4 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan4.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - audit_template5 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal1.id, - name=self.generate_unique_name(prefix="Audit Template 5 "), - strategy_id=None, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit5 = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template5.id, - strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - action_plan5 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit5.id, strategy_id=self.strategy1.id) - action5 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan5.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - # All the 4's should be purged as well because they are orphans - # even though they were not deleted - - # All the 5's should be purged as well even though they are not - # expired because their related audit template is itself expired - audit_template5.soft_delete() - audit5.soft_delete() - action_plan5.soft_delete() - - with freezegun.freeze_time(self.fake_today): - objects_map = self.cmd.find_objects_to_delete() - - self.assertEqual(len(objects_map.goals), 1) - self.assertEqual(len(objects_map.strategies), 1) - self.assertEqual(len(objects_map.audit_templates), 3) - self.assertEqual(len(objects_map.audits), 3) - self.assertEqual(len(objects_map.action_plans), 3) - 
self.assertEqual(len(objects_map.actions), 3) - self.assertEqual( - set([self.action1.id, action4.id, action5.id]), - set([entry.id for entry in objects_map.actions])) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command(self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - m_destroy_audit_template.assert_called_once_with( - self.audit_template1.uuid) - m_destroy_audit.assert_called_with( - self.audit1.uuid) - m_destroy_action_plan.assert_called_with( - self.action_plan1.uuid) - m_destroy_action.assert_called_with( - self.action1.uuid) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_nonexpired_related_entries( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - with freezegun.freeze_time(self.fake_today): - # orphan audit template - audit_template4 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal2.id, - name=self.generate_unique_name(prefix="Audit Template 4 "), - strategy_id=None, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit4 = obj_utils.create_test_audit( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - 
audit_template_id=audit_template4.id) - action_plan4 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit4.id, strategy_id=self.strategy1.id) - action4 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan4.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - audit_template5 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal1.id, - name=self.generate_unique_name(prefix="Audit Template 5 "), - strategy_id=None, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit5 = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template5.id, - strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - action_plan5 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit5.id, strategy_id=self.strategy1.id) - action5 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan5.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - # All the 4's should be purged as well because they are orphans - # even though they were not deleted - - # All the 5's should be purged as well even though they are not - # expired because their related audit template is itself expired - audit_template5.soft_delete() - audit5.soft_delete() - action_plan5.soft_delete() - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 1) - self.assertEqual(m_destroy_strategy.call_count, 1) - self.assertEqual(m_destroy_audit_template.call_count, 3) - self.assertEqual(m_destroy_audit.call_count, 3) - self.assertEqual(m_destroy_action_plan.call_count, 3) - self.assertEqual(m_destroy_action.call_count, 3) - - 
m_destroy_audit_template.assert_any_call(self.audit_template1.uuid) - m_destroy_audit.assert_any_call(self.audit1.uuid) - m_destroy_audit.assert_any_call(audit4.uuid) - m_destroy_action_plan.assert_any_call(self.action_plan1.uuid) - m_destroy_action_plan.assert_any_call(action_plan4.uuid) - m_destroy_action_plan.assert_any_call(action_plan5.uuid) - m_destroy_action.assert_any_call(self.action1.uuid) - m_destroy_action.assert_any_call(action4.uuid) - m_destroy_action.assert_any_call(action5.uuid) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_strategy_uuid( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - self.cmd.exclude_orphans = False - self.cmd.uuid = self.strategy1.uuid - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 0) - self.assertEqual(m_destroy_strategy.call_count, 1) - self.assertEqual(m_destroy_audit_template.call_count, 1) - self.assertEqual(m_destroy_audit.call_count, 1) - self.assertEqual(m_destroy_action_plan.call_count, 1) - self.assertEqual(m_destroy_action.call_count, 1) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_audit_template_not_expired( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, 
m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - self.cmd.exclude_orphans = True - self.cmd.uuid = self.audit_template2.uuid - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 0) - self.assertEqual(m_destroy_strategy.call_count, 0) - self.assertEqual(m_destroy_audit_template.call_count, 0) - self.assertEqual(m_destroy_audit.call_count, 0) - self.assertEqual(m_destroy_action_plan.call_count, 0) - self.assertEqual(m_destroy_action.call_count, 0) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_audit_template_not_soft_deleted( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - self.cmd.exclude_orphans = False - self.cmd.uuid = self.audit_template3.uuid - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 0) - self.assertEqual(m_destroy_strategy.call_count, 0) - self.assertEqual(m_destroy_audit_template.call_count, 0) - self.assertEqual(m_destroy_audit.call_count, 0) - self.assertEqual(m_destroy_action_plan.call_count, 0) - self.assertEqual(m_destroy_action.call_count, 0) diff --git a/watcher/tests/db/test_scoring_engine.py b/watcher/tests/db/test_scoring_engine.py deleted file mode 100644 index 02da05e..0000000 --- a/watcher/tests/db/test_scoring_engine.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Tests for manipulating ScoringEngine via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbScoringEngineFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbScoringEngineFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.scoring_engine1 = utils.create_test_scoring_engine( - id=1, uuid='e8370ede-4f39-11e6-9ffa-08002722cb22', - name="se-1", description="Scoring Engine 1", metainfo="a1=b1") - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.scoring_engine2 = utils.create_test_scoring_engine( - id=2, uuid='e8370ede-4f39-11e6-9ffa-08002722cb23', - name="se-2", description="Scoring Engine 2", metainfo="a2=b2") - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.scoring_engine3 = utils.create_test_scoring_engine( - id=3, uuid='e8370ede-4f39-11e6-9ffa-08002722cb24', - name="se-3", description="Scoring Engine 3", metainfo="a3=b3") - - def _soft_delete_scoring_engines(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine2.id) - with 
freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine3.id) - - def _update_scoring_engines(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_scoring_engine( - self.scoring_engine1.id, - values={"description": "scoring_engine1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_scoring_engine( - self.scoring_engine2.id, - values={"description": "scoring_engine2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_scoring_engine( - self.scoring_engine3.id, - values={"description": "scoring_engine3"}) - - def test_get_scoring_engine_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_deleted_at_eq(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_deleted_at_lt(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def 
test_get_scoring_engine_list_filter_deleted_at_lte(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_deleted_at_gt(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_deleted_at_gte(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine1['id'], self.scoring_engine2['id']]), - set([r.id for r in res])) - - # created_at # - - def test_get_scoring_engine_list_filter_created_at_eq(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_created_at_lt(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_created_at_lte(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_created_at_gt(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.scoring_engine1['id']], 
[r.id for r in res]) - - def test_get_scoring_engine_list_filter_created_at_gte(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine1['id'], self.scoring_engine2['id']]), - set([r.id for r in res])) - - # updated_at # - - def test_get_scoring_engine_list_filter_updated_at_eq(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_updated_at_lt(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_updated_at_lte(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_updated_at_gt(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_updated_at_gte(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine1['id'], self.scoring_engine2['id']]), - set([r.id for r in res])) - - -class DbScoringEngineTestCase(base.DbTestCase): - - def _create_test_scoring_engine(self, **kwargs): - scoring_engine = 
utils.get_test_scoring_engine(**kwargs) - self.dbapi.create_scoring_engine(scoring_engine) - return scoring_engine - - def test_get_scoring_engine_list(self): - names = [] - for i in range(1, 4): - scoring_engine = utils.create_test_scoring_engine( - id=i, - uuid=w_utils.generate_uuid(), - name="SE_ID_%s" % i, - description='My ScoringEngine {0}'.format(i), - metainfo='a{0}=b{0}'.format(i)) - names.append(six.text_type(scoring_engine['name'])) - scoring_engines = self.dbapi.get_scoring_engine_list(self.context) - scoring_engines_names = [se.name for se in scoring_engines] - self.assertEqual(sorted(names), sorted(scoring_engines_names)) - - def test_get_scoring_engine_list_with_filters(self): - scoring_engine1 = self._create_test_scoring_engine( - id=1, - uuid=w_utils.generate_uuid(), - name="SE_ID_1", - description='ScoringEngine 1', - metainfo="a1=b1", - ) - scoring_engine2 = self._create_test_scoring_engine( - id=2, - uuid=w_utils.generate_uuid(), - name="SE_ID_2", - description='ScoringEngine 2', - metainfo="a2=b2", - ) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'description': 'ScoringEngine 1'}) - self.assertEqual([scoring_engine1['name']], [r.name for r in res]) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'description': 'ScoringEngine 3'}) - self.assertEqual([], [r.name for r in res]) - - res = self.dbapi.get_scoring_engine_list( - self.context, - filters={'description': 'ScoringEngine 2'}) - self.assertEqual([scoring_engine2['name']], [r.name for r in res]) - - def test_get_scoring_engine_by_id(self): - created_scoring_engine = self._create_test_scoring_engine() - scoring_engine = self.dbapi.get_scoring_engine_by_id( - self.context, created_scoring_engine['id']) - self.assertEqual(scoring_engine.id, created_scoring_engine['id']) - - def test_get_scoring_engine_by_uuid(self): - created_scoring_engine = self._create_test_scoring_engine() - scoring_engine = self.dbapi.get_scoring_engine_by_uuid( - 
self.context, created_scoring_engine['uuid']) - self.assertEqual(scoring_engine.uuid, created_scoring_engine['uuid']) - - def test_get_scoring_engine_by_name(self): - created_scoring_engine = self._create_test_scoring_engine() - scoring_engine = self.dbapi.get_scoring_engine_by_name( - self.context, created_scoring_engine['name']) - self.assertEqual(scoring_engine.name, created_scoring_engine['name']) - - def test_get_scoring_engine_that_does_not_exist(self): - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.get_scoring_engine_by_id, - self.context, 404) - - def test_update_scoring_engine(self): - scoring_engine = self._create_test_scoring_engine() - res = self.dbapi.update_scoring_engine( - scoring_engine['id'], {'description': 'updated-model'}) - self.assertEqual('updated-model', res.description) - - def test_update_scoring_engine_id(self): - scoring_engine = self._create_test_scoring_engine() - self.assertRaises(exception.Invalid, - self.dbapi.update_scoring_engine, - scoring_engine['id'], - {'uuid': w_utils.generate_uuid()}) - - def test_update_scoring_engine_that_does_not_exist(self): - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.update_scoring_engine, - 404, - {'description': ''}) - - def test_destroy_scoring_engine(self): - scoring_engine = self._create_test_scoring_engine() - self.dbapi.destroy_scoring_engine(scoring_engine['id']) - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.get_scoring_engine_by_id, - self.context, scoring_engine['id']) - - def test_destroy_scoring_engine_that_does_not_exist(self): - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.destroy_scoring_engine, 404) - - def test_create_scoring_engine_already_exists(self): - scoring_engine_id = "SE_ID" - self._create_test_scoring_engine(name=scoring_engine_id) - self.assertRaises(exception.ScoringEngineAlreadyExists, - self._create_test_scoring_engine, - name=scoring_engine_id) diff --git a/watcher/tests/db/test_service.py 
b/watcher/tests/db/test_service.py deleted file mode 100644 index cda5470..0000000 --- a/watcher/tests/db/test_service.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Tests for manipulating Service via the DB API""" - -import freezegun - -from oslo_utils import timeutils - -from watcher.common import exception -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbServiceFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbServiceFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - service1_name = "SERVICE_ID_1" - service2_name = "SERVICE_ID_2" - service3_name = "SERVICE_ID_3" - - with freezegun.freeze_time(self.FAKE_TODAY): - self.service1 = utils.create_test_service( - id=1, name=service1_name, host="controller", - last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.service2 = utils.create_test_service( - id=2, name=service2_name, host="controller", - last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.service3 = utils.create_test_service( - id=3, name=service3_name, host="controller", - 
last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) - - def _soft_delete_services(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_service(self.service1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_service(self.service2.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_service(self.service3.id) - - def _update_services(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_service( - self.service1.id, values={"host": "controller1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_service( - self.service2.id, values={"host": "controller2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_service( - self.service3.id, values={"host": "controller3"}) - - def test_get_service_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_service(self.service1.id) - - res = self.dbapi.get_service_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.service1['name']], [r.name for r in res]) - - def test_get_service_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_service(self.service1.id) - - res = self.dbapi.get_service_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.service2['name'], self.service3['name']]), - set([r.name for r in res])) - - def test_get_service_list_filter_deleted_at_eq(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_deleted_at_lt(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - 
set([r.id for r in res])) - - def test_get_service_list_filter_deleted_at_lte(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_deleted_at_gt(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_deleted_at_gte(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service1['id'], self.service2['id']]), - set([r.id for r in res])) - - # created_at # - - def test_get_service_list_filter_created_at_eq(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_created_at_lt(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_created_at_lte(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_created_at_gt(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_created_at_gte(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__gte': 
self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service1['id'], self.service2['id']]), - set([r.id for r in res])) - - # updated_at # - - def test_get_service_list_filter_updated_at_eq(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_updated_at_lt(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_updated_at_lte(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_updated_at_gt(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_updated_at_gte(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service1['id'], self.service2['id']]), - set([r.id for r in res])) - - -class DbServiceTestCase(base.DbTestCase): - - def _create_test_service(self, **kwargs): - service = utils.get_test_service(**kwargs) - self.dbapi.create_service(service) - return service - - def test_get_service_list(self): - ids = [] - for i in range(1, 4): - service = utils.create_test_service( - id=i, - name="SERVICE_ID_%s" % i, - host="controller_{0}".format(i)) - ids.append(service['id']) - services = self.dbapi.get_service_list(self.context) - service_ids = [s.id for s in 
services] - self.assertEqual(sorted(ids), sorted(service_ids)) - - def test_get_service_list_with_filters(self): - service1 = self._create_test_service( - id=1, - name="SERVICE_ID_1", - host="controller_1", - ) - service2 = self._create_test_service( - id=2, - name="SERVICE_ID_2", - host="controller_2", - ) - - res = self.dbapi.get_service_list( - self.context, filters={'host': 'controller_1'}) - self.assertEqual([service1['id']], [r.id for r in res]) - - res = self.dbapi.get_service_list( - self.context, filters={'host': 'controller_3'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_service_list( - self.context, - filters={'host': 'controller_2'}) - self.assertEqual([service2['id']], [r.id for r in res]) - - def test_get_service_by_name(self): - created_service = self._create_test_service() - service = self.dbapi.get_service_by_name( - self.context, created_service['name']) - self.assertEqual(service.name, created_service['name']) - - def test_get_service_that_does_not_exist(self): - self.assertRaises(exception.ServiceNotFound, - self.dbapi.get_service_by_id, - self.context, 404) - - def test_update_service(self): - service = self._create_test_service() - res = self.dbapi.update_service( - service['id'], {'host': 'controller_test'}) - self.assertEqual('controller_test', res.host) - - def test_update_service_that_does_not_exist(self): - self.assertRaises(exception.ServiceNotFound, - self.dbapi.update_service, - 405, - {'name': ''}) - - def test_create_service_already_exists(self): - service_id = "STRATEGY_ID" - self._create_test_service(name=service_id) - self.assertRaises(exception.ServiceAlreadyExists, - self._create_test_service, - name=service_id) diff --git a/watcher/tests/db/test_strategy.py b/watcher/tests/db/test_strategy.py deleted file mode 100644 index 081fa79..0000000 --- a/watcher/tests/db/test_strategy.py +++ /dev/null @@ -1,364 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache 
License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Tests for manipulating Strategy via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbStrategyFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbStrategyFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - strategy1_name = "STRATEGY_ID_1" - strategy2_name = "STRATEGY_ID_2" - strategy3_name = "STRATEGY_ID_3" - - self.goal1 = utils.create_test_goal( - id=1, uuid=w_utils.generate_uuid(), - name="GOAL_ID", display_name="Goal") - self.goal2 = utils.create_test_goal( - id=2, uuid=w_utils.generate_uuid(), - name="DUMMY", display_name="Dummy") - - with freezegun.freeze_time(self.FAKE_TODAY): - self.strategy1 = utils.create_test_strategy( - id=1, uuid=w_utils.generate_uuid(), - name=strategy1_name, display_name="Strategy 1", - goal_id=self.goal1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.strategy2 = utils.create_test_strategy( - id=2, uuid=w_utils.generate_uuid(), - name=strategy2_name, display_name="Strategy 2", - goal_id=self.goal1.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.strategy3 = utils.create_test_strategy( - id=3, 
uuid=w_utils.generate_uuid(), - name=strategy3_name, display_name="Strategy 3", - goal_id=self.goal2.id) - - def _soft_delete_strategys(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_strategy(self.strategy1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_strategy(self.strategy2.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_strategy(self.strategy3.id) - - def _update_strategies(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_strategy( - self.strategy1.id, values={"display_name": "strategy1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_strategy( - self.strategy2.id, values={"display_name": "strategy2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_strategy( - self.strategy3.id, values={"display_name": "strategy3"}) - - def test_get_strategy_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_strategy(self.strategy1.id) - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_strategy(self.strategy1.id) - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_deleted_at_eq(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_deleted_at_lt(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__lt': 
self.FAKE_TODAY}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_deleted_at_lte(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_deleted_at_gt(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_deleted_at_gte(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy1['uuid'], self.strategy2['uuid']]), - set([r.uuid for r in res])) - - # created_at # - - def test_get_strategy_list_filter_created_at_eq(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_created_at_lt(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_created_at_lte(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_created_at_gt(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.strategy1['uuid']], 
[r.uuid for r in res]) - - def test_get_strategy_list_filter_created_at_gte(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy1['uuid'], self.strategy2['uuid']]), - set([r.uuid for r in res])) - - # updated_at # - - def test_get_strategy_list_filter_updated_at_eq(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_updated_at_lt(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_updated_at_lte(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_updated_at_gt(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_updated_at_gte(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy1['uuid'], self.strategy2['uuid']]), - set([r.uuid for r in res])) - - -class DbStrategyTestCase(base.DbTestCase): - - def _create_test_strategy(self, **kwargs): - strategy = utils.get_test_strategy(**kwargs) - self.dbapi.create_strategy(strategy) - return strategy - - def test_get_strategy_list(self): - uuids = [] - for i in 
range(1, 4): - strategy = utils.create_test_strategy( - id=i, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_%s" % i, - display_name='My Strategy {0}'.format(i)) - uuids.append(six.text_type(strategy['uuid'])) - strategies = self.dbapi.get_strategy_list(self.context) - strategy_uuids = [s.uuid for s in strategies] - self.assertEqual(sorted(uuids), sorted(strategy_uuids)) - for strategy in strategies: - self.assertIsNone(strategy.goal) - - def test_get_strategy_list_eager(self): - _goal = utils.get_test_goal() - goal = self.dbapi.create_goal(_goal) - uuids = [] - for i in range(1, 4): - strategy = utils.create_test_strategy( - id=i, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_%s" % i, - display_name='My Strategy {0}'.format(i), - goal_id=goal.id) - uuids.append(six.text_type(strategy['uuid'])) - strategys = self.dbapi.get_strategy_list(self.context, eager=True) - strategy_map = {a.uuid: a for a in strategys} - self.assertEqual(sorted(uuids), sorted(strategy_map.keys())) - eager_strategy = strategy_map[strategy.uuid] - self.assertEqual(goal.as_dict(), eager_strategy.goal.as_dict()) - - def test_get_strategy_list_with_filters(self): - strategy1 = self._create_test_strategy( - id=1, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_1", - display_name='Strategy 1', - ) - strategy2 = self._create_test_strategy( - id=2, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_2", - display_name='Strategy 2', - ) - - res = self.dbapi.get_strategy_list( - self.context, filters={'display_name': 'Strategy 1'}) - self.assertEqual([strategy1['uuid']], [r.uuid for r in res]) - - res = self.dbapi.get_strategy_list( - self.context, filters={'display_name': 'Strategy 3'}) - self.assertEqual([], [r.uuid for r in res]) - - res = self.dbapi.get_strategy_list( - self.context, - filters={'goal_id': 1}) - self.assertEqual([strategy1['uuid'], strategy2['uuid']], - [r.uuid for r in res]) - - res = self.dbapi.get_strategy_list( - self.context, - filters={'display_name': 'Strategy 
2'}) - self.assertEqual([strategy2['uuid']], [r.uuid for r in res]) - - def test_get_strategy_by_uuid(self): - created_strategy = self._create_test_strategy() - strategy = self.dbapi.get_strategy_by_uuid( - self.context, created_strategy['uuid']) - self.assertEqual(strategy.uuid, created_strategy['uuid']) - - def test_get_strategy_by_name(self): - created_strategy = self._create_test_strategy() - strategy = self.dbapi.get_strategy_by_name( - self.context, created_strategy['name']) - self.assertEqual(strategy.name, created_strategy['name']) - - def test_get_strategy_that_does_not_exist(self): - self.assertRaises(exception.StrategyNotFound, - self.dbapi.get_strategy_by_id, - self.context, 404) - - def test_update_strategy(self): - strategy = self._create_test_strategy() - res = self.dbapi.update_strategy( - strategy['uuid'], {'display_name': 'updated-model'}) - self.assertEqual('updated-model', res.display_name) - - def test_update_goal_id(self): - strategy = self._create_test_strategy() - self.assertRaises(exception.Invalid, - self.dbapi.update_strategy, strategy['uuid'], - {'uuid': 'new_strategy_id'}) - - def test_update_strategy_that_does_not_exist(self): - self.assertRaises(exception.StrategyNotFound, - self.dbapi.update_strategy, - 404, - {'display_name': ''}) - - def test_destroy_strategy(self): - strategy = self._create_test_strategy() - self.dbapi.destroy_strategy(strategy['uuid']) - self.assertRaises(exception.StrategyNotFound, - self.dbapi.get_strategy_by_id, - self.context, strategy['uuid']) - - def test_destroy_strategy_that_does_not_exist(self): - self.assertRaises(exception.StrategyNotFound, - self.dbapi.destroy_strategy, 404) - - def test_create_strategy_already_exists(self): - strategy_id = "STRATEGY_ID" - self._create_test_strategy(name=strategy_id) - self.assertRaises(exception.StrategyAlreadyExists, - self._create_test_strategy, - name=strategy_id) diff --git a/watcher/tests/db/utils.py b/watcher/tests/db/utils.py deleted file mode 100644 index 
65b88c6..0000000 --- a/watcher/tests/db/utils.py +++ /dev/null @@ -1,333 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Watcher test utilities.""" - -from oslo_utils import timeutils - -from watcher.db import api as db_api -from watcher.db.sqlalchemy import models -from watcher import objects - - -def id_generator(): - id_ = 1 - while True: - yield id_ - id_ += 1 - - -def _load_relationships(model, db_data): - rel_data = {} - relationships = db_api.get_instance()._get_relationships(model) - for name, relationship in relationships.items(): - related_model = relationship.argument - if not db_data.get(name): - rel_data[name] = None - else: - rel_data[name] = related_model(**db_data.get(name)) - - return rel_data - - -def get_test_audit_template(**kwargs): - audit_template_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), - 'goal_id': kwargs.get('goal_id', 1), - 'strategy_id': kwargs.get('strategy_id', None), - 'name': kwargs.get('name', 'My Audit Template'), - 'description': kwargs.get('description', 'Desc. Of My Audit Template'), - 'scope': kwargs.get('scope', []), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. 
- audit_template_data.update( - _load_relationships(models.AuditTemplate, kwargs)) - - return audit_template_data - - -def create_test_audit_template(**kwargs): - """Create test audit template entry in DB and return AuditTemplate DB object. - - Function to be used to create test AuditTemplate objects in the database. - :param kwargs: kwargsargs with overriding values for audit template's - attributes. - :returns: Test AuditTemplate DB object. - """ - audit_template = get_test_audit_template(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del audit_template['id'] - dbapi = db_api.get_instance() - return dbapi.create_audit_template(audit_template) - - -def get_test_audit(**kwargs): - audit_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), - 'audit_type': kwargs.get('audit_type', 'ONESHOT'), - 'state': kwargs.get('state', objects.audit.State.PENDING), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - 'parameters': kwargs.get('parameters', {}), - 'interval': kwargs.get('interval', '3600'), - 'goal_id': kwargs.get('goal_id', 1), - 'strategy_id': kwargs.get('strategy_id', None), - 'scope': kwargs.get('scope', []), - 'auto_trigger': kwargs.get('auto_trigger', False), - 'next_run_time': kwargs.get('next_run_time') - } - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - audit_data.update(_load_relationships(models.Audit, kwargs)) - - return audit_data - - -def create_test_audit(**kwargs): - """Create test audit entry in DB and return Audit DB object. - - Function to be used to create test Audit objects in the database. - :param kwargs: kwargsargs with overriding values for audit's attributes. - :returns: Test Audit DB object. 
- """ - audit = get_test_audit(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del audit['id'] - dbapi = db_api.get_instance() - return dbapi.create_audit(audit) - - -def get_test_action(**kwargs): - action_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), - 'action_plan_id': kwargs.get('action_plan_id', 1), - 'action_type': kwargs.get('action_type', 'nop'), - 'input_parameters': - kwargs.get('input_parameters', - {'key1': 'val1', - 'key2': 'val2', - 'resource_id': - '10a47dd1-4874-4298-91cf-eff046dbdb8d'}), - 'state': kwargs.get('state', objects.action_plan.State.PENDING), - 'parents': kwargs.get('parents', []), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - action_data.update(_load_relationships(models.Action, kwargs)) - - return action_data - - -def create_test_action(**kwargs): - """Create test action entry in DB and return Action DB object. - - Function to be used to create test Action objects in the database. - :param kwargs: kwargsargs with overriding values for action's attributes. - :returns: Test Action DB object. 
- """ - action = get_test_action(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del action['id'] - dbapi = db_api.get_instance() - return dbapi.create_action(action) - - -def get_test_action_plan(**kwargs): - action_plan_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '76be87bd-3422-43f9-93a0-e85a577e3061'), - 'state': kwargs.get('state', objects.action_plan.State.ONGOING), - 'audit_id': kwargs.get('audit_id', 1), - 'strategy_id': kwargs.get('strategy_id', 1), - 'global_efficacy': kwargs.get('global_efficacy', {}), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - action_plan_data.update(_load_relationships(models.ActionPlan, kwargs)) - - return action_plan_data - - -def create_test_action_plan(**kwargs): - """Create test action plan entry in DB and return Action Plan DB object. - - Function to be used to create test Action objects in the database. - :param kwargs: kwargsargs with overriding values for action's attributes. - :returns: Test Action DB object. 
- """ - action = get_test_action_plan(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del action['id'] - dbapi = db_api.get_instance() - return dbapi.create_action_plan(action) - - -def get_test_goal(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'f7ad87ae-4298-91cf-93a0-f35a852e3652'), - 'name': kwargs.get('name', 'TEST'), - 'display_name': kwargs.get('display_name', 'test goal'), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - 'efficacy_specification': kwargs.get('efficacy_specification', []), - } - - -def create_test_goal(**kwargs): - """Create test goal entry in DB and return Goal DB object. - - Function to be used to create test Goal objects in the database. - :param kwargs: kwargs which override default goal values of its attributes. - :returns: Test Goal DB object. - """ - goal = get_test_goal(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_goal(goal) - - -def get_test_scoring_engine(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'e8370ede-4f39-11e6-9ffa-08002722cb21'), - 'name': kwargs.get('name', 'test-se-01'), - 'description': kwargs.get('description', 'test scoring engine 01'), - 'metainfo': kwargs.get('metainfo', 'test_attr=test_val'), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - -def create_test_scoring_engine(**kwargs): - """Create test scoring engine in DB and return ScoringEngine DB object. - - Function to be used to create test ScoringEngine objects in the database. - :param kwargs: kwargs with overriding values for SE'sattributes. - :returns: Test ScoringEngine DB object. 
- """ - scoring_engine = get_test_scoring_engine(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_scoring_engine(scoring_engine) - - -def get_test_strategy(**kwargs): - strategy_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'cb3d0b58-4415-4d90-b75b-1e96878730e3'), - 'name': kwargs.get('name', 'TEST'), - 'display_name': kwargs.get('display_name', 'test strategy'), - 'goal_id': kwargs.get('goal_id', 1), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - 'parameters_spec': kwargs.get('parameters_spec', {}), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - strategy_data.update(_load_relationships(models.Strategy, kwargs)) - - return strategy_data - - -def get_test_service(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'name': kwargs.get('name', 'watcher-service'), - 'host': kwargs.get('host', 'controller'), - 'last_seen_up': kwargs.get( - 'last_seen_up', - timeutils.parse_isotime('2016-09-22T08:32:06').replace(tzinfo=None) - ), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - -def create_test_service(**kwargs): - """Create test service entry in DB and return Service DB object. - - Function to be used to create test Service objects in the database. - :param kwargs: kwargs with overriding values for service's attributes. - :returns: Test Service DB object. - """ - service = get_test_service(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_service(service) - - -def create_test_strategy(**kwargs): - """Create test strategy entry in DB and return Strategy DB object. - - Function to be used to create test Strategy objects in the database. - :param kwargs: kwargs with overriding values for strategy's attributes. - :returns: Test Strategy DB object. 
- """ - strategy = get_test_strategy(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_strategy(strategy) - - -def get_test_efficacy_indicator(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '202cfcf9-811c-411a-8a35-d8351f64eb24'), - 'name': kwargs.get('name', 'test_indicator'), - 'description': kwargs.get('description', 'Test indicator'), - 'unit': kwargs.get('unit', '%'), - 'value': kwargs.get('value', 0), - 'action_plan_id': kwargs.get('action_plan_id', 1), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - -def create_test_efficacy_indicator(**kwargs): - """Create and return a test efficacy indicator entry in DB. - - Function to be used to create test EfficacyIndicator objects in the DB. - :param kwargs: kwargs for overriding the values of the attributes - :returns: Test EfficacyIndicator DB object. - """ - efficacy_indicator = get_test_efficacy_indicator(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del efficacy_indicator['id'] - dbapi = db_api.get_instance() - return dbapi.create_efficacy_indicator(efficacy_indicator) diff --git a/watcher/tests/decision_engine/__init__.py b/watcher/tests/decision_engine/__init__.py deleted file mode 100644 index 2327bf1..0000000 --- a/watcher/tests/decision_engine/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'Jean-Emile DARTOIS ' diff --git a/watcher/tests/decision_engine/audit/__init__.py b/watcher/tests/decision_engine/audit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/audit/test_audit_handlers.py b/watcher/tests/decision_engine/audit/test_audit_handlers.py deleted file mode 100644 index ed5ca96..0000000 --- a/watcher/tests/decision_engine/audit/test_audit_handlers.py +++ /dev/null @@ -1,363 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache 
License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -import mock -from oslo_utils import uuidutils - -from apscheduler import job - -from watcher.applier import rpcapi -from watcher.common import exception -from watcher.common import scheduling -from watcher.db.sqlalchemy import api as sq_api -from watcher.decision_engine.audit import continuous -from watcher.decision_engine.audit import oneshot -from watcher.decision_engine.model.collector import manager -from watcher.decision_engine.strategy.strategies import dummy_strategy -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.decision_engine.model import faker_cluster_state as faker -from watcher.tests.objects import utils as obj_utils - - -class TestOneShotAuditHandler(base.DbTestCase): - - def setUp(self): - super(TestOneShotAuditHandler, self).setUp() - p_audit_notifications = mock.patch.object( - notifications, 'audit', autospec=True) - self.m_audit_notifications = p_audit_notifications.start() - self.addCleanup(p_audit_notifications.stop) - - self.goal = obj_utils.create_test_goal( - self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) - self.strategy = obj_utils.create_test_strategy( - self.context, name=dummy_strategy.DummyStrategy.get_name(), - goal_id=self.goal.id) - audit_template = obj_utils.create_test_audit_template( - self.context, strategy_id=self.strategy.id) - self.audit = obj_utils.create_test_audit( - self.context, - 
uuid=uuidutils.generate_uuid(), - goal_id=self.goal.id, - strategy_id=self.strategy.id, - audit_template_id=audit_template.id, - goal=self.goal) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_without_errors(self, m_collector): - m_collector.return_value = faker.FakerModelCollector() - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.END), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual( - expected_calls, - self.m_audit_notifications.send_action_notification.call_args_list) - - @mock.patch.object(dummy_strategy.DummyStrategy, "do_execute") - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_with_error(self, m_collector, m_do_execute): - m_collector.return_value = faker.FakerModelCollector() - m_do_execute.side_effect = Exception - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - priority=objects.fields.NotificationPriority.ERROR, - phase=objects.fields.NotificationPhase.ERROR)] - - self.assertEqual( - expected_calls, - 
self.m_audit_notifications.send_action_notification.call_args_list) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_state_succeeded(self, m_collector): - m_collector.return_value = faker.FakerModelCollector() - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - audit = objects.audit.Audit.get_by_uuid(self.context, self.audit.uuid) - self.assertEqual(objects.audit.State.SUCCEEDED, audit.state) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.END), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual( - expected_calls, - self.m_audit_notifications.send_action_notification.call_args_list) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_send_notification(self, m_collector): - m_collector.return_value = faker.FakerModelCollector() - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.END), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - 
action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual( - expected_calls, - self.m_audit_notifications.send_action_notification.call_args_list) - - -class TestAutoTriggerActionPlan(base.DbTestCase): - - def setUp(self): - super(TestAutoTriggerActionPlan, self).setUp() - self.goal = obj_utils.create_test_goal( - self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) - self.strategy = obj_utils.create_test_strategy( - self.context, name=dummy_strategy.DummyStrategy.get_name(), - goal_id=self.goal.id) - audit_template = obj_utils.create_test_audit_template( - self.context) - self.audit = obj_utils.create_test_audit( - self.context, - id=0, - uuid=uuidutils.generate_uuid(), - audit_template_id=audit_template.id, - goal_id=self.goal.id, - audit_type=objects.audit.AuditType.CONTINUOUS.value, - goal=self.goal, - auto_trigger=True) - self.ongoing_action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=uuidutils.generate_uuid(), - audit_id=self.audit.id, - strategy_id=self.strategy.id, - audit=self.audit, - strategy=self.strategy, - ) - self.recommended_action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=uuidutils.generate_uuid(), - state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, - strategy_id=self.strategy.id, - audit=self.audit, - strategy=self.strategy, - ) - - @mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute') - @mock.patch.object(objects.action_plan.ActionPlan, 'list') - def test_trigger_audit_with_actionplan_ongoing(self, mock_list, - mock_do_execute): - mock_list.return_value = [self.ongoing_action_plan] - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - self.assertFalse(mock_do_execute.called) - - @mock.patch.object(rpcapi.ApplierAPI, 'launch_action_plan') - @mock.patch.object(objects.action_plan.ActionPlan, 'list') - @mock.patch.object(objects.audit.Audit, 'get_by_id') - def 
test_trigger_action_plan_without_ongoing(self, mock_get_by_id, - mock_list, mock_applier): - mock_get_by_id.return_value = self.audit - mock_list.return_value = [] - auto_trigger_handler = oneshot.OneShotAuditHandler() - with mock.patch.object(auto_trigger_handler, - 'do_schedule') as m_schedule: - m_schedule().uuid = self.recommended_action_plan.uuid - auto_trigger_handler.post_execute(self.audit, mock.MagicMock(), - self.context) - mock_applier.assert_called_once_with(self.context, - self.recommended_action_plan.uuid) - - -class TestContinuousAuditHandler(base.DbTestCase): - - def setUp(self): - super(TestContinuousAuditHandler, self).setUp() - self.goal = obj_utils.create_test_goal( - self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) - audit_template = obj_utils.create_test_audit_template( - self.context) - self.audits = [ - obj_utils.create_test_audit( - self.context, - id=id_, - uuid=uuidutils.generate_uuid(), - audit_template_id=audit_template.id, - goal_id=self.goal.id, - audit_type=objects.audit.AuditType.CONTINUOUS.value, - goal=self.goal) - for id_ in range(2, 4)] - - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_audits_periodically_with_interval( - self, mock_list, mock_jobs, m_add_job, m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - self.audits[0].next_run_time = (datetime.datetime.now() - - datetime.timedelta(seconds=1800)) - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_add_job.return_value = mock.MagicMock() - - audit_handler.launch_audits_periodically() - m_service.assert_called() - m_engine.assert_called() - m_add_job.assert_called() - mock_jobs.assert_called() - 
self.assertIsNotNone(self.audits[0].next_run_time) - self.assertIsNone(self.audits[1].next_run_time) - - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_audits_periodically_with_cron( - self, mock_list, mock_jobs, m_add_job, m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - self.audits[0].interval = "*/5 * * * *" - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_add_job.return_value = mock.MagicMock() - - audit_handler.launch_audits_periodically() - m_service.assert_called() - m_engine.assert_called() - m_add_job.assert_called() - mock_jobs.assert_called() - self.assertIsNotNone(self.audits[0].next_run_time) - self.assertIsNone(self.audits[1].next_run_time) - - @mock.patch.object(continuous.ContinuousAuditHandler, '_next_cron_time') - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_audits_periodically_with_invalid_cron( - self, mock_list, mock_jobs, m_add_job, m_engine, m_service, - mock_cron): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - self.audits[0].interval = "*/5* * * *" - mock_cron.side_effect = exception.CronFormatIsInvalid - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_add_job.return_value = mock.MagicMock() - - self.assertRaises(exception.CronFormatIsInvalid, - audit_handler.launch_audits_periodically) - - @mock.patch.object(objects.service.Service, 
'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_multiply_audits_periodically(self, mock_list, - mock_jobs, m_add_job, - m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_service.return_value = mock.MagicMock() - calls = [mock.call(audit_handler.execute_audit, 'interval', - args=[mock.ANY, mock.ANY], - seconds=3600, - name='execute_audit', - next_run_time=mock.ANY) for _ in self.audits] - audit_handler.launch_audits_periodically() - m_add_job.assert_has_calls(calls) - - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_period_audit_not_called_when_deleted(self, mock_list, - mock_jobs, m_add_job, - m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - mock_jobs.return_value = mock.MagicMock() - m_service.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - self.audits[1].state = objects.audit.State.CANCELLED - self.audits[0].state = objects.audit.State.SUSPENDED - - ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit', - func=audit_handler.execute_audit, - args=(self.audits[0], mock.MagicMock()), - kwargs={}), - job.Job(mock.MagicMock(), name='execute_audit', - func=audit_handler.execute_audit, - args=(self.audits[1], mock.MagicMock()), - kwargs={}) - ] - mock_jobs.return_value = ap_jobs - audit_handler.launch_audits_periodically() - - 
audit_handler.update_audit_state(self.audits[1], - objects.audit.State.CANCELLED) - audit_handler.update_audit_state(self.audits[0], - objects.audit.State.SUSPENDED) - is_inactive = audit_handler._is_audit_inactive(self.audits[1]) - self.assertTrue(is_inactive) - is_inactive = audit_handler._is_audit_inactive(self.audits[0]) - self.assertTrue(is_inactive) diff --git a/watcher/tests/decision_engine/cluster/__init__.py b/watcher/tests/decision_engine/cluster/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py b/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py deleted file mode 100644 index f0d8433..0000000 --- a/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import model_root -from watcher.tests import base as test_base - - -class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - @property - def notification_endpoints(self): - return [] - - def execute(self): - model = model_root.ModelRoot() - # Do something here... 
- return model - - -class TestClusterDataModelCollector(test_base.TestCase): - - def test_is_singleton(self): - m_config = mock.Mock() - inst1 = DummyClusterDataModelCollector(config=m_config) - inst2 = DummyClusterDataModelCollector(config=m_config) - - self.assertIs(inst1, inst2) - - def test_in_memory_model_is_copied(self): - m_config = mock.Mock() - collector = DummyClusterDataModelCollector(config=m_config) - collector.synchronize() - - self.assertIs( - collector._cluster_data_model, collector.cluster_data_model) - self.assertIsNot( - collector.cluster_data_model, - collector.get_latest_cluster_data_model()) diff --git a/watcher/tests/decision_engine/cluster/test_nova_cdmc.py b/watcher/tests/decision_engine/cluster/test_nova_cdmc.py deleted file mode 100644 index a685766..0000000 --- a/watcher/tests/decision_engine/cluster/test_nova_cdmc.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import nova_helper -from watcher.common import utils -from watcher.decision_engine.model.collector import nova -from watcher.tests import base -from watcher.tests import conf_fixture - - -class TestNovaClusterDataModelCollector(base.TestCase): - - def setUp(self): - super(TestNovaClusterDataModelCollector, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - - @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) - @mock.patch.object(nova_helper, 'NovaHelper') - def test_nova_cdmc_execute(self, m_nova_helper_cls): - m_nova_helper = mock.Mock(name="nova_helper") - m_nova_helper_cls.return_value = m_nova_helper - m_nova_helper.get_service.return_value = mock.Mock( - host="test_hostname") - - fake_compute_node = mock.Mock( - id=1337, - service={'id': 123}, - hypervisor_hostname='test_hostname', - memory_mb=333, - free_disk_gb=222, - local_gb=111, - vcpus=4, - state='TEST_STATE', - status='TEST_STATUS', - ) - fake_instance = mock.Mock( - id='ef500f7e-dac8-470f-960c-169486fce71b', - human_id='fake_instance', - flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, - metadata={'hi': 'hello'}, - ) - setattr(fake_instance, 'OS-EXT-STS:vm_state', 'VM_STATE') - setattr(fake_instance, 'OS-EXT-SRV-ATTR:host', 'test_hostname') - m_nova_helper.get_compute_node_list.return_value = [fake_compute_node] - # m_nova_helper.get_instances_by_node.return_value = [fake_instance] - m_nova_helper.get_instance_list.return_value = [fake_instance] - - m_nova_helper.get_flavor.return_value = utils.Struct(**{ - 'ram': 333, 'disk': 222, 'vcpus': 4}) - - m_config = mock.Mock() - m_osc = mock.Mock() - - nova_cdmc = nova.NovaClusterDataModelCollector( - config=m_config, osc=m_osc) - - model = nova_cdmc.execute() - - compute_nodes = model.get_all_compute_nodes() - instances = model.get_all_instances() - - self.assertEqual(1, len(compute_nodes)) - self.assertEqual(1, len(instances)) - - node = list(compute_nodes.values())[0] - instance = 
list(instances.values())[0] - - self.assertEqual(node.uuid, 'test_hostname') - self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b') diff --git a/watcher/tests/decision_engine/event_consumer/__init__.py b/watcher/tests/decision_engine/event_consumer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/fake_goals.py b/watcher/tests/decision_engine/fake_goals.py deleted file mode 100644 index 435253d..0000000 --- a/watcher/tests/decision_engine/fake_goals.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import voluptuous - -from watcher.decision_engine.goal import base as base_goal -from watcher.decision_engine.goal.efficacy import base as efficacy_base -from watcher.decision_engine.goal.efficacy import indicators -from watcher.decision_engine.goal.efficacy import specs - - -class FakeGoal(base_goal.Goal): - - NAME = NotImplemented - DISPLAY_NAME = NotImplemented - - @classmethod - def get_name(cls): - return cls.NAME - - @classmethod - def get_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_translatable_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() - - -class DummyIndicator(indicators.IndicatorSpecification): - def __init__(self): - super(DummyIndicator, self).__init__( - name="dummy", - description="Dummy indicator", - unit="%", - ) - - @property - def schema(self): - return voluptuous.Schema( - voluptuous.Range(min=0, max=100), required=True) - - -class DummySpec1(efficacy_base.EfficacySpecification): - - def get_indicators_specifications(self): - return [DummyIndicator()] - - def get_global_efficacy_indicator(self, indicators_map): - return None - - -class FakeDummy1(FakeGoal): - NAME = "dummy_1" - DISPLAY_NAME = "Dummy 1" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return DummySpec1() - - -class FakeDummy2(FakeGoal): - NAME = "dummy_2" - DISPLAY_NAME = "Dummy 2" diff --git a/watcher/tests/decision_engine/fake_strategies.py b/watcher/tests/decision_engine/fake_strategies.py deleted file mode 100644 index 002290b..0000000 --- a/watcher/tests/decision_engine/fake_strategies.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from watcher.decision_engine.strategy.strategies import base as base_strategy - - -class FakeStrategy(base_strategy.BaseStrategy): - - NAME = NotImplemented - DISPLAY_NAME = NotImplemented - GOAL_NAME = NotImplemented - - @classmethod - def get_name(cls): - return cls.NAME - - @classmethod - def get_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_translatable_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_goal_name(cls): - return cls.GOAL_NAME - - @classmethod - def get_config_opts(cls): - return [] - - def pre_execute(self): - pass - - def do_execute(self): - pass - - def post_execute(self): - pass - - -class FakeDummy1Strategy1(FakeStrategy): - GOAL_NAME = "dummy_1" - NAME = "strategy_1" - DISPLAY_NAME = "Strategy 1" - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt('test_opt', help="Option used for testing."), - ] - - -class FakeDummy1Strategy2(FakeStrategy): - GOAL_NAME = "dummy_1" - NAME = "strategy_2" - DISPLAY_NAME = "Strategy 2" - - -class FakeDummy2Strategy3(FakeStrategy): - GOAL_NAME = "dummy_2" - NAME = "strategy_3" - DISPLAY_NAME = "Strategy 3" - - -class FakeDummy2Strategy4(FakeStrategy): - GOAL_NAME = "dummy_2" - NAME = "strategy_4" - DISPLAY_NAME = "Strategy 4" diff --git a/watcher/tests/decision_engine/loading/__init__.py b/watcher/tests/decision_engine/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/loading/test_collector_loader.py 
b/watcher/tests/decision_engine/loading/test_collector_loader.py deleted file mode 100644 index 049c348..0000000 --- a/watcher/tests/decision_engine/loading/test_collector_loader.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from stevedore import driver as drivermanager -from stevedore import extension as stevedore_extension - -from watcher.common import clients -from watcher.common import exception -from watcher.decision_engine.loading import default as default_loading -from watcher.tests import base -from watcher.tests import conf_fixture -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestClusterDataModelCollectorLoader(base.TestCase): - - def setUp(self): - super(TestClusterDataModelCollectorLoader, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - self.collector_loader = ( - default_loading.ClusterDataModelCollectorLoader()) - - def test_load_collector_with_empty_model(self): - self.assertRaises( - exception.LoadingError, self.collector_loader.load, None) - - def test_collector_loader(self): - fake_driver = "fake" - # Set up the fake Stevedore extensions - fake_driver_call = drivermanager.DriverManager.make_test_instance( - extension=stevedore_extension.Extension( - name=fake_driver, - entry_point="%s:%s" % ( - faker_cluster_state.FakerModelCollector.__module__, - 
faker_cluster_state.FakerModelCollector.__name__), - plugin=faker_cluster_state.FakerModelCollector, - obj=None, - ), - namespace="watcher_cluster_data_model_collectors", - ) - - with mock.patch.object(drivermanager, - "DriverManager") as m_driver_manager: - m_driver_manager.return_value = fake_driver_call - loaded_collector = self.collector_loader.load("fake") - - self.assertIsInstance( - loaded_collector, faker_cluster_state.FakerModelCollector) - - -class TestLoadClusterDataModelCollectors(base.TestCase): - - collector_loader = default_loading.ClusterDataModelCollectorLoader() - - scenarios = [ - (collector_name, - {"collector_name": collector_name, "collector_cls": collector_cls}) - for collector_name, collector_cls - in collector_loader.list_available().items()] - - def setUp(self): - super(TestLoadClusterDataModelCollectors, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - - @mock.patch.object(clients, 'OpenStackClients', mock.Mock()) - def test_load_cluster_data_model_collectors(self): - collector = self.collector_loader.load(self.collector_name) - self.assertIsNotNone(collector) diff --git a/watcher/tests/decision_engine/loading/test_default_planner_loader.py b/watcher/tests/decision_engine/loading/test_default_planner_loader.py deleted file mode 100644 index 0354da9..0000000 --- a/watcher/tests/decision_engine/loading/test_default_planner_loader.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.loading import default -from watcher.decision_engine.planner import base as planner -from watcher.tests import base - - -class TestDefaultPlannerLoader(base.TestCase): - def setUp(self): - super(TestDefaultPlannerLoader, self).setUp() - self.loader = default.DefaultPlannerLoader() - - def test_endpoints(self): - for endpoint in self.loader.list_available(): - loaded = self.loader.load(endpoint) - self.assertIsNotNone(loaded) - self.assertIsInstance(loaded, planner.BasePlanner) diff --git a/watcher/tests/decision_engine/loading/test_default_strategy_loader.py b/watcher/tests/decision_engine/loading/test_default_strategy_loader.py deleted file mode 100644 index 5bb3c58..0000000 --- a/watcher/tests/decision_engine/loading/test_default_strategy_loader.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock -from stevedore import extension - -from watcher.common import exception -from watcher.decision_engine.loading import default as default_loading -from watcher.decision_engine.strategy.strategies import dummy_strategy -from watcher.tests import base - - -class TestDefaultStrategyLoader(base.TestCase): - - def setUp(self): - super(TestDefaultStrategyLoader, self).setUp() - self.strategy_loader = default_loading.DefaultStrategyLoader() - - def test_load_strategy_with_empty_model(self): - self.assertRaises( - exception.LoadingError, self.strategy_loader.load, None) - - def test_strategy_loader(self): - dummy_strategy_name = "dummy" - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=dummy_strategy_name, - entry_point="%s:%s" % ( - dummy_strategy.DummyStrategy.__module__, - dummy_strategy.DummyStrategy.__name__), - plugin=dummy_strategy.DummyStrategy, - obj=None, - )], - namespace="watcher_strategies", - ) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.return_value = fake_extmanager_call - loaded_strategy = self.strategy_loader.load( - "dummy") - - self.assertEqual("dummy", loaded_strategy.name) - self.assertEqual("Dummy strategy", loaded_strategy.display_name) - - def test_load_dummy_strategy(self): - strategy_loader = default_loading.DefaultStrategyLoader() - loaded_strategy = strategy_loader.load("dummy") - self.assertIsInstance(loaded_strategy, dummy_strategy.DummyStrategy) - - -class TestLoadStrategiesWithDefaultStrategyLoader(base.TestCase): - - strategy_loader = default_loading.DefaultStrategyLoader() - - scenarios = [ - (strategy_name, - {"strategy_name": strategy_name, "strategy_cls": strategy_cls}) - for strategy_name, strategy_cls - in strategy_loader.list_available().items()] - - def test_load_strategies(self): - strategy = self.strategy_loader.load(self.strategy_name) - 
self.assertIsNotNone(strategy) - self.assertEqual(self.strategy_name, strategy.name) diff --git a/watcher/tests/decision_engine/loading/test_goal_loader.py b/watcher/tests/decision_engine/loading/test_goal_loader.py deleted file mode 100644 index 13ab618..0000000 --- a/watcher/tests/decision_engine/loading/test_goal_loader.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from stevedore import extension - -from watcher.common import exception -from watcher.decision_engine.goal import goals -from watcher.decision_engine.loading import default as default_loading -from watcher.tests import base - - -class TestDefaultGoalLoader(base.TestCase): - - def setUp(self): - super(TestDefaultGoalLoader, self).setUp() - self.goal_loader = default_loading.DefaultGoalLoader() - - def test_load_goal_with_empty_model(self): - self.assertRaises( - exception.LoadingError, self.goal_loader.load, None) - - def test_goal_loader(self): - dummy_goal_name = "dummy" - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=dummy_goal_name, - entry_point="%s:%s" % ( - goals.Dummy.__module__, - goals.Dummy.__name__), - plugin=goals.Dummy, - obj=None, - )], - namespace="watcher_goals", - ) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.return_value = 
fake_extmanager_call - loaded_goal = self.goal_loader.load("dummy") - - self.assertEqual("dummy", loaded_goal.name) - self.assertEqual("Dummy goal", loaded_goal.display_name) - - def test_load_dummy_goal(self): - goal_loader = default_loading.DefaultGoalLoader() - loaded_goal = goal_loader.load("dummy") - self.assertIsInstance(loaded_goal, goals.Dummy) - - -class TestLoadGoalsWithDefaultGoalLoader(base.TestCase): - - goal_loader = default_loading.DefaultGoalLoader() - - # test matrix (1 test execution per goal entry point) - scenarios = [ - (goal_name, - {"goal_name": goal_name, "goal_cls": goal_cls}) - for goal_name, goal_cls - in goal_loader.list_available().items()] - - def test_load_goals(self): - goal = self.goal_loader.load(self.goal_name) - self.assertIsNotNone(goal) - self.assertEqual(self.goal_name, goal.name) diff --git a/watcher/tests/decision_engine/messaging/__init__.py b/watcher/tests/decision_engine/messaging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/messaging/test_audit_endpoint.py b/watcher/tests/decision_engine/messaging/test_audit_endpoint.py deleted file mode 100644 index 2a72cba..0000000 --- a/watcher/tests/decision_engine/messaging/test_audit_endpoint.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.decision_engine.audit import continuous as continuous_handler -from watcher.decision_engine.audit import oneshot as oneshot_handler -from watcher.decision_engine.messaging import audit_endpoint -from watcher.decision_engine.model.collector import manager -from watcher.tests.db import base -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.objects import utils as obj_utils - - -class TestAuditEndpoint(base.DbTestCase): - def setUp(self): - super(TestAuditEndpoint, self).setUp() - self.goal = obj_utils.create_test_goal(self.context) - self.audit_template = obj_utils.create_test_audit_template( - self.context) - self.audit = obj_utils.create_test_audit( - self.context, - audit_template_id=self.audit_template.id) - - @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_do_trigger_audit(self, mock_collector, mock_handler): - mock_collector.return_value = faker_cluster_state.FakerModelCollector() - - audit_handler = oneshot_handler.OneShotAuditHandler - endpoint = audit_endpoint.AuditEndpoint(audit_handler) - - with mock.patch.object(oneshot_handler.OneShotAuditHandler, - 'execute') as mock_call: - mock_call.return_value = 0 - endpoint.do_trigger_audit(self.context, self.audit.uuid) - - self.assertEqual(mock_call.call_count, 1) - - @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit(self, mock_collector, mock_handler): - mock_collector.return_value = faker_cluster_state.FakerModelCollector() - - audit_handler = oneshot_handler.OneShotAuditHandler - endpoint = audit_endpoint.AuditEndpoint(audit_handler) - - with mock.patch.object(endpoint.executor, 'submit') as mock_call: - mock_execute = mock.call(endpoint.do_trigger_audit, - self.context, - self.audit.uuid) - 
endpoint.trigger_audit(self.context, self.audit.uuid) - - mock_call.assert_has_calls([mock_execute]) - self.assertEqual(mock_call.call_count, 1) diff --git a/watcher/tests/decision_engine/model/__init__.py b/watcher/tests/decision_engine/model/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/model/ceilometer_metrics.py b/watcher/tests/decision_engine/model/ceilometer_metrics.py deleted file mode 100644 index 9c5d336..0000000 --- a/watcher/tests/decision_engine/model/ceilometer_metrics.py +++ /dev/null @@ -1,295 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import oslo_utils - - -class FakeCeilometerMetrics(object): - def __init__(self): - self.emptytype = "" - - def empty_one_metric(self, emptytype): - self.emptytype = emptytype - - def mock_get_statistics(self, resource_id, meter_name, period, - aggregate='avg'): - result = 0 - if meter_name == "hardware.cpu.util": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "compute.node.cpu.percent": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "hardware.memory.used": - result = self.get_usage_node_ram(resource_id) - elif meter_name == "cpu_util": - result = self.get_average_usage_instance_cpu(resource_id) - elif meter_name == "memory.resident": - result = self.get_average_usage_instance_memory(resource_id) - elif meter_name == "hardware.ipmi.node.outlet_temperature": - result = self.get_average_outlet_temperature(resource_id) - elif meter_name == "hardware.ipmi.node.airflow": - result = self.get_average_airflow(resource_id) - elif meter_name == "hardware.ipmi.node.temperature": - result = self.get_average_inlet_t(resource_id) - elif meter_name == "hardware.ipmi.node.power": - result = self.get_average_power(resource_id) - return result - - def mock_get_statistics_wb(self, resource_id, meter_name, period, - aggregate='avg'): - result = 0.0 - if meter_name == "cpu_util": - result = self.get_average_usage_instance_cpu_wb(resource_id) - return result - - def mock_get_statistics_nn(self, resource_id, meter_name, period, - aggregate='avg'): - result = 0.0 - if meter_name == "cpu_l3_cache" and period == 100: - result = self.get_average_l3_cache_current(resource_id) - if meter_name == "cpu_l3_cache" and period == 200: - result = self.get_average_l3_cache_previous(resource_id) - return result - - @staticmethod - def get_average_l3_cache_current(uuid): - """The average l3 cache used by instance""" - mock = {} - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 35 * oslo_utils.units.Ki - mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30 * 
oslo_utils.units.Ki - mock['INSTANCE_3'] = 40 * oslo_utils.units.Ki - mock['INSTANCE_4'] = 35 * oslo_utils.units.Ki - if uuid not in mock.keys(): - mock[uuid] = 25 * oslo_utils.units.Ki - return mock[str(uuid)] - - @staticmethod - def get_average_l3_cache_previous(uuid): - """The average l3 cache used by instance""" - mock = {} - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 34.5 * ( - oslo_utils.units.Ki) - mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30.5 * ( - oslo_utils.units.Ki) - mock['INSTANCE_3'] = 60 * oslo_utils.units.Ki - mock['INSTANCE_4'] = 22.5 * oslo_utils.units.Ki - if uuid not in mock.keys(): - mock[uuid] = 25 * oslo_utils.units.Ki - return mock[str(uuid)] - - @staticmethod - def get_average_outlet_temperature(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 30 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 100 - return float(mock[str(uuid)]) - - @staticmethod - def get_usage_node_ram(uuid): - mock = {} - # Ceilometer returns hardware.memory.used samples in KB. 
- mock['Node_0'] = 7 * oslo_utils.units.Ki - mock['Node_1'] = 5 * oslo_utils.units.Ki - mock['Node_2'] = 29 * oslo_utils.units.Ki - mock['Node_3'] = 8 * oslo_utils.units.Ki - mock['Node_4'] = 4 * oslo_utils.units.Ki - - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_airflow(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 400 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_average_inlet_t(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 24 - mock['Node_1'] = 26 - if uuid not in mock.keys(): - mock[uuid] = 28 - return mock[str(uuid)] - - @staticmethod - def get_average_power(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 260 - mock['Node_1'] = 240 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_usage_node_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['Node_0_hostname_0'] = 7 - mock['Node_1_hostname_1'] = 7 - # node 1 - mock['Node_2_hostname_2'] = 80 - # node 2 - mock['Node_3_hostname_3'] = 5 - mock['Node_4_hostname_4'] = 5 - mock['Node_5_hostname_5'] = 10 - - # node 3 - mock['Node_6_hostname_6'] = 8 - mock['Node_19_hostname_19'] = 10 - # node 4 - mock['INSTANCE_7_hostname_7'] = 4 - - mock['Node_0'] = 7 - mock['Node_1'] = 5 - mock['Node_2'] = 10 - mock['Node_3'] = 4 - mock['Node_4'] = 2 - - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu_wb(uuid): - """The last VM CPU usage values to average - - :param uuid:00 
- :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_1'] = 80 - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 - # node 1 - mock['INSTANCE_3'] = 20 - mock['INSTANCE_4'] = 10 - return float(mock[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_0'] = 7 - mock['INSTANCE_1'] = 7 - # node 1 - mock['INSTANCE_2'] = 10 - # node 2 - mock['INSTANCE_3'] = 5 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 10 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 8 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_memory(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 5 - # node 1 - mock['INSTANCE_2'] = 5 - # node 2 - mock['INSTANCE_3'] = 8 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 16 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 10 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_disk(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 2 - # node 1 - mock['INSTANCE_2'] = 2 - # node 2 - mock['INSTANCE_3'] = 10 - mock['INSTANCE_4'] = 15 - mock['INSTANCE_5'] = 20 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 4 - - return mock[str(uuid)] diff --git a/watcher/tests/decision_engine/model/data/scenario_1.xml b/watcher/tests/decision_engine/model/data/scenario_1.xml deleted file mode 100644 index 7476af2..0000000 --- 
a/watcher/tests/decision_engine/model/data/scenario_1.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml deleted file mode 100644 index d8b80af..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml deleted file mode 100644 index 668ef19..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml deleted file mode 100644 index 189d81e..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml deleted file mode 100644 index 5c60799..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml b/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml deleted file mode 100644 index 963beca..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml b/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml 
deleted file mode 100644 index a8bffab..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml deleted file mode 100644 index c12eaba..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml deleted file mode 100644 index cf86c00..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml deleted file mode 100644 index a646c6e..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml deleted file mode 100644 index d1d3f94..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/storage_scenario_1.xml b/watcher/tests/decision_engine/model/data/storage_scenario_1.xml deleted file mode 100644 index af2e416..0000000 --- a/watcher/tests/decision_engine/model/data/storage_scenario_1.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py 
b/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py deleted file mode 100644 index 3c40d66..0000000 --- a/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py +++ /dev/null @@ -1,242 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vojtech CIMA -# Bruno GRAZIOLI -# Sean MURPHY -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import mock - -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import model_root as modelroot - - -class FakerModelCollector(base.BaseClusterDataModelCollector): - - def __init__(self, config=None, osc=None): - if config is None: - config = mock.Mock() - super(FakerModelCollector, self).__init__(config) - - @property - def notification_endpoints(self): - return [] - - def execute(self): - return self.generate_scenario_1() - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return modelroot.ModelRoot.from_xml(self.load_data(filename)) - - def generate_scenario_1(self): - """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping""" - return self.load_model('scenario_1_with_metrics.xml') - - def generate_scenario_2(self): - """Simulates a cluster - - With 4 nodes and 6 instances all mapped to a single node - """ - return 
self.load_model('scenario_2_with_metrics.xml') - - def generate_scenario_3(self): - """Simulates a cluster - - With 4 nodes and 6 instances all mapped to one node - """ - return self.load_model('scenario_3_with_metrics.xml') - - def generate_scenario_4(self): - """Simulates a cluster - - With 4 nodes and 6 instances spread on all nodes - """ - return self.load_model('scenario_4_with_metrics.xml') - - -class FakeCeilometerMetrics(object): - def __init__(self, model): - self.model = model - - def mock_get_statistics(self, resource_id, meter_name, period=3600, - aggregate='avg'): - if meter_name == "compute.node.cpu.percent": - return self.get_node_cpu_util(resource_id) - elif meter_name == "cpu_util": - return self.get_instance_cpu_util(resource_id) - elif meter_name == "memory.usage": - return self.get_instance_ram_util(resource_id) - elif meter_name == "disk.root.size": - return self.get_instance_disk_root_size(resource_id) - - def get_node_cpu_util(self, r_id): - """Calculates node utilization dynamicaly. - - node CPU utilization should consider - and corelate with actual instance-node mappings - provided within a cluster model. - Returns relative node CPU utilization <0, 100>. 
- :param r_id: resource id - """ - node_uuid = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1]) - node = self.model.get_node_by_uuid(node_uuid) - instances = self.model.get_node_instances(node) - util_sum = 0.0 - for instance_uuid in instances: - instance = self.model.get_instance_by_uuid(instance_uuid) - total_cpu_util = instance.vcpus * self.get_instance_cpu_util( - instance.uuid) - util_sum += total_cpu_util / 100.0 - util_sum /= node.vcpus - return util_sum * 100.0 - - @staticmethod - def get_instance_cpu_util(r_id): - instance_cpu_util = dict() - instance_cpu_util['INSTANCE_0'] = 10 - instance_cpu_util['INSTANCE_1'] = 30 - instance_cpu_util['INSTANCE_2'] = 60 - instance_cpu_util['INSTANCE_3'] = 20 - instance_cpu_util['INSTANCE_4'] = 40 - instance_cpu_util['INSTANCE_5'] = 50 - instance_cpu_util['INSTANCE_6'] = 100 - instance_cpu_util['INSTANCE_7'] = 100 - instance_cpu_util['INSTANCE_8'] = 100 - instance_cpu_util['INSTANCE_9'] = 100 - return instance_cpu_util[str(r_id)] - - @staticmethod - def get_instance_ram_util(r_id): - instance_ram_util = dict() - instance_ram_util['INSTANCE_0'] = 1 - instance_ram_util['INSTANCE_1'] = 2 - instance_ram_util['INSTANCE_2'] = 4 - instance_ram_util['INSTANCE_3'] = 8 - instance_ram_util['INSTANCE_4'] = 3 - instance_ram_util['INSTANCE_5'] = 2 - instance_ram_util['INSTANCE_6'] = 1 - instance_ram_util['INSTANCE_7'] = 2 - instance_ram_util['INSTANCE_8'] = 4 - instance_ram_util['INSTANCE_9'] = 8 - return instance_ram_util[str(r_id)] - - @staticmethod - def get_instance_disk_root_size(r_id): - instance_disk_util = dict() - instance_disk_util['INSTANCE_0'] = 10 - instance_disk_util['INSTANCE_1'] = 15 - instance_disk_util['INSTANCE_2'] = 30 - instance_disk_util['INSTANCE_3'] = 35 - instance_disk_util['INSTANCE_4'] = 20 - instance_disk_util['INSTANCE_5'] = 25 - instance_disk_util['INSTANCE_6'] = 25 - instance_disk_util['INSTANCE_7'] = 25 - instance_disk_util['INSTANCE_8'] = 25 - instance_disk_util['INSTANCE_9'] = 25 - return 
instance_disk_util[str(r_id)] - - -class FakeGnocchiMetrics(object): - def __init__(self, model): - self.model = model - - def mock_get_statistics(self, resource_id, metric, granularity, - start_time, stop_time, aggregation='mean'): - if metric == "compute.node.cpu.percent": - return self.get_node_cpu_util(resource_id) - elif metric == "cpu_util": - return self.get_instance_cpu_util(resource_id) - elif metric == "memory.usage": - return self.get_instance_ram_util(resource_id) - elif metric == "disk.root.size": - return self.get_instance_disk_root_size(resource_id) - - def get_node_cpu_util(self, r_id): - """Calculates node utilization dynamicaly. - - node CPU utilization should consider - and corelate with actual instance-node mappings - provided within a cluster model. - Returns relative node CPU utilization <0, 100>. - - :param r_id: resource id - """ - node_uuid = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1]) - node = self.model.get_node_by_uuid(node_uuid) - instances = self.model.get_node_instances(node) - util_sum = 0.0 - for instance_uuid in instances: - instance = self.model.get_instance_by_uuid(instance_uuid) - total_cpu_util = instance.vcpus * self.get_instance_cpu_util( - instance.uuid) - util_sum += total_cpu_util / 100.0 - util_sum /= node.vcpus - return util_sum * 100.0 - - @staticmethod - def get_instance_cpu_util(r_id): - instance_cpu_util = dict() - instance_cpu_util['INSTANCE_0'] = 10 - instance_cpu_util['INSTANCE_1'] = 30 - instance_cpu_util['INSTANCE_2'] = 60 - instance_cpu_util['INSTANCE_3'] = 20 - instance_cpu_util['INSTANCE_4'] = 40 - instance_cpu_util['INSTANCE_5'] = 50 - instance_cpu_util['INSTANCE_6'] = 100 - instance_cpu_util['INSTANCE_7'] = 100 - instance_cpu_util['INSTANCE_8'] = 100 - instance_cpu_util['INSTANCE_9'] = 100 - return instance_cpu_util[str(r_id)] - - @staticmethod - def get_instance_ram_util(r_id): - instance_ram_util = dict() - instance_ram_util['INSTANCE_0'] = 1 - instance_ram_util['INSTANCE_1'] = 2 - 
instance_ram_util['INSTANCE_2'] = 4 - instance_ram_util['INSTANCE_3'] = 8 - instance_ram_util['INSTANCE_4'] = 3 - instance_ram_util['INSTANCE_5'] = 2 - instance_ram_util['INSTANCE_6'] = 1 - instance_ram_util['INSTANCE_7'] = 2 - instance_ram_util['INSTANCE_8'] = 4 - instance_ram_util['INSTANCE_9'] = 8 - return instance_ram_util[str(r_id)] - - @staticmethod - def get_instance_disk_root_size(r_id): - instance_disk_util = dict() - instance_disk_util['INSTANCE_0'] = 10 - instance_disk_util['INSTANCE_1'] = 15 - instance_disk_util['INSTANCE_2'] = 30 - instance_disk_util['INSTANCE_3'] = 35 - instance_disk_util['INSTANCE_4'] = 20 - instance_disk_util['INSTANCE_5'] = 25 - instance_disk_util['INSTANCE_6'] = 25 - instance_disk_util['INSTANCE_7'] = 25 - instance_disk_util['INSTANCE_8'] = 25 - instance_disk_util['INSTANCE_9'] = 25 - return instance_disk_util[str(r_id)] diff --git a/watcher/tests/decision_engine/model/faker_cluster_state.py b/watcher/tests/decision_engine/model/faker_cluster_state.py deleted file mode 100644 index 1893544..0000000 --- a/watcher/tests/decision_engine/model/faker_cluster_state.py +++ /dev/null @@ -1,257 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import mock - -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import element -from watcher.decision_engine.model import model_root as modelroot - - -class FakerModelCollector(base.BaseClusterDataModelCollector): - - def __init__(self, config=None, osc=None): - if config is None: - config = mock.Mock(period=777) - super(FakerModelCollector, self).__init__(config) - - @property - def notification_endpoints(self): - return [] - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return modelroot.ModelRoot.from_xml(self.load_data(filename)) - - def execute(self): - return self._cluster_data_model or self.build_scenario_1() - - def build_scenario_1(self): - instances = [] - - model = modelroot.ModelRoot() - # number of nodes - node_count = 5 - # number max of instance per node - node_instance_count = 7 - # total number of virtual machine - instance_count = (node_count * node_instance_count) - - for id_ in range(0, node_count): - node_uuid = "Node_{0}".format(id_) - hostname = "hostname_{0}".format(id_) - node_attributes = { - "id": id_, - "uuid": node_uuid, - "hostname": hostname, - "memory": 132, - "disk": 250, - "disk_capacity": 250, - "vcpus": 40, - } - node = element.ComputeNode(**node_attributes) - model.add_node(node) - - for i in range(0, instance_count): - instance_uuid = "INSTANCE_{0}".format(i) - instance_attributes = { - "uuid": instance_uuid, - "memory": 2, - "disk": 20, - "disk_capacity": 20, - "vcpus": 10, - "metadata": - '{"optimize": true,"top": "floor","nested": {"x": "y"}}' - } - - instance = element.Instance(**instance_attributes) - instances.append(instance) - model.add_instance(instance) - - mappings = [ - ("INSTANCE_0", "Node_0"), - ("INSTANCE_1", "Node_0"), - 
("INSTANCE_2", "Node_1"), - ("INSTANCE_3", "Node_2"), - ("INSTANCE_4", "Node_2"), - ("INSTANCE_5", "Node_2"), - ("INSTANCE_6", "Node_3"), - ("INSTANCE_7", "Node_4"), - ] - for instance_uuid, node_uuid in mappings: - model.map_instance( - model.get_instance_by_uuid(instance_uuid), - model.get_node_by_uuid(node_uuid), - ) - - return model - - def generate_scenario_1(self): - return self.load_model('scenario_1.xml') - - def generate_scenario_3_with_2_nodes(self): - return self.load_model('scenario_3_with_2_nodes.xml') - - def generate_scenario_4_with_1_node_no_instance(self): - return self.load_model('scenario_4_with_1_node_no_instance.xml') - - def generate_scenario_5_with_instance_disk_0(self): - return self.load_model('scenario_5_with_instance_disk_0.xml') - - def generate_scenario_6_with_2_nodes(self): - return self.load_model('scenario_6_with_2_nodes.xml') - - def generate_scenario_7_with_2_nodes(self): - return self.load_model('scenario_7_with_2_nodes.xml') - - def generate_scenario_8_with_4_nodes(self): - return self.load_model('scenario_8_with_4_nodes.xml') - - def generate_scenario_9_with_3_active_plus_1_disabled_nodes(self): - return self.load_model( - 'scenario_9_with_3_active_plus_1_disabled_nodes.xml') - - -class FakerStorageModelCollector(base.BaseClusterDataModelCollector): - - def __init__(self, config=None, osc=None): - if config is None: - config = mock.Mock(period=777) - super(FakerStorageModelCollector, self).__init__(config) - - @property - def notification_endpoints(self): - return [] - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return modelroot.StorageModelRoot.from_xml(self.load_data(filename)) - - def execute(self): - return self._cluster_data_model or self.build_scenario_1() - - def build_scenario_1(self): 
- - model = modelroot.StorageModelRoot() - # number of nodes - node_count = 2 - # number of pools per node - pool_count = 2 - # number of volumes - volume_count = 9 - - for i in range(0, node_count): - host = "host_{0}@backend_{0}".format(i) - zone = "zone_{0}".format(i) - volume_type = "type_{0}".format(i) - node_attributes = { - "host": host, - "zone": zone, - "status": 'enabled', - "state": 'up', - "volume_type": volume_type, - } - node = element.StorageNode(**node_attributes) - model.add_node(node) - - for j in range(0, pool_count): - name = "host_{0}@backend_{0}#pool_{1}".format(i, j) - pool_attributes = { - "name": name, - "total_volumes": 2, - "total_capacity_gb": 500, - "free_capacity_gb": 420, - "provisioned_capacity_gb": 80, - "allocated_capacity_gb": 80, - "virtual_free": 420, - } - pool = element.Pool(**pool_attributes) - model.add_pool(pool) - - mappings = [ - ("host_0@backend_0#pool_0", "host_0@backend_0"), - ("host_0@backend_0#pool_1", "host_0@backend_0"), - ("host_1@backend_1#pool_0", "host_1@backend_1"), - ("host_1@backend_1#pool_1", "host_1@backend_1"), - ] - - for pool_name, node_name in mappings: - model.map_pool( - model.get_pool_by_pool_name(pool_name), - model.get_node_by_name(node_name), - ) - - for k in range(volume_count): - uuid = "VOLUME_{0}".format(k) - name = "name_{0}".format(k) - project_id = "project_{0}".format(k) - volume_attributes = { - "size": 40, - "status": "in-use", - "uuid": uuid, - "attachments": - '[{"server_id": "server","attachment_id": "attachment"}]', - "name": name, - "multiattach": 'True', - "snapshot_id": uuid, - "project_id": project_id, - "metadata": '{"readonly": false,"attached_mode": "rw"}', - "bootable": 'False' - } - volume = element.Volume(**volume_attributes) - model.add_volume(volume) - - mappings = [ - ("VOLUME_0", "host_0@backend_0#pool_0"), - ("VOLUME_1", "host_0@backend_0#pool_0"), - ("VOLUME_2", "host_0@backend_0#pool_1"), - ("VOLUME_3", "host_0@backend_0#pool_1"), - ("VOLUME_4", 
"host_1@backend_1#pool_0"), - ("VOLUME_5", "host_1@backend_1#pool_0"), - ("VOLUME_6", "host_1@backend_1#pool_1"), - ("VOLUME_7", "host_1@backend_1#pool_1"), - ] - - for volume_uuid, pool_name in mappings: - model.map_volume( - model.get_volume_by_uuid(volume_uuid), - model.get_pool_by_pool_name(pool_name), - ) - - return model - - def generate_scenario_1(self): - return self.load_model('storage_scenario_1.xml') diff --git a/watcher/tests/decision_engine/model/gnocchi_metrics.py b/watcher/tests/decision_engine/model/gnocchi_metrics.py deleted file mode 100644 index 982bcac..0000000 --- a/watcher/tests/decision_engine/model/gnocchi_metrics.py +++ /dev/null @@ -1,244 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import oslo_utils - - -class FakeGnocchiMetrics(object): - def __init__(self): - self.emptytype = "" - - def empty_one_metric(self, emptytype): - self.emptytype = emptytype - - def mock_get_statistics(self, resource_id, metric, granularity, - start_time, stop_time, aggregation='mean'): - result = 0 - meter_name = metric - if meter_name == "hardware.cpu.util": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "compute.node.cpu.percent": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "hardware.memory.used": - result = self.get_usage_node_ram(resource_id) - elif meter_name == "cpu_util": - result = self.get_average_usage_instance_cpu(resource_id) - elif meter_name == "memory.resident": - result = self.get_average_usage_instance_memory(resource_id) - elif meter_name == "hardware.ipmi.node.outlet_temperature": - result = self.get_average_outlet_temperature(resource_id) - elif meter_name == "hardware.ipmi.node.airflow": - result = self.get_average_airflow(resource_id) - elif meter_name == "hardware.ipmi.node.temperature": - result = self.get_average_inlet_t(resource_id) - elif meter_name == "hardware.ipmi.node.power": - result = self.get_average_power(resource_id) - return result - - def mock_get_statistics_wb(self, resource_id, metric, granularity, - start_time, stop_time, aggregation='mean'): - result = 0.0 - if metric == "cpu_util": - result = self.get_average_usage_instance_cpu_wb(resource_id) - return result - - @staticmethod - def get_average_outlet_temperature(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 30 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 100 - return mock[str(uuid)] - - @staticmethod - def get_usage_node_ram(uuid): - mock = {} - # Gnocchi returns hardware.memory.used samples in KB. 
- mock['Node_0'] = 7 * oslo_utils.units.Ki - mock['Node_1'] = 5 * oslo_utils.units.Ki - mock['Node_2'] = 29 * oslo_utils.units.Ki - mock['Node_3'] = 8 * oslo_utils.units.Ki - mock['Node_4'] = 4 * oslo_utils.units.Ki - - if uuid not in mock.keys(): - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_airflow(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 400 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_average_inlet_t(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 24 - mock['Node_1'] = 26 - if uuid not in mock.keys(): - mock[uuid] = 28 - return mock[str(uuid)] - - @staticmethod - def get_average_power(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 260 - mock['Node_1'] = 240 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_usage_node_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid: instance UUID - :return: float value - """ - # Normalize - mock = {} - # node 0 - mock['Node_0_hostname_0'] = 7 - mock['Node_1_hostname_1'] = 7 - # node 1 - mock['Node_2_hostname_2'] = 80 - # node 2 - mock['Node_3_hostname_3'] = 5 - mock['Node_4_hostname_4'] = 5 - mock['Node_5_hostname_5'] = 10 - - # node 3 - mock['Node_6_hostname_6'] = 8 - mock['Node_19_hostname_19'] = 10 - # node 4 - mock['INSTANCE_7_hostname_7'] = 4 - - mock['Node_0'] = 7 - mock['Node_1'] = 5 - mock['Node_2'] = 10 - mock['Node_3'] = 4 - mock['Node_4'] = 2 - - if uuid not in mock.keys(): - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid: instance UUID - :return: int value - """ - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_0'] = 7 - 
mock['INSTANCE_1'] = 7 - # node 1 - mock['INSTANCE_2'] = 10 - # node 2 - mock['INSTANCE_3'] = 5 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 10 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - mock[uuid] = 8 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_memory(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 5 - # node 1 - mock['INSTANCE_2'] = 5 - # node 2 - mock['INSTANCE_3'] = 8 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 16 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - mock[uuid] = 10 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_disk(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 2 - # node 1 - mock['INSTANCE_2'] = 2 - # node 2 - mock['INSTANCE_3'] = 10 - mock['INSTANCE_4'] = 15 - mock['INSTANCE_5'] = 20 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - - if uuid not in mock.keys(): - mock[uuid] = 4 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_cpu_wb(uuid): - """The last VM CPU usage values to average - - :param uuid: instance UUID - :return: float value - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_1'] = 80 - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 - # node 1 - mock['INSTANCE_3'] = 20 - mock['INSTANCE_4'] = 10 - return float(mock[str(uuid)]) diff --git a/watcher/tests/decision_engine/model/monasca_metrics.py b/watcher/tests/decision_engine/model/monasca_metrics.py deleted file mode 100644 index 12ebb27..0000000 --- a/watcher/tests/decision_engine/model/monasca_metrics.py +++ /dev/null @@ -1,266 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use 
this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import oslo_utils - - -class FakeMonascaMetrics(object): - def __init__(self): - self.emptytype = "" - - def empty_one_metric(self, emptytype): - self.emptytype = emptytype - - def mock_get_statistics(self, meter_name, dimensions, period, - aggregate='avg'): - resource_id = dimensions.get( - "resource_id") or dimensions.get("hostname") - result = 0.0 - if meter_name == "cpu.percent": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "vm.cpu.utilization_perc": - result = self.get_average_usage_instance_cpu(resource_id) - # elif meter_name == "hardware.memory.used": - # result = self.get_usage_node_ram(resource_id) - # elif meter_name == "memory.resident": - # result = self.get_average_usage_instance_memory(resource_id) - # elif meter_name == "hardware.ipmi.node.outlet_temperature": - # result = self.get_average_outlet_temperature(resource_id) - # elif meter_name == "hardware.ipmi.node.airflow": - # result = self.get_average_airflow(resource_id) - # elif meter_name == "hardware.ipmi.node.temperature": - # result = self.get_average_inlet_t(resource_id) - # elif meter_name == "hardware.ipmi.node.power": - # result = self.get_average_power(resource_id) - return result - - def mock_get_statistics_wb(self, meter_name, dimensions, period, - aggregate='avg'): - resource_id = dimensions.get( - "resource_id") or dimensions.get("hostname") - result = 0.0 - if meter_name == "vm.cpu.utilization_perc": - result = self.get_average_usage_instance_cpu_wb(resource_id) - return result - - @staticmethod - 
def get_average_outlet_temperature(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 30 - # use a big value to make sure it exceeds threshold - measurements['Node_1'] = 100 - if uuid not in measurements.keys(): - measurements[uuid] = 100 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_usage_node_ram(uuid): - measurements = {} - # Monasca returns hardware.memory.used samples in KB. - measurements['Node_0'] = 7 * oslo_utils.units.Ki - measurements['Node_1'] = 5 * oslo_utils.units.Ki - measurements['Node_2'] = 29 * oslo_utils.units.Ki - measurements['Node_3'] = 8 * oslo_utils.units.Ki - measurements['Node_4'] = 4 * oslo_utils.units.Ki - - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 8 - - return float(measurements[str(uuid)]) - - @staticmethod - def get_average_airflow(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 400 - # use a big value to make sure it exceeds threshold - measurements['Node_1'] = 100 - if uuid not in measurements.keys(): - measurements[uuid] = 200 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_inlet_t(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 24 - measurements['Node_1'] = 26 - if uuid not in measurements.keys(): - measurements[uuid] = 28 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_power(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 260 - measurements['Node_1'] = 240 - if uuid not in measurements.keys(): - measurements[uuid] = 200 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_usage_node_cpu(uuid): - 
"""The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - measurements = {} - # node 0 - measurements['Node_0'] = 7 - measurements['Node_1'] = 7 - # node 1 - measurements['Node_2'] = 80 - # node 2 - measurements['Node_3'] = 5 - measurements['Node_4'] = 5 - measurements['Node_5'] = 10 - - # node 3 - measurements['Node_6'] = 8 - measurements['Node_19'] = 10 - # node 4 - measurements['INSTANCE_7'] = 4 - - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 8 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - # return float(measurements[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu_wb(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - measurements = {} - # node 0 - measurements['INSTANCE_1'] = 80 - measurements['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 - # node 1 - measurements['INSTANCE_3'] = 20 - measurements['INSTANCE_4'] = 10 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_usage_instance_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - measurements = {} - # node 0 - measurements['INSTANCE_0'] = 7 - measurements['INSTANCE_1'] = 7 - # node 1 - measurements['INSTANCE_2'] = 10 - # node 2 - measurements['INSTANCE_3'] = 5 - measurements['INSTANCE_4'] = 5 - measurements['INSTANCE_5'] = 10 - - # node 3 - measurements['INSTANCE_6'] = 8 - - # node 4 - measurements['INSTANCE_7'] = 4 - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 8 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - 
def get_average_usage_instance_memory(uuid): - measurements = {} - # node 0 - measurements['INSTANCE_0'] = 2 - measurements['INSTANCE_1'] = 5 - # node 1 - measurements['INSTANCE_2'] = 5 - # node 2 - measurements['INSTANCE_3'] = 8 - measurements['INSTANCE_4'] = 5 - measurements['INSTANCE_5'] = 16 - - # node 3 - measurements['INSTANCE_6'] = 8 - - # node 4 - measurements['INSTANCE_7'] = 4 - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 10 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_usage_instance_disk(uuid): - measurements = {} - # node 0 - measurements['INSTANCE_0'] = 2 - measurements['INSTANCE_1'] = 2 - # node 1 - measurements['INSTANCE_2'] = 2 - # node 2 - measurements['INSTANCE_3'] = 10 - measurements['INSTANCE_4'] = 15 - measurements['INSTANCE_5'] = 20 - - # node 3 - measurements['INSTANCE_6'] = 8 - - # node 4 - measurements['INSTANCE_7'] = 4 - - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 4 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] diff --git a/watcher/tests/decision_engine/model/notification/__init__.py b/watcher/tests/decision_engine/model/notification/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/model/notification/data/capacity.json b/watcher/tests/decision_engine/model/notification/data/capacity.json deleted file mode 100644 index 28a8f55..0000000 --- a/watcher/tests/decision_engine/model/notification/data/capacity.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host1@backend1#pool1", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "capacity.host1@backend1#pool1", - "total": 3, - "free": 1, - "allocated": 2, - "provisioned": 2, - "virtual_free": 1, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git 
a/watcher/tests/decision_engine/model/notification/data/instance-create.json b/watcher/tests/decision_engine/model/notification/data/instance-create.json deleted file mode 100644 index ddb1aa0..0000000 --- a/watcher/tests/decision_engine/model/notification/data/instance-create.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z" - }, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "some-server", - "host": "compute", - "host_name": "some-server", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "fake-mini", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "active", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.version": "1.0", - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.data": { - "old_state": "building", - "new_task_state": null, - "old_task_state": "spawning", - "state": "active" - } - }, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2" - }, - "nova_object.name": "InstanceUpdatePayload", - 
"nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "priority": "INFO", - "publisher_id": "nova-compute:compute" -} diff --git a/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json b/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json deleted file mode 100644 index 75eaffa..0000000 --- a/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "event_type":"instance.delete.end", - "payload":{ - "nova_object.data":{ - "architecture":"x86_64", - "availability_zone":null, - "created_at":"2012-10-29T13:42:11Z", - "deleted_at":"2012-10-29T13:42:11Z", - "display_name":"some-server", - "fault":null, - "host":"compute", - "host_name":"some-server", - "ip_addresses":[], - "kernel_id":"", - "launched_at":"2012-10-29T13:42:11Z", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "metadata":{}, - "node":"fake-mini", - "os_type":null, - "progress":0, - "ramdisk_id":"", - "reservation_id":"r-npxv0e40", - "state":"deleted", - "task_state":null, - "power_state":"pending", - "tenant_id":"6f70656e737461636b20342065766572", - "terminated_at":"2012-10-29T13:42:11Z", - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id":"fake", - "uuid":"178b0921-8f85-4257-88b6-2e743b5a975c" - }, - "nova_object.name":"InstanceActionPayload", - "nova_object.namespace":"nova", - "nova_object.version":"1.0" - }, - "priority":"INFO", - "publisher_id":"nova-compute:compute" -} diff --git a/watcher/tests/decision_engine/model/notification/data/instance-update.json b/watcher/tests/decision_engine/model/notification/data/instance-update.json deleted file mode 100644 index f79485a..0000000 --- 
a/watcher/tests/decision_engine/model/notification/data/instance-update.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z"}, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "some-server", - "host": "compute", - "host_name": "some-server", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "fake-mini", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "active", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.data": { - "new_task_state": null, - "old_state": null, - "old_task_state": null, - "state": "active"}, - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2"}, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "priority": "INFO", - "publisher_id": "nova-compute:compute" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-create.json 
b/watcher/tests/decision_engine/model/notification/data/scenario3_instance-create.json deleted file mode 100644 index d180f8d..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-create.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z" - }, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "some-server", - "host": "Node_0", - "host_name": "some-server", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "hostname_0", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "active", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.version": "1.0", - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.data": { - "old_state": "building", - "new_task_state": null, - "old_task_state": "spawning", - "state": "active" - } - }, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2" - }, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - 
}, - "priority": "INFO", - "publisher_id": "nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-delete-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_instance-delete-end.json deleted file mode 100644 index 90898b8..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-delete-end.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "event_type":"instance.delete.end", - "payload":{ - "nova_object.data":{ - "architecture":"x86_64", - "availability_zone":null, - "created_at":"2012-10-29T13:42:11Z", - "deleted_at":"2012-10-29T13:42:11Z", - "display_name":"some-server", - "fault":null, - "host":"Node_0", - "host_name":"some-server", - "ip_addresses":[], - "kernel_id":"", - "launched_at":"2012-10-29T13:42:11Z", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "metadata":{}, - "node":"fake-mini", - "os_type":null, - "progress":0, - "ramdisk_id":"", - "reservation_id":"r-npxv0e40", - "state":"deleted", - "task_state":null, - "power_state":"pending", - "tenant_id":"6f70656e737461636b20342065766572", - "terminated_at":"2012-10-29T13:42:11Z", - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id":"fake", - "uuid":"73b09e16-35b7-4922-804e-e8f5d9b740fc" - }, - "nova_object.name":"InstanceActionPayload", - "nova_object.namespace":"nova", - "nova_object.version":"1.0" - }, - "priority":"INFO", - "publisher_id":"nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json b/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json deleted file mode 100644 index 23d23b9..0000000 --- 
a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z"}, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "NEW_INSTANCE0", - "host": "Node_0", - "host_name": "NEW_INSTANCE0", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "hostname_0", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "paused", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.data": { - "old_task_state": null, - "new_task_state": null, - "old_state": "paused", - "state": "paused"}, - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc"}, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "priority": "INFO", - "publisher_id": "nova-compute:Node_0" -} diff --git 
a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-create-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-create-end.json deleted file mode 100644 index 3a0b366..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-create-end.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "event_type": "compute.instance.create.end", - "metadata": { - "message_id": "577bfd11-88e0-4044-b8ae-496e3257efe2", - "timestamp": "2016-08-19 10:20:59.279903" - }, - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "availability_zone": "nova", - "cell_name": "", - "created_at": "2016-08-19 10:20:49+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "INSTANCE_0", - "ephemeral_gb": 0, - "fixed_ips": [ - { - "address": "192.168.1.197", - "floating_ips": [], - "label": "demo-net", - "meta": {}, - "type": "fixed", - "version": 4, - "vif_mac": "fa:16:3e:a3:c0:0f" - } - ], - "host": "Node_0", - "hostname": "INSTANCE_0", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://127.0.0.1:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "c03c0bf9-f46e-4e4f-93f1-817568567ee2", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-19T10:20:59.135390", - "memory_mb": 512, - "message": "Success", - "metadata": {}, - "node": "Node_0", - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-56edz88e", - "root_gb": 1, - "state": "active", - "state_description": "", - "tenant_id": "57ab04ad6d3b495789a58258bc00842b", - "terminated_at": "", - "user_id": "cd7d93be51e4460ab51514b2a925b23a", - "vcpus": 1 - }, - "publisher_id": "compute.Node_0" -} diff --git 
a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-delete-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-delete-end.json deleted file mode 100644 index 12b0a12..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-delete-end.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "publisher_id": "compute:compute", - "event_type": "compute.instance.delete.end", - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "availability_zone": "nova", - "cell_name": "", - "created_at": "2016-08-17 15:10:12+00:00", - "deleted_at": "2016-08-17T15:10:33.000000", - "disk_gb": 1, - "display_name": "some-server", - "ephemeral_gb": 0, - "host": "Node_0", - "hostname": "some-server", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.254.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "73b09e16-35b7-4922-804e-e8f5d9b740fc", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-17T15:10:23.000000", - "memory_mb": 512, - "metadata": {}, - "node": "Node_0", - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-z76fnsyy", - "root_gb": 1, - "state": "deleted", - "state_description": "", - "tenant_id": "15995ea2694e4268b3631db32e38678b", - "terminated_at": "2016-08-17T15:10:33.008164", - "user_id": "cd7d93be51e4460ab51514b2a925b23a", - "vcpus": 1 - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-update.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-update.json deleted file mode 100644 index ce2b997..0000000 --- 
a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-update.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "publisher_id": "compute:Node_0", - "event_type": "compute.instance.update", - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "audit_period_beginning": "2016-08-17T13:00:00.000000", - "audit_period_ending": "2016-08-17T13:56:05.262440", - "availability_zone": "nova", - "bandwidth": {}, - "cell_name": "", - "created_at": "2016-08-17 13:53:23+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "NEW_INSTANCE0", - "ephemeral_gb": 0, - "host": "Node_0", - "hostname": "NEW_INSTANCE0", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "73b09e16-35b7-4922-804e-e8f5d9b740fc", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-17T13:53:35.000000", - "memory_mb": 512, - "metadata": {}, - "new_task_state": null, - "node": "hostname_0", - "old_state": "paused", - "old_task_state": null, - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-0822ymml", - "root_gb": 1, - "state": "paused", - "state_description": "paused", - "tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1", - "terminated_at": "", - "user_id": "ce64facc93354bbfa90f4f9f9a3e1e75", - "vcpus": 1 - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_livemigration-post-dest-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_livemigration-post-dest-end.json deleted file mode 100644 index 916b91b..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_livemigration-post-dest-end.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - 
"event_type": "compute.instance.live_migration.post.dest.end", - "metadata": { - "message_id": "9f58cad4-ff90-40f8-a8e4-633807f4a995", - "timestamp": "2016-08-19 10:13:44.645575" - }, - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "availability_zone": "nova", - "cell_name": "", - "created_at": "2016-08-18 09:49:23+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "INSTANCE_0", - "ephemeral_gb": 0, - "fixed_ips": [ - { - "address": "192.168.1.196", - "floating_ips": [], - "label": "demo-net", - "meta": {}, - "type": "fixed", - "version": 4, - "vif_mac": "fa:16:3e:cc:ba:81" - } - ], - "host": "Node_1", - "hostname": "INSTANCE_0", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.254.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "73b09e16-35b7-4922-804e-e8f5d9b740fc", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-18T09:49:33.000000", - "memory_mb": 512, - "metadata": {}, - "node": "Node_1", - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-he04tfco", - "root_gb": 1, - "state": "active", - "state_description": "", - "tenant_id": "57ab04ad6d3b495789a58258bc00842b", - "terminated_at": "", - "user_id": "cd7d93be51e4460ab51514b2a925b23a", - "vcpus": 1 - }, - "publisher_id": "compute.Node_1" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json b/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json deleted file mode 100644 index 2f27862..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - 
"nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z"}, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "host": "Node_2", - "host_name": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "hostname_0", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "paused", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.data": { - "old_task_state": null, - "new_task_state": null, - "old_state": "paused", - "state": "paused"}, - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7"}, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "priority": "INFO", - "publisher_id": "nova-compute:Node_2" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json 
b/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json deleted file mode 100644 index caf2863..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "publisher_id": "compute:Node_2", - "event_type": "compute.instance.update", - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "audit_period_beginning": "2016-08-17T13:00:00.000000", - "audit_period_ending": "2016-08-17T13:56:05.262440", - "availability_zone": "nova", - "bandwidth": {}, - "cell_name": "", - "created_at": "2016-08-17 13:53:23+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "ephemeral_gb": 0, - "host": "Node_2", - "hostname": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-17T13:53:35.000000", - "memory_mb": 512, - "metadata": {}, - "new_task_state": null, - "node": "hostname_0", - "old_state": "paused", - "old_task_state": null, - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-0822ymml", - "root_gb": 1, - "state": "paused", - "state_description": "paused", - "tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1", - "terminated_at": "", - "user_id": "ce64facc93354bbfa90f4f9f9a3e1e75", - "vcpus": 1 - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json 
b/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json deleted file mode 100644 index 410f12d..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "nova_object.namespace": "nova", - "nova_object.name": "ServiceStatusPayload", - "nova_object.version": "1.0", - "nova_object.data": { - "host": "Node_0", - "disabled": true, - "last_seen_up": "2012-10-29T13:42:05Z", - "binary": "nova-compute", - "topic": "compute", - "disabled_reason": null, - "report_count": 1, - "forced_down": true, - "version": 15 - } - }, - "event_type": "service.update", - "publisher_id": "nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json b/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json deleted file mode 100644 index f3e7f23..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "nova_object.namespace": "nova", - "nova_object.name": "ServiceStatusPayload", - "nova_object.version": "1.0", - "nova_object.data": { - "host": "Node_0", - "disabled": false, - "last_seen_up": "2012-10-29T13:42:05Z", - "binary": "nova-compute", - "topic": "compute", - "disabled_reason": null, - "report_count": 1, - "forced_down": false, - "version": 15 - } - }, - "event_type": "service.update", - "publisher_id": "nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json deleted file mode 100644 index 4b0fe28..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - 
"priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "available", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {"readonly": false, "attached_mode": "rw"}, - "glance_metadata": {} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json deleted file mode 100644 index 7831bdd..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host_0@backend_0#pool_0", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "host_0@backend_0#pool_0", - "total": 500, - "free": 460, - "allocated": 40, - "provisioned": 40, - "virtual_free": 460, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json deleted file mode 100644 index 650a575..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host_2@backend_2#pool_0", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "host_2@backend_2#pool_0", - "total": 500, - "free": 460, - "allocated": 40, - "provisioned": 40, - "virtual_free": 460, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json 
b/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json deleted file mode 100644 index 948c230..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host_0@backend_0#pool_2", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "host_0@backend_0#pool_2", - "total": 500, - "free": 380, - "allocated": 120, - "provisioned": 120, - "virtual_free": 380, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json deleted file mode 100644 index ac0739e..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "error", - "volume_attachment": [], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json deleted file mode 100644 index 2f2a2a1..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.attach.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "40", - "status": "in-use", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - 
"snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json deleted file mode 100644 index 089da52..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "available", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json deleted file mode 100644 index a772f35..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_2@backend_2#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "host_2@backend_2#pool_0", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "available", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json deleted file mode 100644 index 
6d09d7f..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.delete.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "40", - "status": "deleting", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json deleted file mode 100644 index f2d635d..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.detach.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "40", - "status": "available", - "volume_attachment": [], - "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json deleted file mode 100644 index 6b4597f..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.resize.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "20", - "status": "in-use", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], 
- "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json deleted file mode 100644 index b846ddc..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.update.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_01", - "size": "40", - "status": "enabled", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/service-update.json b/watcher/tests/decision_engine/model/notification/data/service-update.json deleted file mode 100644 index 1baf63a..0000000 --- a/watcher/tests/decision_engine/model/notification/data/service-update.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "nova_object.namespace": "nova", - "nova_object.name": "ServiceStatusPayload", - "nova_object.version": "1.0", - "nova_object.data": { - "host": "host1", - "disabled": false, - "last_seen_up": "2012-10-29T13:42:05Z", - "binary": "nova-compute", - "topic": "compute", - "disabled_reason": null, - "report_count": 1, - "forced_down": false, - "version": 15 - } - }, - "event_type": "service.update", - "publisher_id": "nova-compute:host1" -} diff --git a/watcher/tests/decision_engine/model/notification/fake_managers.py b/watcher/tests/decision_engine/model/notification/fake_managers.py deleted file mode 100644 index 0d196f3..0000000 --- a/watcher/tests/decision_engine/model/notification/fake_managers.py +++ 
/dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import service_manager -from watcher.decision_engine.model.notification import cinder as cnotification -from watcher.decision_engine.model.notification import nova as novanotification -from watcher.tests.decision_engine.model import faker_cluster_state - - -class FakeManager(service_manager.ServiceManager): - - API_VERSION = '1.0' - - fake_cdmc = faker_cluster_state.FakerModelCollector() - - @property - def service_name(self): - return 'watcher-fake' - - @property - def api_version(self): - return self.API_VERSION - - @property - def publisher_id(self): - return 'test_publisher_id' - - @property - def conductor_topic(self): - return 'test_conductor_topic' - - @property - def notification_topics(self): - return ['nova'] - - @property - def conductor_endpoints(self): - return [] # Disable audit endpoint - - @property - def notification_endpoints(self): - return [ - novanotification.ServiceUpdated(self.fake_cdmc), - - novanotification.InstanceCreated(self.fake_cdmc), - novanotification.InstanceUpdated(self.fake_cdmc), - novanotification.InstanceDeletedEnd(self.fake_cdmc), - - novanotification.LegacyInstanceCreatedEnd(self.fake_cdmc), - novanotification.LegacyInstanceUpdated(self.fake_cdmc), - novanotification.LegacyLiveMigratedEnd(self.fake_cdmc), - 
novanotification.LegacyInstanceDeletedEnd(self.fake_cdmc), - ] - - -class FakeStorageManager(FakeManager): - - fake_cdmc = faker_cluster_state.FakerStorageModelCollector() - - @property - def notification_endpoints(self): - return [ - cnotification.CapacityNotificationEndpoint(self.fake_cdmc), - cnotification.VolumeCreateEnd(self.fake_cdmc), - cnotification.VolumeUpdateEnd(self.fake_cdmc), - cnotification.VolumeDeleteEnd(self.fake_cdmc), - cnotification.VolumeAttachEnd(self.fake_cdmc), - cnotification.VolumeDetachEnd(self.fake_cdmc), - cnotification.VolumeResizeEnd(self.fake_cdmc), - ] diff --git a/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py b/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py deleted file mode 100644 index 020ef66..0000000 --- a/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py +++ /dev/null @@ -1,607 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import os - -import mock -from oslo_serialization import jsonutils - -from watcher.common import cinder_helper -from watcher.common import context -from watcher.common import exception -from watcher.common import service as watcher_service -from watcher.db.sqlalchemy import api as db_api -from watcher.decision_engine.model.notification import cinder as cnotification -from watcher.tests import base as base_test -from watcher.tests.db import utils -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model.notification import fake_managers - - -class NotificationTestCase(base_test.TestCase): - - @staticmethod - def load_message(filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as json_file: - json_data = jsonutils.load(json_file) - - return json_data - - -class TestReceiveCinderNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestReceiveCinderNotifications, self).setUp() - - p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') - m_from_dict = p_from_dict.start() - m_from_dict.return_value = self.context - self.addCleanup(p_from_dict.stop) - - p_get_service_list = mock.patch.object( - db_api.Connection, 'get_service_list') - p_update_service = mock.patch.object( - db_api.Connection, 'update_service') - m_get_service_list = p_get_service_list.start() - m_update_service = p_update_service.start() - fake_service = utils.get_test_service( - created_at=datetime.datetime.utcnow()) - - m_get_service_list.return_value = [fake_service] - m_update_service.return_value = fake_service.copy() - - self.addCleanup(p_get_service_list.stop) - self.addCleanup(p_update_service.stop) - - @mock.patch.object(cnotification.CapacityNotificationEndpoint, 'info') - def test_cinder_receive_capacity(self, m_info): - message = 
self.load_message('capacity.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'capacity.host1@backend1#pool1', 'capacity.pool', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeCreateEnd, 'info') - def test_cinder_receive_volume_create_end(self, m_info): - message = self.load_message('scenario_1_volume-create.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.create.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeUpdateEnd, 'info') - def test_cinder_receive_volume_update_end(self, m_info): - message = self.load_message('scenario_1_volume-update.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.update.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeAttachEnd, 'info') - def test_cinder_receive_volume_attach_end(self, m_info): - message = self.load_message('scenario_1_volume-attach.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - 
de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.attach.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeDetachEnd, 'info') - def test_cinder_receive_volume_detach_end(self, m_info): - message = self.load_message('scenario_1_volume-detach.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.detach.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeResizeEnd, 'info') - def test_cinder_receive_volume_resize_end(self, m_info): - message = self.load_message('scenario_1_volume-resize.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.resize.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeDeleteEnd, 'info') - def test_cinder_receive_volume_delete_end(self, m_info): - message = self.load_message('scenario_1_volume-delete.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.delete.end', expected_message, self.FAKE_METADATA) - - -class 
TestCinderNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestCinderNotifications, self).setUp() - # fake cluster - self.fake_cdmc = faker_cluster_state.FakerStorageModelCollector() - - def test_cinder_capacity(self): - """test consuming capacity""" - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) - - pool_0_name = 'host_0@backend_0#pool_0' - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - - # before - self.assertEqual(pool_0_name, pool_0.name) - self.assertEqual(420, pool_0.free_capacity_gb) - self.assertEqual(420, pool_0.virtual_free) - self.assertEqual(80, pool_0.allocated_capacity_gb) - self.assertEqual(80, pool_0.provisioned_capacity_gb) - - message = self.load_message('scenario_1_capacity.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # after - self.assertEqual(pool_0_name, pool_0.name) - self.assertEqual(460, pool_0.free_capacity_gb) - self.assertEqual(460, pool_0.virtual_free) - self.assertEqual(40, pool_0.allocated_capacity_gb) - self.assertEqual(40, pool_0.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_capacity_pool_notfound(self, m_cinder_helper): - """test consuming capacity, new pool in existing node""" - - # storage_pool_by_name mock - return_mock = mock.Mock() - return_mock.configure_mock( - name='host_0@backend_0#pool_2', - total_volumes='2', - total_capacity_gb='500', - free_capacity_gb='380', - provisioned_capacity_gb='120', - allocated_capacity_gb='120') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - 
storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) - - message = self.load_message('scenario_1_capacity_pool_notfound.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # after consuming message, still pool_0 exists - pool_0_name = 'host_0@backend_0#pool_0' - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0_name, pool_0.name) - self.assertEqual(420, pool_0.free_capacity_gb) - self.assertEqual(420, pool_0.virtual_free) - self.assertEqual(80, pool_0.allocated_capacity_gb) - self.assertEqual(80, pool_0.provisioned_capacity_gb) - - # new pool was added - pool_1_name = 'host_0@backend_0#pool_2' - m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) - storage_node = storage_model.get_node_by_pool_name(pool_1_name) - self.assertEqual('host_0@backend_0', storage_node.host) - pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) - self.assertEqual(pool_1_name, pool_1.name) - self.assertEqual(500, pool_1.total_capacity_gb) - self.assertEqual(380, pool_1.free_capacity_gb) - self.assertEqual(120, pool_1.allocated_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_capacity_node_notfound(self, m_cinder_helper): - """test consuming capacity, new pool in new node""" - - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_2@backend_2#pool_0', - total_volumes='2', - total_capacity_gb='500', - free_capacity_gb='460', - provisioned_capacity_gb='40', - allocated_capacity_gb='40') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - # storage_node_by_name mock - return_node_mock = mock.Mock() - return_node_mock.configure_mock( - host='host_2@backend_2', - zone='nova', - 
state='up', - status='enabled') - - m_get_storage_node_by_name = mock.Mock( - side_effect=lambda name: return_node_mock) - - m_get_volume_type_by_backendname = mock.Mock( - side_effect=lambda name: mock.Mock('backend_2')) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name, - get_storage_node_by_name=m_get_storage_node_by_name, - get_volume_type_by_backendname=m_get_volume_type_by_backendname) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) - - message = self.load_message('scenario_1_capacity_node_notfound.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # new pool and new node was added - node_1_name = 'host_2@backend_2' - pool_1_name = node_1_name + '#pool_0' - volume_type = 'backend_2' - m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) - m_get_storage_node_by_name.assert_called_once_with(node_1_name) - m_get_volume_type_by_backendname.assert_called_once_with(volume_type) - # new node was added - storage_node = storage_model.get_node_by_pool_name(pool_1_name) - self.assertEqual('host_2@backend_2', storage_node.host) - # new pool was added - pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) - self.assertEqual(pool_1_name, pool_1.name) - self.assertEqual(500, pool_1.total_capacity_gb) - self.assertEqual(460, pool_1.free_capacity_gb) - self.assertEqual(40, pool_1.allocated_capacity_gb) - self.assertEqual(40, pool_1.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_volume_create(self, m_cinder_helper): - """test creating volume in existing pool and node""" - - # create storage_pool_by_name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - 
name='host_0@backend_0#pool_0', - total_volumes='3', - total_capacity_gb='500', - free_capacity_gb='380', - provisioned_capacity_gb='120', - allocated_capacity_gb='120') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message('scenario_1_volume-create.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - self.assertFalse(volume_00.bootable) - # check that capacity was updated - pool_0_name = 'host_0@backend_0#pool_0' - m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(3, pool_0.total_volumes) - self.assertEqual(380, pool_0.free_capacity_gb) - self.assertEqual(120, pool_0.allocated_capacity_gb) - self.assertEqual(120, pool_0.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_bootable_volume_create(self, m_cinder_helper): - """test creating bootable volume in existing pool and node""" - - # create storage_pool_by_name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_0@backend_0#pool_0', - total_volumes='3', - total_capacity_gb='500', - free_capacity_gb='380', - provisioned_capacity_gb='120', - allocated_capacity_gb='120') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: 
return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message('scenario_1_bootable-volume-create.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - self.assertTrue(volume_00.bootable) - # check that capacity was updated - pool_0_name = 'host_0@backend_0#pool_0' - m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(3, pool_0.total_volumes) - self.assertEqual(380, pool_0.free_capacity_gb) - self.assertEqual(120, pool_0.allocated_capacity_gb) - self.assertEqual(120, pool_0.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_volume_create_pool_notfound(self, m_cinder_helper): - """check creating volume in not existing pool and node""" - - # get_storage_pool_by_name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_2@backend_2#pool_0', - total_volumes='1', - total_capacity_gb='500', - free_capacity_gb='460', - provisioned_capacity_gb='40', - allocated_capacity_gb='40') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - # create storage_node_by_name mock - return_node_mock = mock.Mock() - return_node_mock.configure_mock( - host='host_2@backend_2', - zone='nova', - state='up', - status='enabled') - - m_get_storage_node_by_name = mock.Mock( - 
side_effect=lambda name: return_node_mock) - - m_get_volume_type_by_backendname = mock.Mock( - side_effect=lambda name: mock.Mock('backend_2')) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name, - get_storage_node_by_name=m_get_storage_node_by_name, - get_volume_type_by_backendname=m_get_volume_type_by_backendname) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message( - 'scenario_1_volume-create_pool_notfound.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - # check that capacity was updated - node_2_name = 'host_2@backend_2' - pool_0_name = node_2_name + '#pool_0' - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(1, pool_0.total_volumes) - self.assertEqual(460, pool_0.free_capacity_gb) - self.assertEqual(40, pool_0.allocated_capacity_gb) - self.assertEqual(40, pool_0.provisioned_capacity_gb) - # check that node was added - m_get_storage_node_by_name.assert_called_once_with(node_2_name) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_error_volume_unmapped(self, m_cinder_helper): - """test creating error volume unmapped""" - - m_get_storage_pool_by_name = mock.Mock( - side_effect=exception.PoolNotFound(name="TEST")) - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = 
cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message('scenario_1_error-volume-create.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # we do not call get_storage_pool_by_name - m_get_storage_pool_by_name.assert_not_called() - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_volume_update(self, m_cinder_helper): - """test updating volume in existing pool and node""" - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeUpdateEnd(self.fake_cdmc) - - volume_0_name = 'VOLUME_0' - volume_0 = storage_model.get_volume_by_uuid(volume_0_name) - self.assertEqual('name_0', volume_0.name) - - # create storage_pool_by name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_0@backend_0#pool_0', - total_volumes='2', - total_capacity_gb='500', - free_capacity_gb='420', - provisioned_capacity_gb='80', - allocated_capacity_gb='80') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - message = self.load_message('scenario_1_volume-update.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that name of volume_0 was updated in the model - volume_0 = storage_model.get_volume_by_uuid(volume_0_name) - self.assertEqual('name_01', volume_0.name) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def 
test_cinder_volume_delete(self, m_cinder_helper): - """test deleting volume""" - - # create storage_pool_by name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_0@backend_0#pool_0', - total_volumes='1', - total_capacity_gb='500', - free_capacity_gb='460', - provisioned_capacity_gb='40', - allocated_capacity_gb='40') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeDeleteEnd(self.fake_cdmc) - - # volume exists before consuming - volume_0_uuid = 'VOLUME_0' - volume_0 = storage_model.get_volume_by_uuid(volume_0_uuid) - self.assertEqual(volume_0_uuid, volume_0.uuid) - - message = self.load_message('scenario_1_volume-delete.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # volume does not exists after consuming - self.assertRaises( - exception.VolumeNotFound, - storage_model.get_volume_by_uuid, volume_0_uuid) - - # check that capacity was updated - pool_0_name = 'host_0@backend_0#pool_0' - m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(1, pool_0.total_volumes) - self.assertEqual(460, pool_0.free_capacity_gb) - self.assertEqual(40, pool_0.allocated_capacity_gb) - self.assertEqual(40, pool_0.provisioned_capacity_gb) diff --git a/watcher/tests/decision_engine/model/notification/test_notifications.py b/watcher/tests/decision_engine/model/notification/test_notifications.py deleted file mode 100644 index 9e2e6fa..0000000 --- 
a/watcher/tests/decision_engine/model/notification/test_notifications.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import mock -from oslo_serialization import jsonutils - -from watcher.common import context -from watcher.common import service as watcher_service -from watcher.decision_engine.model.notification import base -from watcher.decision_engine.model.notification import filtering -from watcher.tests import base as base_test -from watcher.tests.decision_engine.model.notification import fake_managers - - -class DummyManager(fake_managers.FakeManager): - - @property - def notification_endpoints(self): - return [DummyNotification(self.fake_cdmc)] - - -class DummyNotification(base.NotificationEndpoint): - - @property - def filter_rule(self): - return filtering.NotificationFilter( - publisher_id=r'.*', - event_type=r'compute.dummy', - payload={'data': {'nested': r'^T.*'}}, - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - pass - - -class NotificationTestCase(base_test.TestCase): - - def load_message(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as json_file: - json_data = jsonutils.load(json_file) - - return json_data - - -class TestReceiveNotifications(NotificationTestCase): - - 
def setUp(self): - super(TestReceiveNotifications, self).setUp() - - p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') - m_from_dict = p_from_dict.start() - m_from_dict.return_value = self.context - self.addCleanup(p_from_dict.stop) - - @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') - @mock.patch.object(DummyNotification, 'info') - def test_receive_dummy_notification(self, m_info, m_heartbeat): - message = { - 'publisher_id': 'nova-compute', - 'event_type': 'compute.dummy', - 'payload': {'data': {'nested': 'TEST'}}, - 'priority': 'INFO', - } - de_service = watcher_service.Service(DummyManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - - m_info.assert_called_once_with( - self.context, 'nova-compute', 'compute.dummy', - {'data': {'nested': 'TEST'}}, - {'message_id': None, 'timestamp': None}) - - @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') - @mock.patch.object(DummyNotification, 'info') - def test_skip_unwanted_notification(self, m_info, m_heartbeat): - message = { - 'publisher_id': 'nova-compute', - 'event_type': 'compute.dummy', - 'payload': {'data': {'nested': 'unwanted'}}, - 'priority': 'INFO', - } - de_service = watcher_service.Service(DummyManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - - self.assertEqual(0, m_info.call_count) diff --git a/watcher/tests/decision_engine/model/notification/test_nova_notifications.py b/watcher/tests/decision_engine/model/notification/test_nova_notifications.py deleted file mode 100644 index f257dc7..0000000 --- a/watcher/tests/decision_engine/model/notification/test_nova_notifications.py +++ /dev/null @@ -1,523 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you 
may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import mock -from oslo_serialization import jsonutils - -from watcher.common import context -from watcher.common import exception -from watcher.common import nova_helper -from watcher.common import service as watcher_service -from watcher.decision_engine.model import element -from watcher.decision_engine.model.notification import nova as novanotification -from watcher.tests import base as base_test -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model.notification import fake_managers - - -class NotificationTestCase(base_test.TestCase): - - @staticmethod - def load_message(filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as json_file: - json_data = jsonutils.load(json_file) - - return json_data - - -class TestReceiveNovaNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestReceiveNovaNotifications, self).setUp() - - p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') - m_from_dict = p_from_dict.start() - m_from_dict.return_value = self.context - self.addCleanup(p_from_dict.stop) - p_heartbeat = mock.patch.object( - watcher_service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - - @mock.patch.object(novanotification.ServiceUpdated, 'info') - def 
test_nova_receive_service_update(self, m_info): - message = self.load_message('service-update.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:host1', 'service.update', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(novanotification.InstanceCreated, 'info') - def test_nova_receive_instance_create(self, m_info): - message = self.load_message('instance-create.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:compute', 'instance.update', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(novanotification.InstanceUpdated, 'info') - def test_nova_receive_instance_update(self, m_info): - message = self.load_message('instance-update.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:compute', 'instance.update', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(novanotification.InstanceDeletedEnd, 'info') - def test_nova_receive_instance_delete_end(self, m_info): - message = self.load_message('instance-delete-end.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - 
de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:compute', 'instance.delete.end', - expected_message, self.FAKE_METADATA) - - -class TestNovaNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestNovaNotifications, self).setUp() - # fake cluster - self.fake_cdmc = faker_cluster_state.FakerModelCollector() - - def test_nova_service_update(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.ServiceUpdated(self.fake_cdmc) - - node0_uuid = 'Node_0' - node0 = compute_model.get_node_by_uuid(node0_uuid) - - message = self.load_message('scenario3_service-update-disabled.json') - - self.assertEqual('hostname_0', node0.hostname) - self.assertEqual(element.ServiceState.ONLINE.value, node0.state) - self.assertEqual(element.ServiceState.ENABLED.value, node0.status) - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual('Node_0', node0.hostname) - self.assertEqual(element.ServiceState.OFFLINE.value, node0.state) - self.assertEqual(element.ServiceState.DISABLED.value, node0.status) - - message = self.load_message('scenario3_service-update-enabled.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual('Node_0', node0.hostname) - self.assertEqual(element.ServiceState.ONLINE.value, node0.state) - self.assertEqual(element.ServiceState.ENABLED.value, node0.status) - - def test_nova_instance_update(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = 
novanotification.InstanceUpdated(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - message = self.load_message('scenario3_instance-update.json') - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_nova_instance_update_notfound_still_creates( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=lambda uuid: mock.Mock( - name='m_get_compute_node_by_hostname', - id=3, - hypervisor_hostname="Node_2", - state='up', - status='enabled', - uuid=uuid, - memory_mb=7777, - vcpus=42, - free_disk_gb=974, - local_gb=1337)) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = self.load_message('scenario3_notfound_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - m_get_compute_node_by_hostname.assert_called_once_with('Node_2') - node_2 = 
compute_model.get_node_by_uuid('Node_2') - self.assertEqual(7777, node_2.memory) - self.assertEqual(42, node_2.vcpus) - self.assertEqual(974, node_2.disk) - self.assertEqual(1337, node_2.disk_capacity) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_instance_update_node_notfound_set_unmapped( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=exception.ComputeNodeNotFound(name="TEST")) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = self.load_message( - 'scenario3_notfound_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - m_get_compute_node_by_hostname.assert_any_call('Node_2') - self.assertRaises( - exception.ComputeNodeNotFound, - compute_model.get_node_by_uuid, 'Node_2') - - def test_nova_instance_create(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceCreated(self.fake_cdmc) - - instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2' - - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) - - message = self.load_message('scenario3_instance-create.json') - 
handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - def test_nova_instance_delete_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceDeletedEnd(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - - # Before - self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) - - message = self.load_message('scenario3_instance-delete-end.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # After - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) - - -class TestLegacyNovaNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestLegacyNovaNotifications, self).setUp() - # fake cluster - self.fake_cdmc = faker_cluster_state.FakerModelCollector() - - def test_legacy_instance_created_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceCreatedEnd(self.fake_cdmc) - - instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2' - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) - - message = self.load_message( - 'scenario3_legacy_instance-create-end.json') - - handler.info( - ctxt=self.context, - 
publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - def test_legacy_instance_updated(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - message = self.load_message('scenario3_legacy_instance-update.json') - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_legacy_instance_update_node_notfound_still_creates( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=lambda uuid: mock.Mock( - name='m_get_compute_node_by_hostname', - id=3, - uuid=uuid, - hypervisor_hostname="Node_2", - state='up', - status='enabled', - memory_mb=7777, - vcpus=42, - free_disk_gb=974, - local_gb=1337)) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = 
self.load_message( - 'scenario3_notfound_legacy_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - m_get_compute_node_by_hostname.assert_any_call('Node_2') - node_2 = compute_model.get_node_by_uuid('Node_2') - self.assertEqual(7777, node_2.memory) - self.assertEqual(42, node_2.vcpus) - self.assertEqual(974, node_2.disk) - self.assertEqual(1337, node_2.disk_capacity) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_legacy_instance_update_node_notfound_set_unmapped( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=exception.ComputeNodeNotFound) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = self.load_message( - 'scenario3_notfound_legacy_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, 
instance0.memory) - - m_get_compute_node_by_hostname.assert_any_call('Node_2') - self.assertRaises( - exception.ComputeNodeNotFound, - compute_model.get_node_by_uuid, 'Node_2') - - def test_legacy_live_migrated_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyLiveMigratedEnd(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - node = compute_model.get_node_by_instance_uuid(instance0_uuid) - self.assertEqual('Node_0', node.uuid) - - message = self.load_message( - 'scenario3_legacy_livemigration-post-dest-end.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - node = compute_model.get_node_by_instance_uuid(instance0_uuid) - self.assertEqual('Node_1', node.uuid) - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - - def test_legacy_instance_deleted_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceDeletedEnd(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - - # Before - self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) - - message = self.load_message( - 'scenario3_legacy_instance-delete-end.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # After - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) diff --git a/watcher/tests/decision_engine/model/test_element.py b/watcher/tests/decision_engine/model/test_element.py deleted file mode 100644 index 
1df7324..0000000 --- a/watcher/tests/decision_engine/model/test_element.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.model import element -from watcher.tests import base - - -class TestElement(base.TestCase): - - scenarios = [ - ("ComputeNode_with_all_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'hostname': 'hostname', - 'human_id': 'human_id', - 'memory': 111, - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ("ComputeNode_with_some_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ("Instance_with_all_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'hostname': 'hostname', - 'human_id': 'human_id', - 'memory': 111, - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ("Instance_with_some_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ] - - def test_as_xml_element(self): - el = self.cls(**self.data) - el.as_xml_element() - - -class TestStorageElement(base.TestCase): - - scenarios = [ - ("StorageNode_with_all_fields", dict( - cls=element.StorageNode, - data={ - 'host': 'host@backend', - 
'zone': 'zone', - 'status': 'enabled', - 'state': 'up', - 'volume_type': 'volume_type', - })), - ("Pool_with_all_fields", dict( - cls=element.Pool, - data={ - 'name': 'host@backend#pool', - 'total_volumes': 1, - 'total_capacity_gb': 500, - 'free_capacity_gb': 420, - 'provisioned_capacity_gb': 80, - 'allocated_capacity_gb': 80, - 'virtual_free': 420, - })), - ("Pool_without_virtual_free_fields", dict( - cls=element.Pool, - data={ - 'name': 'host@backend#pool', - 'total_volumes': 1, - 'total_capacity_gb': 500, - 'free_capacity_gb': 420, - 'provisioned_capacity_gb': 80, - 'allocated_capacity_gb': 80, - })), - ("Volume_with_all_fields", dict( - cls=element.Volume, - data={ - 'uuid': 'FAKE_UUID', - 'size': 1, - 'status': 'in-use', - 'attachments': '[{"key": "value"}]', - 'name': 'name', - 'multiattach': 'false', - 'snapshot_id': '', - 'project_id': 'project_id', - 'metadata': '{"key": "value"}', - 'bootable': 'false', - 'human_id': 'human_id', - })), - ("Volume_without_bootable_fields", dict( - cls=element.Volume, - data={ - 'uuid': 'FAKE_UUID', - 'size': 1, - 'status': 'in-use', - 'attachments': '[]', - 'name': 'name', - 'multiattach': 'false', - 'snapshot_id': '', - 'project_id': 'project_id', - 'metadata': '{"key": "value"}', - 'human_id': 'human_id', - })), - ("Volume_without_human_id_fields", dict( - cls=element.Volume, - data={ - 'uuid': 'FAKE_UUID', - 'size': 1, - 'status': 'in-use', - 'attachments': '[]', - 'name': 'name', - 'multiattach': 'false', - 'snapshot_id': '', - 'project_id': 'project_id', - 'metadata': '{"key": "value"}', - })), - ] - - def test_as_xml_element(self): - el = self.cls(**self.data) - el.as_xml_element() diff --git a/watcher/tests/decision_engine/model/test_model.py b/watcher/tests/decision_engine/model/test_model.py deleted file mode 100644 index c4cacb9..0000000 --- a/watcher/tests/decision_engine/model/test_model.py +++ /dev/null @@ -1,369 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from oslo_utils import uuidutils - -from watcher.common import exception -from watcher.decision_engine.model import element -from watcher.decision_engine.model import model_root -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestModel(base.TestCase): - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return model_root.ModelRoot.from_xml(self.load_data(filename)) - - def test_model_structure(self): - fake_cluster = faker_cluster_state.FakerModelCollector() - model1 = fake_cluster.build_scenario_1() - - self.assertEqual(5, len(model1.get_all_compute_nodes())) - self.assertEqual(35, len(model1.get_all_instances())) - self.assertEqual(8, len(model1.edges())) - - expected_struct_str = self.load_data('scenario_1.xml') - model2 = model_root.ModelRoot.from_xml(expected_struct_str) - - self.assertTrue(model_root.ModelRoot.is_isomorphic(model2, model1)) - - def test_build_model_from_xml(self): - fake_cluster = faker_cluster_state.FakerModelCollector() - - expected_model = fake_cluster.generate_scenario_1() - struct_str = self.load_data('scenario_1.xml') - - model = model_root.ModelRoot.from_xml(struct_str) - 
self.assertEqual(expected_model.to_string(), model.to_string()) - - def test_get_node_by_instance_uuid(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertEqual(node, model.get_node_by_uuid(uuid_)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - instance = element.Instance(id=1) - instance.uuid = uuid_ - model.add_instance(instance) - self.assertEqual(instance, model.get_instance_by_uuid(uuid_)) - model.map_instance(instance, node) - self.assertEqual(node, model.get_node_by_instance_uuid(instance.uuid)) - - def test_add_node(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertEqual(node, model.get_node_by_uuid(uuid_)) - - def test_delete_node(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertEqual(node, model.get_node_by_uuid(uuid_)) - model.remove_node(node) - self.assertRaises(exception.ComputeNodeNotFound, - model.get_node_by_uuid, uuid_) - - def test_get_all_compute_nodes(self): - model = model_root.ModelRoot() - for id_ in range(10): - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id_) - node.uuid = uuid_ - model.add_node(node) - all_nodes = model.get_all_compute_nodes() - for uuid_ in all_nodes: - node = model.get_node_by_uuid(uuid_) - model.assert_node(node) - - def test_set_get_state_nodes(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - - self.assertIn(node.state, [el.value for el in element.ServiceState]) - - node = model.get_node_by_uuid(uuid_) - node.state = element.ServiceState.OFFLINE.value - 
self.assertIn(node.state, [el.value for el in element.ServiceState]) - - def test_node_from_uuid_raise(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - - uuid2 = "{0}".format(uuidutils.generate_uuid()) - self.assertRaises(exception.ComputeNodeNotFound, - model.get_node_by_uuid, uuid2) - - def test_remove_node_raise(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - - uuid2 = "{0}".format(uuidutils.generate_uuid()) - node2 = element.ComputeNode(id=2) - node2.uuid = uuid2 - - self.assertRaises(exception.ComputeNodeNotFound, - model.remove_node, node2) - - def test_assert_node_raise(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertRaises(exception.IllegalArgumentException, - model.assert_node, "objet_qcq") - - def test_instance_from_uuid_raise(self): - fake_cluster = faker_cluster_state.FakerModelCollector() - model = fake_cluster.generate_scenario_1() - self.assertRaises(exception.InstanceNotFound, - model.get_instance_by_uuid, "valeur_qcq") - - def test_assert_instance_raise(self): - model = model_root.ModelRoot() - self.assertRaises(exception.IllegalArgumentException, - model.assert_instance, "valeur_qcq") - - -class TestStorageModel(base.TestCase): - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return model_root.StorageModelRoot.from_xml(self.load_data(filename)) - - def test_model_structure(self): - fake_cluster = faker_cluster_state.FakerStorageModelCollector() - 
model1 = fake_cluster.build_scenario_1() - - self.assertEqual(2, len(model1.get_all_storage_nodes())) - self.assertEqual(9, len(model1.get_all_volumes())) - self.assertEqual(12, len(model1.edges())) - - expected_struct_str = self.load_data('storage_scenario_1.xml') - model2 = model_root.StorageModelRoot.from_xml(expected_struct_str) - self.assertTrue( - model_root.StorageModelRoot.is_isomorphic(model2, model1)) - - def test_build_model_from_xml(self): - fake_cluster = faker_cluster_state.FakerStorageModelCollector() - - expected_model = fake_cluster.generate_scenario_1() - struct_str = self.load_data('storage_scenario_1.xml') - - model = model_root.StorageModelRoot.from_xml(struct_str) - self.assertEqual(expected_model.to_string(), model.to_string()) - - def test_assert_node_raise(self): - model = model_root.StorageModelRoot() - node = element.StorageNode(host="host@backend") - model.add_node(node) - self.assertRaises(exception.IllegalArgumentException, - model.assert_node, "obj") - - def test_assert_pool_raise(self): - model = model_root.StorageModelRoot() - pool = element.Pool(name="host@backend#pool") - model.add_pool(pool) - self.assertRaises(exception.IllegalArgumentException, - model.assert_pool, "obj") - - def test_assert_volume_raise(self): - model = model_root.StorageModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertRaises(exception.IllegalArgumentException, - model.assert_volume, "obj") - - def test_add_node(self): - model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - - def test_add_pool(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - - def test_remove_node(self): - 
model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - model.remove_node(node) - self.assertRaises(exception.StorageNodeNotFound, - model.get_node_by_name, hostname) - - def test_remove_pool(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - model.remove_pool(pool) - self.assertRaises(exception.PoolNotFound, - model.get_pool_by_pool_name, pool_name) - - def test_map_unmap_pool(self): - model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - model.map_pool(pool, node) - self.assertTrue(pool.name in model.predecessors(node.host)) - model.unmap_pool(pool, node) - self.assertFalse(pool.name in model.predecessors(node.host)) - - def test_add_volume(self): - model = model_root.StorageModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - - def test_remove_volume(self): - model = model_root.StorageModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.remove_volume(volume) - self.assertRaises(exception.VolumeNotFound, - model.get_volume_by_uuid, uuid_) - - def test_map_unmap_volume(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - 
model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.map_volume(volume, pool) - self.assertTrue(volume.uuid in model.predecessors(pool.name)) - model.unmap_volume(volume, pool) - self.assertFalse(volume.uuid in model.predecessors(pool.name)) - - def test_get_all_storage_nodes(self): - model = model_root.StorageModelRoot() - for i in range(10): - hostname = "host_{0}".format(i) - node = element.StorageNode(host=hostname) - model.add_node(node) - all_nodes = model.get_all_storage_nodes() - for hostname in all_nodes: - node = model.get_node_by_name(hostname) - model.assert_node(node) - - def test_get_all_volumes(self): - model = model_root.StorageModelRoot() - for id_ in range(10): - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - all_volumes = model.get_all_volumes() - for vol in all_volumes: - volume = model.get_volume_by_uuid(vol) - model.assert_volume(volume) - - def test_get_node_pools(self): - model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - model.map_pool(pool, node) - self.assertEqual([pool], model.get_node_pools(node)) - - def test_get_pool_by_volume(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - 
self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.map_volume(volume, pool) - self.assertEqual(pool, model.get_pool_by_volume(volume)) - - def test_get_pool_volumes(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.map_volume(volume, pool) - self.assertEqual([volume], model.get_pool_volumes(pool)) diff --git a/watcher/tests/decision_engine/planner/__init__.py b/watcher/tests/decision_engine/planner/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/planner/test_planner_manager.py b/watcher/tests/decision_engine/planner/test_planner_manager.py deleted file mode 100644 index 7d030f1..0000000 --- a/watcher/tests/decision_engine/planner/test_planner_manager.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -from watcher.decision_engine.planner import manager as planner -from watcher.decision_engine.planner import weight -from watcher.tests import base - - -class TestPlannerManager(base.TestCase): - def test_load(self): - cfg.CONF.set_override('planner', "weight", group='watcher_planner') - manager = planner.PlannerManager() - self.assertIsInstance(manager.load(), weight.WeightPlanner) diff --git a/watcher/tests/decision_engine/planner/test_weight_planner.py b/watcher/tests/decision_engine/planner/test_weight_planner.py deleted file mode 100644 index 3c9cc48..0000000 --- a/watcher/tests/decision_engine/planner/test_weight_planner.py +++ /dev/null @@ -1,944 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import nova_helper -from watcher.common import utils -from watcher.db import api as db_api -from watcher.decision_engine.planner import weight as pbase -from watcher.decision_engine.solution import default as dsol -from watcher.decision_engine.strategy import strategies -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils as db_utils -from watcher.tests.decision_engine.model import ceilometer_metrics as fake -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.objects import utils as obj_utils - - -class SolutionFaker(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon.compute_model = current_state_cluster.generate_scenario_1() - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - return sercon.execute() - - -class SolutionFakerSingleHyp(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon.compute_model = ( - current_state_cluster.generate_scenario_3_with_2_nodes()) - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - - return sercon.execute() - - -class TestActionScheduling(base.DbTestCase): - - def setUp(self): - super(TestActionScheduling, self).setUp() - self.goal = db_utils.create_test_goal(name="dummy") - self.strategy = db_utils.create_test_strategy(name="dummy") - self.audit = db_utils.create_test_audit( - uuid=utils.generate_uuid(), strategy_id=self.strategy.id) - self.planner = pbase.WeightPlanner( - mock.Mock( - weights={ - 'turn_host_to_acpi_s3_state': 10, - 'resize': 20, - 'migrate': 30, - 'sleep': 40, - 'change_nova_service_state': 50, - 
'nop': 60, - 'new_action_type': 70, - }, - parallelization={ - 'turn_host_to_acpi_s3_state': 2, - 'resize': 2, - 'migrate': 2, - 'sleep': 1, - 'change_nova_service_state': 1, - 'nop': 1, - 'new_action_type': 70, - })) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_actions(self, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", - "33333333-3333-3333-3333-333333333333", - # "44444444-4444-4444-4444-444444444444", - # "55555555-5555-5555-5555-555555555555", - # "66666666-6666-6666-6666-666666666666", - # "77777777-7777-7777-7777-777777777777", - # "88888888-8888-8888-8888-888888888888", - # "99999999-9999-9999-9999-999999999999", - ] - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - self.planner.config.weights = {'migrate': 3} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - - self.assertIsNotNone(action_plan.uuid) - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = [] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), 
len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_two_actions(self, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", - "22222222-2222-2222-2222-222222222222", - "33333333-3333-3333-3333-333333333333", - "44444444-4444-4444-4444-444444444444", # Migrate 1 - "55555555-5555-5555-5555-555555555555", # Nop 1 - ] - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - # We create the migrate action before but we then schedule - # after the nop action - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="nop", - input_parameters={"message": "Hello world"}) - - self.planner.config.weights = {'migrate': 3, 'nop': 5} - - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - - self.assertIsNotNone(action_plan.uuid) - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'nop', - 'parents': [], - 'uuid': '55555555-5555-5555-5555-555555555555'}, - {'action_type': 'migrate', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 
'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_actions_with_unknown_action(self, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # new_action_type - "33333333-3333-3333-3333-333333333333", - - ] - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "src_uuid_node": "server1", - "dst_uuid_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="new_action_type", - resource_id="", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'new_action_type', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['22222222-2222-2222-2222-222222222222'], - 'uuid': '11111111-1111-1111-1111-111111111111'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - 
self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') - def test_schedule_migrate_resize_actions(self, m_nova, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Migrate 4 - "55555555-5555-5555-5555-555555555555", # Migrate 5 - "66666666-6666-6666-6666-666666666666", # Resize 1 - "77777777-7777-7777-7777-777777777777", # Resize 2 - "88888888-8888-8888-8888-888888888888", # Nop - "99999999-9999-9999-9999-999999999999", - ] - m_nova.return_value = 'server1' - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={"flavor": "x1"}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111'], - 'uuid': '22222222-2222-2222-2222-222222222222'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst 
in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_3_migrate_1_resize_1_acpi_actions_1_swimlane( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 1 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - "88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - "destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = 
self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111'], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111'], - 'uuid': '22222222-2222-2222-2222-222222222222'}), - ({'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': ['22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_migrate_resize_acpi_actions_2_swimlanes( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 2 - m_generate_uuid.side_effect = [ - 
"00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - "88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - "destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': 
'33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_migrate_resize_acpi_actions_3_swimlanes( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 3 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - 
"88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - "destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': 
'44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_three_migrate_two_resize_actions( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 3 - self.planner.config.parallelization["resize"] = 2 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - "88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - 
"destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="resize", - resource_id="b189db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters={'flavor': 'x1'}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - 
'33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_5_migrate_2_resize_actions_for_2_swimlanes( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 2 - self.planner.config.parallelization["resize"] = 2 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - 
"44444444-4444-4444-4444-444444444444", # Migrate 4 - "55555555-5555-5555-5555-555555555555", # Migrate 5 - "66666666-6666-6666-6666-666666666666", # Resize 1 - "77777777-7777-7777-7777-777777777777", # Resize 2 - "88888888-8888-8888-8888-888888888888", # Nop - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server3", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server4", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server5", - "destination_node": "server6"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x2'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="DOESNOTMATTER") - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': 
'11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}, - {'action_type': 'resize', - 'parents': 
['55555555-5555-5555-5555-555555555555'], - 'uuid': '66666666-6666-6666-6666-666666666666'}), - ({'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}, - {'action_type': 'resize', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '77777777-7777-7777-7777-777777777777'}), - ({'action_type': 'resize', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '66666666-6666-6666-6666-666666666666'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['66666666-6666-6666-6666-666666666666', - '77777777-7777-7777-7777-777777777777'], - 'uuid': '88888888-8888-8888-8888-888888888888'}), - ({'action_type': 'resize', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '77777777-7777-7777-7777-777777777777'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['66666666-6666-6666-6666-666666666666', - '77777777-7777-7777-7777-777777777777'], - 'uuid': '88888888-8888-8888-8888-888888888888'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - -class TestWeightPlanner(base.DbTestCase): - - def setUp(self): - super(TestWeightPlanner, self).setUp() - self.planner = pbase.WeightPlanner(mock.Mock()) - self.planner.config.weights = { - 'nop': 0, - 'sleep': 1, - 'change_nova_service_state': 2, - 'migrate': 3 - } - - self.goal = obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy( - self.context, goal_id=self.goal.id) - obj_utils.create_test_audit_template( - self.context, goal_id=self.goal.id, 
strategy_id=self.strategy.id) - - p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') - self.mock_create_action_plan = p.start() - self.mock_create_action_plan.side_effect = ( - self._simulate_action_plan_create) - self.addCleanup(p.stop) - - q = mock.patch.object(db_api.BaseConnection, 'create_action') - self.mock_create_action = q.start() - self.mock_create_action.side_effect = ( - self._simulate_action_create) - self.addCleanup(q.stop) - - def _simulate_action_plan_create(self, action_plan): - action_plan.create() - return action_plan - - def _simulate_action_create(self, action): - action.create() - return action - - @mock.patch.object(objects.Strategy, 'get_by_name') - def test_scheduler_warning_empty_action_plan(self, m_get_by_name): - m_get_by_name.return_value = self.strategy - audit = db_utils.create_test_audit( - goal_id=self.goal.id, strategy_id=self.strategy.id) - fake_solution = mock.MagicMock(efficacy_indicators=[], - actions=[]) - action_plan = self.planner.schedule( - self.context, audit.id, fake_solution) - self.assertIsNotNone(action_plan.uuid) diff --git a/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py b/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py deleted file mode 100644 index 4956fef..0000000 --- a/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py +++ /dev/null @@ -1,379 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.common import utils -from watcher.db import api as db_api -from watcher.decision_engine.planner import workload_stabilization as pbase -from watcher.decision_engine.solution import default as dsol -from watcher.decision_engine.strategy import strategies -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils as db_utils -from watcher.tests.decision_engine.model import ceilometer_metrics as fake -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.objects import utils as obj_utils - - -class SolutionFaker(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon._compute_model = current_state_cluster.generate_scenario_1() - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - return sercon.execute() - - -class SolutionFakerSingleHyp(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon._compute_model = ( - current_state_cluster.generate_scenario_3_with_2_nodes()) - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - - return sercon.execute() - - -class TestActionScheduling(base.DbTestCase): - - def setUp(self): - super(TestActionScheduling, self).setUp() - self.goal = db_utils.create_test_goal(name="dummy") - self.strategy = db_utils.create_test_strategy(name="dummy") - self.audit = db_utils.create_test_audit( - uuid=utils.generate_uuid(), strategy_id=self.strategy.id) - self.planner = 
pbase.WorkloadStabilizationPlanner(mock.Mock()) - self.nova_helper = nova_helper.NovaHelper(mock.Mock()) - - def test_schedule_actions(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - self.planner.config.weights = {'migrate': 3} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_action.call_count) - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("migrate", actions[0].action_type) - - def test_schedule_two_actions(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="nop", - input_parameters={"message": "Hello world"}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - self.planner.config.weights = {'migrate': 3, 'nop': 5} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(2, m_create_action.call_count) - # check order - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("nop", actions[0].action_type) - self.assertEqual("migrate", 
actions[1].action_type) - - def test_schedule_actions_with_unknown_action(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "src_uuid_node": "server1", - "dst_uuid_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="new_action_type", - resource_id="", - input_parameters={}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: - self.planner.config.weights = {'migrate': 0} - self.assertRaises(KeyError, self.planner.schedule, - self.context, self.audit.id, solution) - assert not m_nova.called - self.assertEqual(2, m_create_action.call_count) - - def test_schedule_actions_with_unsupported_action(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "src_uuid_node": "server1", - "dst_uuid_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="new_action_type", - resource_id="", - input_parameters={}) - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: - self.planner.config.weights = { - 'turn_host_to_acpi_s3_state': 0, - 'resize': 1, - 'migrate': 2, - 'sleep': 3, - 'change_nova_service_state': 4, - 'nop': 5, - 'new_action_type': 6} - self.assertRaises(exception.UnsupportedActionType, - self.planner.schedule, - self.context, self.audit.id, solution) - assert not m_nova.called - self.assertEqual(2, m_create_action.call_count) - - @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') - 
def test_schedule_migrate_resize_actions(self, mock_nova): - mock_nova.return_value = 'server1' - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="resize", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters={"flavor": "x1"}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: - self.planner.config.weights = {'migrate': 3, 'resize': 2} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertEqual(1, m_nova.call_count) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(2, m_create_action.call_count) - # check order - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("migrate", actions[0].action_type) - self.assertEqual("resize", actions[1].action_type) - self.assertEqual(actions[0].uuid, actions[1].parents[0]) - - def test_schedule_migrate_resize_acpi_s3_actions(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - parent_migration = "b199db0c-1408-4d52-b5a5-5ca14de0ff36" - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="resize", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="migrate", - resource_id="f6416850-da28-4047-a547-8c49f53e95fe", - input_parameters={"source_node": 
"server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="bb404e74-2caf-447b-bd1e-9234db386ca5", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object( - nova_helper, 'NovaHelper') as m_nova: - m_nova().get_hostname.return_value = 'server1' - m_nova().get_instance_by_uuid.return_value = ['uuid1'] - self.planner.config.weights = { - 'turn_host_to_acpi_s3_state': 0, - 'resize': 1, - 'migrate': 2, - 'sleep': 3, - 'change_nova_service_state': 4, - 'nop': 5} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertEqual(3, m_nova.call_count) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(5, m_create_action.call_count) - # check order - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("migrate", actions[0].action_type) - self.assertEqual("migrate", actions[1].action_type) - self.assertEqual("migrate", actions[2].action_type) - self.assertEqual("resize", actions[3].action_type) - self.assertEqual("turn_host_to_acpi_s3_state", actions[4].action_type) - for action in actions: - if action.input_parameters['resource_id'] == parent_migration: - parent_migration = action - break - self.assertEqual(parent_migration.uuid, actions[3].parents[0]) - - -class TestDefaultPlanner(base.DbTestCase): - - def setUp(self): - super(TestDefaultPlanner, self).setUp() - self.planner = pbase.WorkloadStabilizationPlanner(mock.Mock()) - self.planner.config.weights = { - 'nop': 0, - 'sleep': 1, - 'change_nova_service_state': 2, - 'migrate': 3 - } - - self.goal = obj_utils.create_test_goal(self.context) - 
self.strategy = obj_utils.create_test_strategy( - self.context, goal_id=self.goal.id) - obj_utils.create_test_audit_template( - self.context, goal_id=self.goal.id, strategy_id=self.strategy.id) - - p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') - self.mock_create_action_plan = p.start() - self.mock_create_action_plan.side_effect = ( - self._simulate_action_plan_create) - self.addCleanup(p.stop) - - q = mock.patch.object(db_api.BaseConnection, 'create_action') - self.mock_create_action = q.start() - self.mock_create_action.side_effect = ( - self._simulate_action_create) - self.addCleanup(q.stop) - - def _simulate_action_plan_create(self, action_plan): - action_plan.create() - return action_plan - - def _simulate_action_create(self, action): - action.create() - return action - - @mock.patch.object(objects.Strategy, 'get_by_name') - def test_scheduler_warning_empty_action_plan(self, m_get_by_name): - m_get_by_name.return_value = self.strategy - audit = db_utils.create_test_audit( - goal_id=self.goal.id, strategy_id=self.strategy.id) - fake_solution = mock.MagicMock(efficacy_indicators=[], - actions=[]) - action_plan = self.planner.schedule( - self.context, audit.id, fake_solution) - self.assertIsNotNone(action_plan.uuid) - - -class TestActionValidator(base.DbTestCase): - INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" - - def setUp(self): - super(TestActionValidator, self).setUp() - self.r_osc_cls = mock.Mock() - self.r_helper_cls = mock.Mock() - self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.r_helper_cls.return_value = self.r_helper - r_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.r_helper_cls) - - r_nova_helper.start() - - self.addCleanup(r_nova_helper.stop) - - def test_resize_validate_parents(self): - resize_object = pbase.ResizeActionValidator() - action = {'uuid': 'fcec56cd-74c1-406b-a7c1-81ef9f0c1393', - 'input_parameters': {'resource_id': self.INSTANCE_UUID}} - resource_action_map = 
{self.INSTANCE_UUID: [ - ('action_uuid', 'migrate')]} - self.r_helper.get_hostname.return_value = 'server1' - self.r_helper.get_instance_by_uuid.return_value = ['instance'] - result = resize_object.validate_parents(resource_action_map, action) - self.assertEqual('action_uuid', result[0]) - - def test_migrate_validate_parents(self): - migrate_object = pbase.MigrationActionValidator() - action = {'uuid': '712f1701-4c1b-4076-bfcf-3f23cfec6c3b', - 'input_parameters': {'source_node': 'server1', - 'resource_id': self.INSTANCE_UUID}} - resource_action_map = {} - expected_map = { - '94ae2f92-b7fd-4da7-9e97-f13504ae98c4': [ - ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')], - 'server1': [ - ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')]} - migrate_object.validate_parents(resource_action_map, action) - self.assertEqual(resource_action_map, expected_map) diff --git a/watcher/tests/decision_engine/scope/__init__.py b/watcher/tests/decision_engine/scope/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/scope/fake_scopes.py b/watcher/tests/decision_engine/scope/fake_scopes.py deleted file mode 100644 index 9e638fe..0000000 --- a/watcher/tests/decision_engine/scope/fake_scopes.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -fake_scope_1 = [{'availability_zones': [{'name': 'AZ1'}]}, - {'exclude': - [{'instances': - [{'uuid': 'INSTANCE_6'}]}] - } - ] - -default_scope = [{'host_aggregates': [{'id': '*'}]}, - {'availability_zones': [{'name': 'AZ1'}, - {'name': 'AZ2'}]}, - {'exclude': [ - {'instances': [ - {'uuid': 'INSTANCE_1'}, - {'uuid': 'INSTANCE_2'}]}, - {'compute_nodes': [ - {'name': 'Node_1'}, - {'name': 'Node_2'}]} - ]} - ] diff --git a/watcher/tests/decision_engine/scope/test_default.py b/watcher/tests/decision_engine/scope/test_default.py deleted file mode 100644 index 06e7572..0000000 --- a/watcher/tests/decision_engine/scope/test_default.py +++ /dev/null @@ -1,255 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from jsonschema import validators -import mock - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.scope import default -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.scope import fake_scopes - - -class TestDefaultScope(base.TestCase): - - def setUp(self): - super(TestDefaultScope, self).setUp() - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_get_scoped_model_with_zones_and_instances(self, mock_zone_list): - cluster = self.fake_cluster.generate_scenario_1() - audit_scope = fake_scopes.fake_scope_1 - mock_zone_list.return_value = [ - mock.Mock(zoneName='AZ{0}'.format(i), - hosts={'Node_{0}'.format(i): {}}) - for i in range(2)] - model = default.DefaultScope(audit_scope, mock.Mock(), - osc=mock.Mock()).get_scoped_model(cluster) - expected_edges = [('INSTANCE_2', 'Node_1')] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_get_scoped_model_without_scope(self, mock_zone_list): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).get_scoped_model(model) - assert not mock_zone_list.called - - def test_remove_instance(self): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), osc=mock.Mock()).remove_instance( - model, model.get_instance_by_uuid('INSTANCE_2'), 'Node_1') - expected_edges = [ - ('INSTANCE_0', 'Node_0'), - ('INSTANCE_1', 'Node_0'), - ('INSTANCE_3', 'Node_2'), - ('INSTANCE_4', 'Node_2'), - ('INSTANCE_5', 'Node_2'), - ('INSTANCE_6', 'Node_3'), - ('INSTANCE_7', 'Node_4'), - ] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - 
@mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_collect_aggregates(self, mock_aggregate, mock_detailed_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] - mock_detailed_aggregate.side_effect = [ - mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] - default.DefaultScope([{'host_aggregates': [{'id': 1}, {'id': 2}]}], - mock.Mock(), osc=mock.Mock())._collect_aggregates( - [{'id': 1}, {'id': 2}], allowed_nodes) - self.assertEqual(['Node_1'], allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_aggregates_wildcard_is_used(self, mock_aggregate, - mock_detailed_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] - mock_detailed_aggregate.side_effect = [ - mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] - default.DefaultScope([{'host_aggregates': [{'id': '*'}]}], - mock.Mock(), osc=mock.Mock())._collect_aggregates( - [{'id': '*'}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_aggregates_wildcard_with_other_ids(self, mock_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] - scope_handler = default.DefaultScope( - [{'host_aggregates': [{'id': '*'}, {'id': 1}]}], - mock.Mock(), osc=mock.Mock()) - self.assertRaises(exception.WildcardCharacterIsUsed, - scope_handler._collect_aggregates, - [{'id': '*'}, {'id': 1}], - allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_aggregates_with_names_and_ids(self, mock_aggregate, - mock_detailed_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i, - name="HA_{0}".format(i)) - for i 
in range(2)] - mock_collection = [mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) - for i in range(2)] - mock_collection[0].name = 'HA_0' - mock_collection[1].name = 'HA_1' - - mock_detailed_aggregate.side_effect = mock_collection - - default.DefaultScope([{'host_aggregates': [{'name': 'HA_1'}, - {'id': 0}]}], - mock.Mock(), osc=mock.Mock())._collect_aggregates( - [{'name': 'HA_1'}, {'id': 0}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_collect_zones(self, mock_zone_list): - allowed_nodes = [] - mock_zone_list.return_value = [ - mock.Mock(zoneName="AZ{0}".format(i + 1), - hosts={'Node_{0}'.format(2 * i): 1, - 'Node_{0}'.format(2 * i + 1): 2}) - for i in range(2)] - default.DefaultScope([{'availability_zones': [{'name': "AZ1"}]}], - mock.Mock(), osc=mock.Mock())._collect_zones( - [{'name': "AZ1"}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1'], sorted(allowed_nodes)) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_zones_wildcard_is_used(self, mock_zone_list): - allowed_nodes = [] - mock_zone_list.return_value = [ - mock.Mock(zoneName="AZ{0}".format(i + 1), - hosts={'Node_{0}'.format(2 * i): 1, - 'Node_{0}'.format(2 * i + 1): 2}) - for i in range(2)] - default.DefaultScope([{'availability_zones': [{'name': "*"}]}], - mock.Mock(), osc=mock.Mock())._collect_zones( - [{'name': "*"}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], - sorted(allowed_nodes)) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_zones_wildcard_with_other_ids(self, mock_zone_list): - allowed_nodes = [] - mock_zone_list.return_value = [ - mock.Mock(zoneName="AZ{0}".format(i + 1), - hosts={'Node_{0}'.format(2 * i): 1, - 'Node_{0}'.format(2 * i + 1): 2}) - for i in range(2)] - scope_handler = default.DefaultScope( - [{'availability_zones': [{'name': "*"}, {'name': 
'AZ1'}]}], - mock.Mock(), osc=mock.Mock()) - self.assertRaises(exception.WildcardCharacterIsUsed, - scope_handler._collect_zones, - [{'name': "*"}, {'name': 'AZ1'}], - allowed_nodes) - - def test_default_schema(self): - test_scope = fake_scopes.default_scope - validators.Draft4Validator( - default.DefaultScope.DEFAULT_SCHEMA).validate(test_scope) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_exclude_resource( - self, mock_aggregate, mock_detailed_aggregate): - mock_aggregate.return_value = [mock.Mock(id=i, - name="HA_{0}".format(i)) - for i in range(2)] - mock_collection = [mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) - for i in range(2)] - mock_collection[0].name = 'HA_0' - mock_collection[1].name = 'HA_1' - mock_detailed_aggregate.side_effect = mock_collection - - resources_to_exclude = [{'host_aggregates': [{'name': 'HA_1'}, - {'id': 0}]}, - {'instances': [{'uuid': 'INSTANCE_1'}, - {'uuid': 'INSTANCE_2'}]}, - {'compute_nodes': [{'name': 'Node_2'}, - {'name': 'Node_3'}]}, - {'instance_metadata': [{'optimize': True}, - {'optimize1': False}]}] - instances_to_exclude = [] - nodes_to_exclude = [] - instance_metadata = [] - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).exclude_resources( - resources_to_exclude, instances=instances_to_exclude, - nodes=nodes_to_exclude, instance_metadata=instance_metadata) - - self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], - sorted(nodes_to_exclude)) - self.assertEqual(['INSTANCE_1', 'INSTANCE_2'], - sorted(instances_to_exclude)) - self.assertEqual([{'optimize': True}, {'optimize1': False}], - instance_metadata) - - def test_exclude_instances_with_given_metadata(self): - cluster = self.fake_cluster.generate_scenario_1() - instance_metadata = [{'optimize': True}] - instances_to_remove = set() - default.DefaultScope( - [], mock.Mock(), - osc=mock.Mock()).exclude_instances_with_given_metadata( - instance_metadata, 
cluster, instances_to_remove) - self.assertEqual(sorted(['INSTANCE_' + str(i) for i in range(35)]), - sorted(instances_to_remove)) - - instance_metadata = [{'optimize': False}] - instances_to_remove = set() - default.DefaultScope( - [], mock.Mock(), - osc=mock.Mock()).exclude_instances_with_given_metadata( - instance_metadata, cluster, instances_to_remove) - self.assertEqual(set(), instances_to_remove) - - def test_remove_nodes_from_model(self): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).remove_nodes_from_model( - ['Node_1', 'Node_2'], model) - expected_edges = [ - ('INSTANCE_0', 'Node_0'), - ('INSTANCE_1', 'Node_0'), - ('INSTANCE_6', 'Node_3'), - ('INSTANCE_7', 'Node_4')] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) - - def test_remove_instances_from_model(self): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).remove_instances_from_model( - ['INSTANCE_1', 'INSTANCE_2'], model) - expected_edges = [ - ('INSTANCE_0', 'Node_0'), - ('INSTANCE_3', 'Node_2'), - ('INSTANCE_4', 'Node_2'), - ('INSTANCE_5', 'Node_2'), - ('INSTANCE_6', 'Node_3'), - ('INSTANCE_7', 'Node_4')] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) diff --git a/watcher/tests/decision_engine/scoring/__init__.py b/watcher/tests/decision_engine/scoring/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/scoring/test_dummy_scorer.py b/watcher/tests/decision_engine/scoring/test_dummy_scorer.py deleted file mode 100644 index 43a91c7..0000000 --- a/watcher/tests/decision_engine/scoring/test_dummy_scorer.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils - -from watcher.decision_engine.scoring import dummy_scorer -from watcher.tests import base - - -class TestDummyScorer(base.TestCase): - - def setUp(self): - super(TestDummyScorer, self).setUp() - - def test_metadata(self): - scorer = dummy_scorer.DummyScorer(config=None) - self.assertEqual('dummy_scorer', scorer.get_name()) - self.assertIn('Dummy', scorer.get_description()) - - metainfo = scorer.get_metainfo() - self.assertIn('feature_columns', metainfo) - self.assertIn('result_columns', metainfo) - self.assertIn('workloads', metainfo) - - def test_calculate_score(self): - scorer = dummy_scorer.DummyScorer(config=None) - - self._assert_result(scorer, 0, '[0, 0, 0, 0, 0, 0, 0, 0, 0]') - self._assert_result(scorer, 0, '[50, 0, 0, 600, 0, 0, 0, 0, 0]') - self._assert_result(scorer, 0, '[0, 0, 0, 0, 600, 0, 0, 0, 0]') - self._assert_result(scorer, 1, '[85, 0, 0, 0, 0, 0, 0, 0, 0]') - self._assert_result(scorer, 2, '[0, 0, 0, 1100, 1100, 0, 0, 0, 0]') - self._assert_result(scorer, 3, - '[0, 0, 0, 0, 0, 70000000, 70000000, 0, 0]') - - def _assert_result(self, scorer, expected, features): - result_str = scorer.calculate_score(features) - actual_result = jsonutils.loads(result_str)[0] - self.assertEqual(expected, actual_result) diff --git a/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py b/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py deleted file mode 100644 index 25786b3..0000000 --- a/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py +++ /dev/null @@ 
-1,51 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils - -from watcher.decision_engine.scoring import dummy_scoring_container -from watcher.tests import base - - -class TestDummyScoringContainer(base.TestCase): - - def setUp(self): - super(TestDummyScoringContainer, self).setUp() - - def test_get_scoring_engine_list(self): - scorers = (dummy_scoring_container.DummyScoringContainer - .get_scoring_engine_list()) - - self.assertEqual(3, len(scorers)) - self.assertEqual('dummy_min_scorer', scorers[0].get_name()) - self.assertEqual('dummy_max_scorer', scorers[1].get_name()) - self.assertEqual('dummy_avg_scorer', scorers[2].get_name()) - - def test_scorers(self): - scorers = (dummy_scoring_container.DummyScoringContainer - .get_scoring_engine_list()) - - self._assert_result(scorers[0], 1.1, '[1.1, 2.2, 4, 8]') - self._assert_result(scorers[1], 8, '[1.1, 2.2, 4, 8]') - # float(1 + 2 + 4 + 8) / 4 = 15.0 / 4 = 3.75 - self._assert_result(scorers[2], 3.75, '[1, 2, 4, 8]') - - def _assert_result(self, scorer, expected, features): - result_str = scorer.calculate_score(features) - actual_result = jsonutils.loads(result_str)[0] - self.assertEqual(expected, actual_result) diff --git a/watcher/tests/decision_engine/scoring/test_scoring_factory.py b/watcher/tests/decision_engine/scoring/test_scoring_factory.py deleted file mode 100644 index 
dfb79c6..0000000 --- a/watcher/tests/decision_engine/scoring/test_scoring_factory.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.scoring import scoring_factory -from watcher.tests import base - - -class TestScoringFactory(base.TestCase): - - def setUp(self): - super(TestScoringFactory, self).setUp() - - def test_get_scoring_engine(self): - scorer = scoring_factory.get_scoring_engine('dummy_scorer') - self.assertEqual('dummy_scorer', scorer.get_name()) - - scorer = scoring_factory.get_scoring_engine('dummy_min_scorer') - self.assertEqual('dummy_min_scorer', scorer.get_name()) - - scorer = scoring_factory.get_scoring_engine('dummy_max_scorer') - self.assertEqual('dummy_max_scorer', scorer.get_name()) - - scorer = scoring_factory.get_scoring_engine('dummy_avg_scorer') - self.assertEqual('dummy_avg_scorer', scorer.get_name()) - - self.assertRaises( - KeyError, - scoring_factory.get_scoring_engine, - 'non_existing_scorer') - - def test_get_scoring_engine_list(self): - scoring_engines = scoring_factory.get_scoring_engine_list() - - engine_names = {'dummy_scorer', 'dummy_min_scorer', - 'dummy_max_scorer', 'dummy_avg_scorer'} - - for scorer in scoring_engines: - self.assertIn(scorer.get_name(), engine_names) diff --git a/watcher/tests/decision_engine/solution/__init__.py b/watcher/tests/decision_engine/solution/__init__.py 
deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/solution/test_default_solution.py b/watcher/tests/decision_engine/solution/test_default_solution.py deleted file mode 100644 index c0fc839..0000000 --- a/watcher/tests/decision_engine/solution/test_default_solution.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.decision_engine.solution import default -from watcher.tests import base - - -class TestDefaultSolution(base.TestCase): - - def test_default_solution(self): - solution = default.DefaultSolution( - goal=mock.Mock(), strategy=mock.Mock()) - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="nop", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - self.assertEqual(1, len(solution.actions)) - expected_action_type = "nop" - expected_parameters = { - "source_node": "server1", - "destination_node": "server2", - "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" - } - self.assertEqual(expected_action_type, - solution.actions[0].get('action_type')) - self.assertEqual(expected_parameters, - solution.actions[0].get('input_parameters')) - - def test_default_solution_with_no_input_parameters(self): - solution = default.DefaultSolution( - goal=mock.Mock(), strategy=mock.Mock()) - 
solution.add_action(action_type="nop", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36") - self.assertEqual(1, len(solution.actions)) - expected_action_type = "nop" - expected_parameters = { - "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" - } - self.assertEqual(expected_action_type, - solution.actions[0].get('action_type')) - self.assertEqual(expected_parameters, - solution.actions[0].get('input_parameters')) diff --git a/watcher/tests/decision_engine/strategy/__init__.py b/watcher/tests/decision_engine/strategy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/context/__init__.py b/watcher/tests/decision_engine/strategy/context/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/context/test_strategy_context.py b/watcher/tests/decision_engine/strategy/context/test_strategy_context.py deleted file mode 100644 index b6b5c80..0000000 --- a/watcher/tests/decision_engine/strategy/context/test_strategy_context.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import utils -from watcher.decision_engine.model.collector import manager -from watcher.decision_engine.solution import default -from watcher.decision_engine.strategy.context import default as d_strategy_ctx -from watcher.decision_engine.strategy.selection import default as d_selector -from watcher.decision_engine.strategy import strategies -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestStrategyContext(base.DbTestCase): - - def setUp(self): - super(TestStrategyContext, self).setUp() - obj_utils.create_test_goal(self.context, id=1, name="DUMMY") - audit_template = obj_utils.create_test_audit_template( - self.context, uuid=utils.generate_uuid()) - self.audit = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template.id) - - strategy_context = d_strategy_ctx.DefaultStrategyContext() - - @mock.patch.object(strategies.DummyStrategy, 'compute_model', - new_callable=mock.PropertyMock) - @mock.patch.object(d_selector.DefaultStrategySelector, 'select') - def test_execute_strategy(self, mock_call, m_model): - m_model.return_value = mock.Mock() - mock_call.return_value = strategies.DummyStrategy( - config=mock.Mock()) - solution = self.strategy_context.execute_strategy( - self.audit, self.context) - self.assertIsInstance(solution, default.DefaultSolution) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", - mock.Mock()) - def test_execute_force_dummy(self): - goal = obj_utils.create_test_goal( - self.context, id=50, uuid=utils.generate_uuid(), name="my_goal") - - strategy = obj_utils.create_test_strategy( - self.context, id=42, uuid=utils.generate_uuid(), name="dummy", - goal_id=goal.id) - - audit = obj_utils.create_test_audit( - self.context, - id=2, - goal_id=goal.id, - strategy_id=strategy.id, - uuid=utils.generate_uuid(), - ) - - solution = self.strategy_context.execute_strategy(audit, self.context) - - 
self.assertEqual(len(solution.actions), 3) - - @mock.patch.object(strategies.BasicConsolidation, "execute") - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", - mock.Mock()) - def test_execute_force_basic(self, mock_call): - expected_strategy = "basic" - mock_call.return_value = expected_strategy - - obj_utils.create_test_goal(self.context, id=50, - uuid=utils.generate_uuid(), - name="my_goal") - - strategy = obj_utils.create_test_strategy(self.context, - id=42, - uuid=utils.generate_uuid(), - name=expected_strategy) - - audit = obj_utils.create_test_audit( - self.context, - id=2, - strategy_id=strategy.id, - uuid=utils.generate_uuid(), - ) - - solution = self.strategy_context.execute_strategy(audit, self.context) - - self.assertEqual(solution, expected_strategy) diff --git a/watcher/tests/decision_engine/strategy/selector/__init__.py b/watcher/tests/decision_engine/strategy/selector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py b/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py deleted file mode 100644 index d793cc6..0000000 --- a/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import exception -from watcher.decision_engine.loading import default as default_loader -from watcher.decision_engine.strategy.selection import ( - default as default_selector) -from watcher.decision_engine.strategy import strategies -from watcher.tests import base - - -class TestStrategySelector(base.TestCase): - - def setUp(self): - super(TestStrategySelector, self).setUp() - - @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') - def test_select_with_strategy_name(self, m_load): - expected_goal = 'dummy' - expected_strategy = "dummy" - strategy_selector = default_selector.DefaultStrategySelector( - expected_goal, expected_strategy, osc=None) - strategy_selector.select() - m_load.assert_called_once_with(expected_strategy, osc=None) - - @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') - @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') - def test_select_with_goal_name_only(self, m_list_available, m_load): - m_list_available.return_value = {"dummy": strategies.DummyStrategy} - expected_goal = 'dummy' - expected_strategy = "dummy" - strategy_selector = default_selector.DefaultStrategySelector( - expected_goal, osc=None) - strategy_selector.select() - m_load.assert_called_once_with(expected_strategy, osc=None) - - def test_select_non_existing_strategy(self): - strategy_selector = default_selector.DefaultStrategySelector( - "dummy", "NOT_FOUND") - self.assertRaises(exception.LoadingError, strategy_selector.select) - - @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') - def test_select_no_available_strategy_for_goal(self, m_list_available): - m_list_available.return_value = {} - strategy_selector = default_selector.DefaultStrategySelector("dummy") - self.assertRaises(exception.NoAvailableStrategyForGoal, - strategy_selector.select) diff --git a/watcher/tests/decision_engine/strategy/strategies/__init__.py 
b/watcher/tests/decision_engine/strategy/strategies/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py b/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py deleted file mode 100644 index 9f3cf42..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import collections -import copy -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import clients -from watcher.common import exception -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics -from watcher.tests.decision_engine.model import monasca_metrics - - -class TestBasicConsolidation(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Monasca", - {"datasource": "monasca", - "fake_datasource_cls": monasca_metrics.FakeMonascaMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestBasicConsolidation, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_osc = mock.patch.object( - clients, "OpenStackClients") - self.m_osc = p_osc.start() - self.addCleanup(p_osc.stop) - - p_model = mock.patch.object( - strategies.BasicConsolidation, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.BasicConsolidation, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.BasicConsolidation, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - 
self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.BasicConsolidation( - config=mock.Mock(datasource=self.datasource)) - - def test_cluster_size(self): - size_cluster = len( - self.fake_cluster.generate_scenario_1().get_all_compute_nodes()) - size_cluster_assert = 5 - self.assertEqual(size_cluster_assert, size_cluster) - - def test_basic_consolidation_score_node(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - node_1_score = 0.023333333333333317 - self.assertEqual(node_1_score, self.strategy.calculate_score_node( - model.get_node_by_uuid("Node_1"))) - node_2_score = 0.26666666666666666 - self.assertEqual(node_2_score, self.strategy.calculate_score_node( - model.get_node_by_uuid("Node_2"))) - node_0_score = 0.023333333333333317 - self.assertEqual(node_0_score, self.strategy.calculate_score_node( - model.get_node_by_uuid("Node_0"))) - - def test_basic_consolidation_score_instance(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - instance_0_score = 0.023333333333333317 - self.assertEqual( - instance_0_score, - self.strategy.calculate_score_instance(instance_0)) - - instance_1 = model.get_instance_by_uuid("INSTANCE_1") - instance_1_score = 0.023333333333333317 - self.assertEqual( - instance_1_score, - self.strategy.calculate_score_instance(instance_1)) - instance_2 = model.get_instance_by_uuid("INSTANCE_2") - instance_2_score = 0.033333333333333326 - self.assertEqual( - instance_2_score, - self.strategy.calculate_score_instance(instance_2)) - instance_6 = model.get_instance_by_uuid("INSTANCE_6") - instance_6_score = 0.02666666666666669 - self.assertEqual( - instance_6_score, - self.strategy.calculate_score_instance(instance_6)) - instance_7 = model.get_instance_by_uuid("INSTANCE_7") - 
instance_7_score = 0.013333333333333345 - self.assertEqual( - instance_7_score, - self.strategy.calculate_score_instance(instance_7)) - - def test_basic_consolidation_score_instance_disk(self): - model = self.fake_cluster.generate_scenario_5_with_instance_disk_0() - self.m_model.return_value = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - instance_0_score = 0.023333333333333355 - self.assertEqual( - instance_0_score, - self.strategy.calculate_score_instance(instance_0)) - - def test_basic_consolidation_weight(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - cores = 16 - # 80 Go - disk = 80 - # mem 8 Go - mem = 8 - instance_0_weight_assert = 3.1999999999999997 - self.assertEqual( - instance_0_weight_assert, - self.strategy.calculate_weight(instance_0, cores, disk, mem)) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_check_migration(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - - all_instances = model.get_all_instances() - all_nodes = model.get_all_compute_nodes() - instance0 = all_instances[list(all_instances.keys())[0]] - node0 = all_nodes[list(all_nodes.keys())[0]] - - self.strategy.check_migration(node0, node0, instance0) - - def test_threshold(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - - all_nodes = model.get_all_compute_nodes() - node0 = all_nodes[list(all_nodes.keys())[0]] - - self.assertFalse(self.strategy.check_threshold( - node0, 1000, 1000, 1000)) - - def test_basic_consolidation_works_on_model_copy(self): - model = 
self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = copy.deepcopy(model) - - self.assertTrue(model_root.ModelRoot.is_isomorphic( - model, self.strategy.compute_model)) - self.assertIsNot(model, self.strategy.compute_model) - - def test_basic_consolidation_migration(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - - solution = self.strategy.execute() - - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - expected_num_migrations = 1 - expected_power_state = 1 - - num_migrations = actions_counter.get("migrate", 0) - num_node_state_change = actions_counter.get( - "change_nova_service_state", 0) - self.assertEqual(expected_num_migrations, num_migrations) - self.assertEqual(expected_power_state, num_node_state_change) - - def test_basic_consolidation_execute_scenario_8_with_4_nodes(self): - model = self.fake_cluster.generate_scenario_8_with_4_nodes() - self.m_model.return_value = model - - solution = self.strategy.execute() - - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - expected_num_migrations = 5 - expected_power_state = 3 - expected_global_efficacy = 75 - - num_migrations = actions_counter.get("migrate", 0) - num_node_state_change = actions_counter.get( - "change_nova_service_state", 0) - - global_efficacy_value = solution.global_efficacy.get("value", 0) - - self.assertEqual(expected_num_migrations, num_migrations) - self.assertEqual(expected_power_state, num_node_state_change) - self.assertEqual(expected_global_efficacy, global_efficacy_value) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - # calculate_weight - def test_execute_no_workload(self): - model = ( - 
self.fake_cluster - .generate_scenario_4_with_1_node_no_instance()) - self.m_model.return_value = model - - with mock.patch.object( - strategies.BasicConsolidation, 'calculate_weight' - ) as mock_score_call: - mock_score_call.return_value = 0 - solution = self.strategy.execute() - self.assertEqual(0, solution.efficacy.global_efficacy.value) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - node_1 = model.get_node_by_uuid("Node_1") - p_ceilometer = mock.patch.object( - strategies.BasicConsolidation, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_monasca = mock.patch.object(strategies.BasicConsolidation, "monasca") - m_monasca = p_monasca.start() - self.addCleanup(p_monasca.stop) - p_gnocchi = mock.patch.object(strategies.BasicConsolidation, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_monasca.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_ceilometer.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_gnocchi.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy.calculate_score_node(node_1) - resource_id = "%s_%s" 
% (node_1.uuid, node_1.hostname) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='compute.node.cpu.percent', - period=7200, resource_id=resource_id) - elif self.strategy.config.datasource == "monasca": - m_monasca.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='cpu.percent', - period=7200, dimensions={'hostname': 'Node_1'}) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('7200')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=resource_id, metric='compute.node.cpu.percent', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') - - self.strategy.input_parameters.update({"period": 600}) - self.strategy.calculate_score_node(node_1) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='compute.node.cpu.percent', - period=600, resource_id=resource_id) - elif self.strategy.config.datasource == "monasca": - m_monasca.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='cpu.percent', - period=600, dimensions={'hostname': 'Node_1'}) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('600')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=resource_id, metric='compute.node.cpu.percent', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py b/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py deleted file mode 100644 index 9ce0a7c..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py +++ /dev/null @@ -1,72 
+0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.applier.loading import default -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestDummyStrategy(base.TestCase): - - def setUp(self): - super(TestDummyStrategy, self).setUp() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.DummyStrategy, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_audit_scope = mock.patch.object( - strategies.DummyStrategy, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.strategy = strategies.DummyStrategy(config=mock.Mock()) - - self.m_model.return_value = model_root.ModelRoot() - self.strategy = strategies.DummyStrategy(config=mock.Mock()) - - def test_dummy_strategy(self): - dummy = strategies.DummyStrategy(config=mock.Mock()) - dummy.input_parameters = utils.Struct() - dummy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) - solution = 
dummy.execute() - self.assertEqual(3, len(solution.actions)) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() diff --git a/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py b/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py deleted file mode 100644 index cb6fa28..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.applier.loading import default -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestDummyWithScorer(base.TestCase): - - def setUp(self): - super(TestDummyWithScorer, self).setUp() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.DummyWithScorer, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - self.m_model.return_value = model_root.ModelRoot() - self.strategy = strategies.DummyWithScorer(config=mock.Mock()) - - def test_dummy_with_scorer(self): - dummy = strategies.DummyWithScorer(config=mock.Mock()) - dummy.input_parameters = utils.Struct() - dummy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) - solution = dummy.execute() - self.assertEqual(4, len(solution.actions)) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() diff --git a/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py b/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py deleted file mode 100644 index 349ce1b..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Intel Corp -# -# Authors: 
Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestNoisyNeighbor(base.TestCase): - - def setUp(self): - super(TestNoisyNeighbor, self).setUp() - # fake metrics - self.fake_metrics = ceilometer_metrics.FakeCeilometerMetrics() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.NoisyNeighbor, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_ceilometer = mock.patch.object( - strategies.NoisyNeighbor, "ceilometer", - new_callable=mock.PropertyMock) - self.m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - - p_audit_scope = mock.patch.object( - strategies.NoisyNeighbor, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.m_ceilometer.return_value = 
mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics_nn) - self.strategy = strategies.NoisyNeighbor(config=mock.Mock()) - - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'cache_threshold': 35}) - self.strategy.threshold = 35 - self.strategy.input_parameters.update({'period': 100}) - self.strategy.threshold = 100 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = self.strategy.calc_used_resource( - node) - - self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used)) - - def test_group_hosts(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - node_uuid = 'Node_1' - n1, n2 = self.strategy.group_hosts() - self.assertTrue(node_uuid in n1) - self.assertEqual(n1[node_uuid]['priority_vm'].uuid, 'INSTANCE_3') - self.assertEqual(n1[node_uuid]['noisy_vm'].uuid, 'INSTANCE_4') - self.assertEqual('Node_0', n2[0].uuid) - - def test_find_priority_instance(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - potential_prio_inst = model.get_instance_by_uuid('INSTANCE_3') - inst_res = self.strategy.find_priority_instance(potential_prio_inst) - self.assertEqual('INSTANCE_3', inst_res.uuid) - - def test_find_noisy_instance(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - potential_noisy_inst = model.get_instance_by_uuid('INSTANCE_4') - inst_res = self.strategy.find_noisy_instance(potential_noisy_inst) - self.assertEqual('INSTANCE_4', inst_res.uuid) - - def test_filter_destination_hosts(self): - 
model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - n1, n2 = self.strategy.group_hosts() - mig_source_node = max(n1.keys(), key=lambda a: - n1[a]['priority_vm']) - instance_to_mig = n1[mig_source_node]['noisy_vm'] - dest_hosts = self.strategy.filter_dest_servers( - n2, instance_to_mig) - - self.assertEqual(1, len(dest_hosts)) - self.assertEqual('Node_0', dest_hosts[0].uuid) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = model - - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(1, num_migrations) - - def 
test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() diff --git a/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py b/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py deleted file mode 100644 index 596bbbc..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 Intel Corp -# -# Authors: Zhenzan Zhou -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import collections -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestOutletTempControl(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestOutletTempControl, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.OutletTempControl, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.OutletTempControl, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.OutletTempControl, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.OutletTempControl( - config=mock.Mock(datasource=self.datasource)) - - self.strategy.input_parameters = utils.Struct() - 
self.strategy.input_parameters.update({'threshold': 34.3}) - self.strategy.threshold = 34.3 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = self.strategy.calc_used_resource( - node) - - self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used)) - - def test_group_hosts_by_outlet_temp(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - n1, n2 = self.strategy.group_hosts_by_outlet_temp() - self.assertEqual('Node_1', n1[0]['node'].uuid) - self.assertEqual('Node_0', n2[0]['node'].uuid) - - def test_choose_instance_to_migrate(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - n1, n2 = self.strategy.group_hosts_by_outlet_temp() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - self.assertEqual('Node_1', instance_to_mig[0].uuid) - self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517', - instance_to_mig[1].uuid) - - def test_filter_dest_servers(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - n1, n2 = self.strategy.group_hosts_by_outlet_temp() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1]) - self.assertEqual(1, len(dest_hosts)) - self.assertEqual('Node_0', dest_hosts[0]['node'].uuid) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - 
self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = model - - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(1, num_migrations) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.OutletTempControl, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object(strategies.OutletTempControl, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - 
self.addCleanup(datetime_patcher.stop) - m_ceilometer.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - m_gnocchi.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - node = model.get_node_by_uuid('Node_0') - self.strategy.input_parameters.update({'threshold': 35.0}) - self.strategy.threshold = 35.0 - self.strategy.group_hosts_by_outlet_temp() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', - meter_name='hardware.ipmi.node.outlet_temperature', - period=30, resource_id=node.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('30')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, - metric='hardware.ipmi.node.outlet_temperature', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py b/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py deleted file mode 100644 index 63076a1..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import collections -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestUniformAirflow(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestUniformAirflow, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.UniformAirflow, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.UniformAirflow, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.UniformAirflow, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.UniformAirflow( - config=mock.Mock(datasource=self.datasource)) - self.strategy.input_parameters = utils.Struct() - 
self.strategy.input_parameters.update({'threshold_airflow': 400.0, - 'threshold_inlet_t': 28.0, - 'threshold_power': 350.0, - 'period': 300}) - self.strategy.threshold_airflow = 400 - self.strategy.threshold_inlet_t = 28 - self.strategy.threshold_power = 350 - self._period = 300 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = ( - self.strategy.calculate_used_resource(node)) - self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40)) - - def test_group_hosts_by_airflow(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - n1, n2 = self.strategy.group_hosts_by_airflow() - # print n1, n2, avg, w_map - self.assertEqual(n1[0]['node'].uuid, 'Node_0') - self.assertEqual(n2[0]['node'].uuid, 'Node_1') - - def test_choose_instance_to_migrate(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 22 - n1, n2 = self.strategy.group_hosts_by_airflow() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - - self.assertEqual(instance_to_mig[0].uuid, 'Node_0') - self.assertEqual(len(instance_to_mig[1]), 1) - self.assertIn(instance_to_mig[1][0].uuid, - {'cae81432-1631-4d4e-b29c-6f3acdcde906', - '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) - - def test_choose_instance_to_migrate_all(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 25 - n1, n2 = self.strategy.group_hosts_by_airflow() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - - self.assertEqual(instance_to_mig[0].uuid, 'Node_0') - self.assertEqual(len(instance_to_mig[1]), 2) - 
self.assertEqual({'cae81432-1631-4d4e-b29c-6f3acdcde906', - '73b09e16-35b7-4922-804e-e8f5d9b740fc'}, - {inst.uuid for inst in instance_to_mig[1]}) - - def test_choose_instance_notfound(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 22 - n1, n2 = self.strategy.group_hosts_by_airflow() - instances = model.get_all_instances() - [model.remove_instance(inst) for inst in instances.values()] - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - self.assertIsNone(instance_to_mig) - - def test_filter_destination_hosts(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 22 - n1, n2 = self.strategy.group_hosts_by_airflow() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - dest_hosts = self.strategy.filter_destination_hosts( - n2, instance_to_mig[1]) - - self.assertEqual(len(dest_hosts), 1) - self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1') - self.assertIn(instance_to_mig[1][0].uuid, - {'cae81432-1631-4d4e-b29c-6f3acdcde906', - '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - 
self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 25 - self.strategy.threshold_power = 300 - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = model - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 25 - self.strategy.threshold_power = 300 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(num_migrations, 2) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.UniformAirflow, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object(strategies.UniformAirflow, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - 
m_ceilometer.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - m_gnocchi.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - self.strategy.group_hosts_by_airflow() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', meter_name='hardware.ipmi.node.airflow', - period=300, resource_id=mock.ANY) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('300')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, metric='hardware.ipmi.node.airflow', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py b/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py deleted file mode 100644 index 0f83824..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py +++ /dev/null @@ -1,346 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vojtech CIMA -# Bruno GRAZIOLI -# Sean MURPHY -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import datetime -import mock - -from watcher.common import exception -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_and_metrics - - -class TestVMWorkloadConsolidation(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": - faker_cluster_and_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": - faker_cluster_and_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestVMWorkloadConsolidation, self).setUp() - - # fake cluster - self.fake_cluster = faker_cluster_and_metrics.FakerModelCollector() - - p_model = mock.patch.object( - strategies.VMWorkloadConsolidation, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.VMWorkloadConsolidation, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.VMWorkloadConsolidation, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - # fake metrics - self.fake_metrics = self.fake_datasource_cls( - self.m_model.return_value) - - self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.VMWorkloadConsolidation( - config=mock.Mock(datasource=self.datasource)) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - 
exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_get_instance_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - instance_util = dict(cpu=1.0, ram=1, disk=10) - self.assertEqual( - instance_util, - self.strategy.get_instance_utilization(instance_0)) - - def test_get_node_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - node_0 = model.get_node_by_uuid("Node_0") - node_util = dict(cpu=1.0, ram=1, disk=10) - self.assertEqual( - node_util, - self.strategy.get_node_utilization(node_0)) - - def test_get_node_capacity(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - node_0 = model.get_node_by_uuid("Node_0") - node_util = dict(cpu=40, ram=64, disk=250) - self.assertEqual(node_util, self.strategy.get_node_capacity(node_0)) - - def test_get_relative_node_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - node = model.get_node_by_uuid('Node_0') - rhu = self.strategy.get_relative_node_utilization(node) - expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025} - self.assertEqual(expected_rhu, rhu) - - def test_get_relative_cluster_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - cru = self.strategy.get_relative_cluster_utilization() - expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375} - self.assertEqual(expected_cru, cru) - - def test_add_migration(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = 
model.get_node_by_uuid('Node_1') - instance_uuid = 'INSTANCE_0' - instance = model.get_instance_by_uuid(instance_uuid) - self.strategy.add_migration(instance, n1, n2) - self.assertEqual(1, len(self.strategy.solution.actions)) - expected = {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': instance_uuid}} - self.assertEqual(expected, self.strategy.solution.actions[0]) - - def test_is_overloaded(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.is_overloaded(n1, cc) - self.assertFalse(res) - - cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.is_overloaded(n1, cc) - self.assertFalse(res) - - cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.is_overloaded(n1, cc) - self.assertTrue(res) - - def test_instance_fits(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n = model.get_node_by_uuid('Node_1') - instance0 = model.get_instance_by_uuid('INSTANCE_0') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.instance_fits(instance0, n, cc) - self.assertTrue(res) - - cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.instance_fits(instance0, n, cc) - self.assertFalse(res) - - def test_add_action_enable_compute_node(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n = model.get_node_by_uuid('Node_0') - self.strategy.add_action_enable_compute_node(n) - expected = [{'action_type': 'change_nova_service_state', - 'input_parameters': {'state': 'enabled', - 'resource_id': 'Node_0'}}] - self.assertEqual(expected, self.strategy.solution.actions) - - def 
test_add_action_disable_node(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n = model.get_node_by_uuid('Node_0') - self.strategy.add_action_disable_node(n) - expected = [{'action_type': 'change_nova_service_state', - 'input_parameters': {'state': 'disabled', - 'resource_id': 'Node_0'}}] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_disable_unused_nodes(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = model.get_node_by_uuid('Node_1') - instance_uuid = 'INSTANCE_0' - instance = model.get_instance_by_uuid(instance_uuid) - self.strategy.disable_unused_nodes() - self.assertEqual(0, len(self.strategy.solution.actions)) - - # Migrate VM to free the node - self.strategy.add_migration(instance, n1, n2) - - self.strategy.disable_unused_nodes() - expected = {'action_type': 'change_nova_service_state', - 'input_parameters': {'state': 'disabled', - 'resource_id': 'Node_0'}} - self.assertEqual(2, len(self.strategy.solution.actions)) - self.assertEqual(expected, self.strategy.solution.actions[1]) - - def test_offload_phase(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.offload_phase(cc) - expected = [] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_consolidation_phase(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = model.get_node_by_uuid('Node_1') - instance_uuid = 'INSTANCE_0' - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.consolidation_phase(cc) - expected = [{'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 
'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': instance_uuid}}] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_strategy(self): - model = self.fake_cluster.generate_scenario_2() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.offload_phase(cc) - self.strategy.consolidation_phase(cc) - self.strategy.optimize_solution() - n2 = self.strategy.solution.actions[0][ - 'input_parameters']['destination_node'] - expected = [{'action_type': 'migrate', - 'input_parameters': {'destination_node': n2, - 'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_3'}}, - {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2, - 'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_1'}}] - - self.assertEqual(expected, self.strategy.solution.actions) - - def test_strategy2(self): - model = self.fake_cluster.generate_scenario_3() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = model.get_node_by_uuid('Node_1') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.offload_phase(cc) - expected = [{'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_6', - 'source_node': n1.uuid}}, - {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_7', - 'source_node': n1.uuid}}, - {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_8', - 'source_node': n1.uuid}}] - self.assertEqual(expected, self.strategy.solution.actions) - self.strategy.consolidation_phase(cc) - expected.append({'action_type': 'migrate', - 'input_parameters': {'destination_node': n1.uuid, 
- 'migration_type': 'live', - 'resource_id': 'INSTANCE_7', - 'source_node': n2.uuid}}) - self.assertEqual(expected, self.strategy.solution.actions) - self.strategy.optimize_solution() - del expected[3] - del expected[1] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.VMWorkloadConsolidation, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object( - strategies.VMWorkloadConsolidation, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_ceilometer.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_gnocchi.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - instance0 = model.get_instance_by_uuid("INSTANCE_0") - self.strategy.get_instance_utilization(instance0) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', meter_name='disk.root.size', - period=3600, resource_id=instance0.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('3600')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=instance0.uuid, metric='disk.root.size', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py 
b/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py deleted file mode 100644 index 36e06e6..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import collections -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestWorkloadBalance(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestWorkloadBalance, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.WorkloadBalance, "compute_model", - 
new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.WorkloadBalance, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.WorkloadBalance, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) - self.strategy = strategies.WorkloadBalance( - config=mock.Mock(datasource=self.datasource)) - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'threshold': 25.0, - 'period': 300}) - self.strategy.threshold = 25.0 - self.strategy._period = 300 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = ( - self.strategy.calculate_used_resource(node)) - - self.assertEqual((cores_used, mem_used, disk_used), (20, 4, 40)) - - def test_group_hosts_by_cpu_util(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold = 30 - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - self.assertEqual(n1[0]['node'].uuid, 'Node_0') - self.assertEqual(n2[0]['node'].uuid, 'Node_1') - self.assertEqual(avg, 8.0) - - def test_choose_instance_to_migrate(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - instance_to_mig = self.strategy.choose_instance_to_migrate( - n1, avg, w_map) - self.assertEqual(instance_to_mig[0].uuid, 'Node_0') - 
self.assertEqual(instance_to_mig[1].uuid, - "73b09e16-35b7-4922-804e-e8f5d9b740fc") - - def test_choose_instance_notfound(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - instances = model.get_all_instances() - [model.remove_instance(inst) for inst in instances.values()] - instance_to_mig = self.strategy.choose_instance_to_migrate( - n1, avg, w_map) - self.assertIsNone(instance_to_mig) - - def test_filter_destination_hosts(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - self.strategy.datasource = mock.MagicMock( - statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - instance_to_mig = self.strategy.choose_instance_to_migrate( - n1, avg, w_map) - dest_hosts = self.strategy.filter_destination_hosts( - n2, instance_to_mig[1], avg, w_map) - self.assertEqual(len(dest_hosts), 1) - self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1') - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = 
model - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(num_migrations, 1) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.WorkloadBalance, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object(strategies.WorkloadBalance, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_ceilometer.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics_wb) - m_gnocchi.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics_wb) - instance0 = model.get_instance_by_uuid("INSTANCE_0") - self.strategy.group_hosts_by_cpu_util() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', meter_name='cpu_util', - 
period=300, resource_id=instance0.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('300')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, metric='cpu_util', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py b/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py deleted file mode 100644 index 8c9e656..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py +++ /dev/null @@ -1,277 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica LLC -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import datetime -import mock - -from watcher.common import clients -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestWorkloadStabilization(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestWorkloadStabilization, self).setUp() - - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - self.hosts_load_assert = { - 'Node_0': {'cpu_util': 0.07, 'memory.resident': 7.0, 'vcpus': 40}, - 'Node_1': {'cpu_util': 0.07, 'memory.resident': 5, 'vcpus': 40}, - 'Node_2': {'cpu_util': 0.8, 'memory.resident': 29, 'vcpus': 40}, - 'Node_3': {'cpu_util': 0.05, 'memory.resident': 8, 'vcpus': 40}, - 'Node_4': {'cpu_util': 0.05, 'memory.resident': 4, 'vcpus': 40}} - - p_osc = mock.patch.object( - clients, "OpenStackClients") - self.m_osc = p_osc.start() - self.addCleanup(p_osc.stop) - - p_model = mock.patch.object( - strategies.WorkloadStabilization, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.WorkloadStabilization, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.WorkloadStabilization, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = 
p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_model.return_value = model_root.ModelRoot() - self.m_audit_scope.return_value = mock.Mock() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - - self.strategy = strategies.WorkloadStabilization( - config=mock.Mock(datasource=self.datasource)) - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update( - {'metrics': ["cpu_util", "memory.resident"], - 'thresholds': {"cpu_util": 0.2, "memory.resident": 0.2}, - 'weights': {"cpu_util_weight": 1.0, - "memory.resident_weight": 1.0}, - 'instance_metrics': - {"cpu_util": "compute.node.cpu.percent", - "memory.resident": "hardware.memory.used"}, - 'host_choice': 'retry', - 'retry_count': 1, - 'periods': {"instance": 720, "node": 600}}) - self.strategy.metrics = ["cpu_util", "memory.resident"] - self.strategy.thresholds = {"cpu_util": 0.2, "memory.resident": 0.2} - self.strategy.weights = {"cpu_util_weight": 1.0, - "memory.resident_weight": 1.0} - self.strategy.instance_metrics = { - "cpu_util": "compute.node.cpu.percent", - "memory.resident": "hardware.memory.used"} - self.strategy.host_choice = 'retry' - self.strategy.retry_count = 1 - self.strategy.periods = {"instance": 720, "node": 600} - - def test_get_instance_load(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance0 = model.get_instance_by_uuid("INSTANCE_0") - instance_0_dict = { - 'uuid': 'INSTANCE_0', 'vcpus': 10, - 'cpu_util': 0.07, 'memory.resident': 2} - self.assertEqual( - instance_0_dict, self.strategy.get_instance_load(instance0)) - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.WorkloadStabilization, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = 
mock.patch.object(strategies.WorkloadStabilization, - "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_ceilometer.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_gnocchi.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - instance0 = model.get_instance_by_uuid("INSTANCE_0") - self.strategy.get_instance_load(instance0) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='min', meter_name='memory.resident', - period=720, resource_id=instance0.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('720')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=instance0.uuid, metric='memory.resident', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') - self.strategy.get_hosts_load() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='hardware.memory.used', - period=600, resource_id=mock.ANY) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('600')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, metric='hardware.memory.used', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') - - def test_normalize_hosts_load(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - fake_hosts = 
{'Node_0': {'cpu_util': 0.07, 'memory.resident': 7}, - 'Node_1': {'cpu_util': 0.05, 'memory.resident': 5}} - normalized_hosts = {'Node_0': - {'cpu_util': 0.07, - 'memory.resident': 0.05303030303030303}, - 'Node_1': - {'cpu_util': 0.05, - 'memory.resident': 0.03787878787878788}} - self.assertEqual( - normalized_hosts, - self.strategy.normalize_hosts_load(fake_hosts)) - - def test_get_available_nodes(self): - self.m_model.return_value = self.fake_cluster. \ - generate_scenario_9_with_3_active_plus_1_disabled_nodes() - self.assertEqual(3, len(self.strategy.get_available_nodes())) - - def test_get_hosts_load(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.assertEqual(self.strategy.get_hosts_load(), - self.hosts_load_assert) - - def test_get_sd(self): - test_cpu_sd = 0.296 - test_ram_sd = 9.3 - self.assertEqual( - round(self.strategy.get_sd( - self.hosts_load_assert, 'cpu_util'), 3), - test_cpu_sd) - self.assertEqual( - round(self.strategy.get_sd( - self.hosts_load_assert, 'memory.resident'), 1), - test_ram_sd) - - def test_calculate_weighted_sd(self): - sd_case = [0.5, 0.75] - self.assertEqual(self.strategy.calculate_weighted_sd(sd_case), 1.25) - - def test_calculate_migration_case(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance = model.get_instance_by_uuid("INSTANCE_5") - src_node = model.get_node_by_uuid("Node_2") - dst_node = model.get_node_by_uuid("Node_1") - result = self.strategy.calculate_migration_case( - self.hosts_load_assert, instance, - src_node, dst_node)[-1][dst_node.uuid] - result['cpu_util'] = round(result['cpu_util'], 3) - self.assertEqual(result, {'cpu_util': 0.095, 'memory.resident': 21.0, - 'vcpus': 40}) - - def test_simulate_migrations(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.strategy.host_choice = 'retry' - self.assertEqual( - 8, - len(self.strategy.simulate_migrations(self.hosts_load_assert))) - - 
def test_check_threshold(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2} - self.strategy.simulate_migrations = mock.Mock(return_value=True) - self.assertTrue(self.strategy.check_threshold()) - - def test_execute_one_migration(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2} - self.strategy.simulate_migrations = mock.Mock( - return_value=[ - {'instance': 'INSTANCE_4', 's_host': 'Node_2', - 'host': 'Node_1'}] - ) - with mock.patch.object(self.strategy, 'migrate') as mock_migration: - self.strategy.do_execute() - mock_migration.assert_called_once_with( - 'INSTANCE_4', 'Node_2', 'Node_1') - - def test_execute_multiply_migrations(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.00001, - 'memory.resident': 0.0001} - self.strategy.simulate_migrations = mock.Mock( - return_value=[ - {'instance': 'INSTANCE_4', 's_host': 'Node_2', - 'host': 'Node_1'}, - {'instance': 'INSTANCE_3', 's_host': 'Node_2', - 'host': 'Node_3'}] - ) - with mock.patch.object(self.strategy, 'migrate') as mock_migrate: - self.strategy.do_execute() - self.assertEqual(mock_migrate.call_count, 2) - - def test_execute_nothing_to_migrate(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.042, - 'memory.resident': 0.0001} - self.strategy.simulate_migrations = mock.Mock(return_value=False) - with mock.patch.object(self.strategy, 'migrate') as mock_migrate: - self.strategy.execute() - mock_migrate.assert_not_called() diff --git a/watcher/tests/decision_engine/test_gmr.py b/watcher/tests/decision_engine/test_gmr.py deleted file mode 100644 index d686a6d..0000000 --- a/watcher/tests/decision_engine/test_gmr.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- encoding: utf-8 -*- -# 
Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.decision_engine import gmr -from watcher.decision_engine.model.collector import manager -from watcher.tests import base - - -class TestGmrPlugin(base.TestCase): - - @mock.patch.object(manager.CollectorManager, "get_collectors") - def test_show_models(self, m_get_collectors): - m_to_string = mock.Mock(return_value="") - m_get_collectors.return_value = { - "test_model": mock.Mock( - cluster_data_model=mock.Mock(to_string=m_to_string))} - output = gmr.show_models() - self.assertEqual(1, m_to_string.call_count) - self.assertIn("", output) diff --git a/watcher/tests/decision_engine/test_rpcapi.py b/watcher/tests/decision_engine/test_rpcapi.py deleted file mode 100644 index 61b9c2f..0000000 --- a/watcher/tests/decision_engine/test_rpcapi.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import oslo_messaging as om -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine import rpcapi -from watcher.tests import base - - -class TestDecisionEngineAPI(base.TestCase): - - def setUp(self): - super(TestDecisionEngineAPI, self).setUp() - - api = rpcapi.DecisionEngineAPI() - - def test_get_api_version(self): - with mock.patch.object(om.RPCClient, 'call') as mock_call: - expected_context = self.context - self.api.check_api_version(expected_context) - mock_call.assert_called_once_with( - expected_context, 'check_api_version', - api_version=rpcapi.DecisionEngineAPI().api_version) - - def test_execute_audit_throw_exception(self): - audit_uuid = "uuid" - self.assertRaises(exception.InvalidUuidOrName, - self.api.trigger_audit, - audit_uuid) - - def test_execute_audit_without_error(self): - with mock.patch.object(om.RPCClient, 'cast') as mock_cast: - audit_uuid = utils.generate_uuid() - self.api.trigger_audit(self.context, audit_uuid) - mock_cast.assert_called_once_with( - self.context, 'trigger_audit', audit_uuid=audit_uuid) diff --git a/watcher/tests/decision_engine/test_scheduling.py b/watcher/tests/decision_engine/test_scheduling.py deleted file mode 100644 index d4a057c..0000000 --- a/watcher/tests/decision_engine/test_scheduling.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from apscheduler.schedulers import background -from apscheduler.triggers import interval as interval_trigger -import eventlet -import mock - -from watcher.decision_engine.loading import default as default_loading -from watcher.decision_engine import scheduling -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestDecisionEngineSchedulingService(base.TestCase): - - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'load') - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'list_available') - @mock.patch.object(background.BackgroundScheduler, 'start') - def test_start_de_scheduling_service(self, m_start, m_list_available, - m_load): - m_list_available.return_value = { - 'fake': faker_cluster_state.FakerModelCollector} - fake_collector = faker_cluster_state.FakerModelCollector( - config=mock.Mock(period=777)) - m_load.return_value = fake_collector - - scheduler = scheduling.DecisionEngineSchedulingService() - - scheduler.start() - - m_start.assert_called_once_with(scheduler) - jobs = scheduler.get_jobs() - self.assertEqual(2, len(jobs)) - - job = jobs[0] - self.assertTrue(bool(fake_collector.cluster_data_model)) - - self.assertIsInstance(job.trigger, interval_trigger.IntervalTrigger) - - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'load') - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'list_available') - @mock.patch.object(background.BackgroundScheduler, 'start') - def test_execute_sync_job_fails(self, m_start, m_list_available, - m_load): - fake_config = mock.Mock(period=.01) - fake_collector = faker_cluster_state.FakerModelCollector( - config=fake_config) - fake_collector.synchronize = mock.Mock( - side_effect=lambda: eventlet.sleep(.5)) - m_list_available.return_value = { - 'fake': 
faker_cluster_state.FakerModelCollector} - m_load.return_value = fake_collector - - scheduler = scheduling.DecisionEngineSchedulingService() - - scheduler.start() - - m_start.assert_called_once_with(scheduler) - jobs = scheduler.get_jobs() - self.assertEqual(2, len(jobs)) - - job = jobs[0] - job.func() - self.assertFalse(bool(fake_collector.cluster_data_model)) - - self.assertIsInstance(job.trigger, interval_trigger.IntervalTrigger) diff --git a/watcher/tests/decision_engine/test_sync.py b/watcher/tests/decision_engine/test_sync.py deleted file mode 100644 index 3a2f23e..0000000 --- a/watcher/tests/decision_engine/test_sync.py +++ /dev/null @@ -1,661 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import context -from watcher.common import utils -from watcher.decision_engine.loading import default -from watcher.decision_engine import sync -from watcher import objects -from watcher.tests.db import base -from watcher.tests.decision_engine import fake_goals -from watcher.tests.decision_engine import fake_strategies - - -class TestSyncer(base.DbTestCase): - - def setUp(self): - super(TestSyncer, self).setUp() - self.ctx = context.make_context() - - # This mock simulates the strategies discovery done in discover() - self.m_available_strategies = mock.Mock(return_value={ - fake_strategies.FakeDummy1Strategy1.get_name(): - fake_strategies.FakeDummy1Strategy1, - fake_strategies.FakeDummy1Strategy2.get_name(): - fake_strategies.FakeDummy1Strategy2, - fake_strategies.FakeDummy2Strategy3.get_name(): - fake_strategies.FakeDummy2Strategy3, - fake_strategies.FakeDummy2Strategy4.get_name(): - fake_strategies.FakeDummy2Strategy4, - }) - - self.m_available_goals = mock.Mock(return_value={ - fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1, - fake_goals.FakeDummy2.get_name(): fake_goals.FakeDummy2, - }) - - self.goal1_spec = fake_goals.FakeDummy1( - config=mock.Mock()).get_efficacy_specification() - self.goal2_spec = fake_goals.FakeDummy2( - config=mock.Mock()).get_efficacy_specification() - - p_goals_load = mock.patch.object( - default.DefaultGoalLoader, 'load', - side_effect=lambda goal: self.m_available_goals()[goal]()) - p_goals = mock.patch.object( - default.DefaultGoalLoader, 'list_available', - self.m_available_goals) - p_strategies = mock.patch.object( - default.DefaultStrategyLoader, 'list_available', - self.m_available_strategies) - - p_goals.start() - p_goals_load.start() - p_strategies.start() - - self.syncer = sync.Syncer() - self.addCleanup(p_goals.stop) - self.addCleanup(p_goals_load.stop) - self.addCleanup(p_strategies.stop) - - @staticmethod - def _find_created_modified_unmodified_ids(befores, afters): - created = 
{ - a_item.id: a_item for a_item in afters - if a_item.uuid not in (b_item.uuid for b_item in befores) - } - - modified = { - a_item.id: a_item for a_item in afters - if a_item.as_dict() not in ( - b_items.as_dict() for b_items in befores) - } - - unmodified = { - a_item.id: a_item for a_item in afters - if a_item.as_dict() in ( - b_items.as_dict() for b_items in befores) - } - - return created, modified, unmodified - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_empty_db( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [] - m_s_list.return_value = [] - - self.syncer.sync() - - self.assertEqual(2, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_existing_goal( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, 
m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [ - objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - ] - m_s_list.return_value = [] - - self.syncer.sync() - - self.assertEqual(1, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_existing_strategy( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [ - objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - ] - m_s_list.return_value = [ - objects.Strategy(self.ctx, id=1, name="strategy_1", - goal_id=1, display_name="Strategy 1", - parameters_spec='{}') - ] - self.syncer.sync() - - self.assertEqual(1, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(3, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, 
m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_modified_goal( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [objects.Goal( - self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_2", display_name="original", - efficacy_specification=self.goal2_spec.serialize_indicators_specs() - )] - m_s_list.return_value = [] - self.syncer.sync() - - self.assertEqual(2, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(1, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_modified_strategy( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [ - objects.Goal(self.ctx, id=1, 
uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - ] - m_s_list.return_value = [ - objects.Strategy(self.ctx, id=1, name="strategy_1", - goal_id=1, display_name="original", - parameters_spec='{}') - ] - self.syncer.sync() - - self.assertEqual(1, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(1, m_s_soft_delete.call_count) - - def test_end2end_sync_goals_with_modified_goal_and_strategy(self): - # ### Setup ### # - - # Here, we simulate goals and strategies already discovered in the past - # that were saved in DB - - # Should stay unmodified after sync() - goal1 = objects.Goal( - self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - # Should be modified by the sync() - goal2 = objects.Goal( - self.ctx, id=2, uuid=utils.generate_uuid(), - name="dummy_2", display_name="Original", - efficacy_specification=self.goal2_spec.serialize_indicators_specs() - ) - goal1.create() - goal2.create() - - # Should stay unmodified after sync() - strategy1 = objects.Strategy( - self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(), - display_name="Strategy 1", goal_id=goal1.id) - # Should be modified after sync() because its related goal has been - # modified - strategy2 = objects.Strategy( - self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(), - display_name="Strategy 2", goal_id=goal2.id) - # Should be modified after sync() because its strategy name has been - # modified - strategy3 = objects.Strategy( - self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(), - display_name="Original", goal_id=goal1.id) - # Should be modified after sync() because both its related goal - # and its 
strategy name have been modified - strategy4 = objects.Strategy( - self.ctx, id=4, name="strategy_4", uuid=utils.generate_uuid(), - display_name="Original", goal_id=goal2.id) - strategy1.create() - strategy2.create() - strategy3.create() - strategy4.create() - - # Here we simulate audit_templates that were already created in the - # past and hence saved within the Watcher DB - - # Should stay unmodified after sync() - audit_template1 = objects.AuditTemplate( - self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(), - goal_id=goal1.id, strategy_id=strategy1.id) - # Should be modified by the sync() because its associated goal - # has been modified (compared to the defined fake goals) - audit_template2 = objects.AuditTemplate( - self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(), - goal_id=goal2.id, strategy_id=strategy2.id) - # Should be modified by the sync() because its associated strategy - # has been modified (compared to the defined fake strategies) - audit_template3 = objects.AuditTemplate( - self.ctx, id=3, name="Synced AT3", uuid=utils.generate_uuid(), - goal_id=goal1.id, strategy_id=strategy3.id) - # Modified because of both because its associated goal and associated - # strategy should be modified - audit_template4 = objects.AuditTemplate( - self.ctx, id=4, name="Synced AT4", uuid=utils.generate_uuid(), - goal_id=goal2.id, strategy_id=strategy4.id) - audit_template1.create() - audit_template2.create() - audit_template3.create() - audit_template4.create() - - # Should stay unmodified after sync() - audit1 = objects.Audit( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal1.id, strategy_id=strategy1.id, auto_trigger=False) - # Should be modified by the sync() because its associated goal - # has been modified (compared to the defined fake goals) - audit2 = objects.Audit( - self.ctx, id=2, uuid=utils.generate_uuid(), - 
audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal2.id, strategy_id=strategy2.id, auto_trigger=False) - # Should be modified by the sync() because its associated strategy - # has been modified (compared to the defined fake strategies) - audit3 = objects.Audit( - self.ctx, id=3, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal1.id, strategy_id=strategy3.id, auto_trigger=False) - # Modified because of both because its associated goal and associated - # strategy should be modified (compared to the defined fake - # goals/strategies) - audit4 = objects.Audit( - self.ctx, id=4, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal2.id, strategy_id=strategy4.id, auto_trigger=False) - - audit1.create() - audit2.create() - audit3.create() - audit4.create() - - # Should stay unmodified after sync() - action_plan1 = objects.ActionPlan( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_id=audit1.id, strategy_id=strategy1.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because the goal of the audit has been modified - # (compared to the defined fake goals) - action_plan2 = objects.ActionPlan( - self.ctx, id=2, uuid=utils.generate_uuid(), - audit_id=audit2.id, strategy_id=strategy2.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because the strategy has been modified - # (compared to the defined fake strategies) - action_plan3 = objects.ActionPlan( - self.ctx, id=3, uuid=utils.generate_uuid(), - audit_id=audit3.id, strategy_id=strategy3.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because both the strategy and the related audit - # have been modified (compared to the defined fake goals/strategies) - action_plan4 = objects.ActionPlan( - self.ctx, id=4, uuid=utils.generate_uuid(), - 
audit_id=audit4.id, strategy_id=strategy4.id, - state='DOESNOTMATTER', global_efficacy={}) - - action_plan1.create() - action_plan2.create() - action_plan3.create() - action_plan4.create() - - before_goals = objects.Goal.list(self.ctx) - before_strategies = objects.Strategy.list(self.ctx) - before_audit_templates = objects.AuditTemplate.list(self.ctx) - before_audits = objects.Audit.list(self.ctx) - before_action_plans = objects.ActionPlan.list(self.ctx) - - # ### Action under test ### # - - try: - self.syncer.sync() - except Exception as exc: - self.fail(exc) - - # ### Assertions ### # - - after_goals = objects.Goal.list(self.ctx) - after_strategies = objects.Strategy.list(self.ctx) - after_audit_templates = objects.AuditTemplate.list(self.ctx) - after_audits = objects.Audit.list(self.ctx) - after_action_plans = objects.ActionPlan.list(self.ctx) - - self.assertEqual(2, len(before_goals)) - self.assertEqual(4, len(before_strategies)) - self.assertEqual(4, len(before_audit_templates)) - self.assertEqual(4, len(before_audits)) - self.assertEqual(4, len(before_action_plans)) - self.assertEqual(2, len(after_goals)) - self.assertEqual(4, len(after_strategies)) - self.assertEqual(4, len(after_audit_templates)) - self.assertEqual(4, len(after_audits)) - self.assertEqual(4, len(after_action_plans)) - - self.assertEqual( - {"dummy_1", "dummy_2"}, - set([g.name for g in after_goals])) - self.assertEqual( - {"strategy_1", "strategy_2", "strategy_3", "strategy_4"}, - set([s.name for s in after_strategies])) - - created_goals, modified_goals, unmodified_goals = ( - self._find_created_modified_unmodified_ids( - before_goals, after_goals)) - - created_strategies, modified_strategies, unmodified_strategies = ( - self._find_created_modified_unmodified_ids( - before_strategies, after_strategies)) - - (created_audit_templates, modified_audit_templates, - unmodified_audit_templates) = ( - self._find_created_modified_unmodified_ids( - before_audit_templates, after_audit_templates)) - - 
created_audits, modified_audits, unmodified_audits = ( - self._find_created_modified_unmodified_ids( - before_audits, after_audits)) - - (created_action_plans, modified_action_plans, - unmodified_action_plans) = ( - self._find_created_modified_unmodified_ids( - before_action_plans, after_action_plans)) - - dummy_1_spec = [ - {'description': 'Dummy indicator', 'name': 'dummy', - 'schema': 'Range(min=0, max=100, min_included=True, ' - 'max_included=True, msg=None)', - 'unit': '%'}] - dummy_2_spec = [] - self.assertEqual( - [dummy_1_spec, dummy_2_spec], - [g.efficacy_specification for g in after_goals]) - - self.assertEqual(1, len(created_goals)) - self.assertEqual(3, len(created_strategies)) - self.assertEqual(0, len(created_audits)) - self.assertEqual(0, len(created_action_plans)) - - self.assertEqual(2, strategy2.goal_id) - - self.assertNotEqual( - set([strategy2.id, strategy3.id, strategy4.id]), - set(modified_strategies)) - self.assertEqual(set([strategy1.id]), set(unmodified_strategies)) - - self.assertEqual( - set([audit_template2.id, audit_template3.id, audit_template4.id]), - set(modified_audit_templates)) - self.assertEqual(set([audit_template1.id]), - set(unmodified_audit_templates)) - - self.assertEqual( - set([audit2.id, audit3.id, audit4.id]), - set(modified_audits)) - self.assertEqual(set([audit1.id]), set(unmodified_audits)) - - self.assertEqual( - set([action_plan2.id, action_plan3.id, action_plan4.id]), - set(modified_action_plans)) - self.assertTrue( - all(ap.state == objects.action_plan.State.CANCELLED - for ap in modified_action_plans.values())) - self.assertEqual(set([action_plan1.id]), set(unmodified_action_plans)) - - def test_end2end_sync_goals_with_removed_goal_and_strategy(self): - # ### Setup ### # - - # We simulate the fact that we removed 2 strategies - self.m_available_strategies.return_value = { - fake_strategies.FakeDummy1Strategy1.get_name(): - fake_strategies.FakeDummy1Strategy1 - } - # We simulate the fact that we removed the 
dummy_2 goal - self.m_available_goals.return_value = { - fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1, - } - # Should stay unmodified after sync() - goal1 = objects.Goal( - self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=self.goal1_spec.serialize_indicators_specs() - ) - # To be removed by the sync() - goal2 = objects.Goal( - self.ctx, id=2, uuid=utils.generate_uuid(), - name="dummy_2", display_name="Dummy 2", - efficacy_specification=self.goal2_spec.serialize_indicators_specs() - ) - goal1.create() - goal2.create() - - # Should stay unmodified after sync() - strategy1 = objects.Strategy( - self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(), - display_name="Strategy 1", goal_id=goal1.id) - # To be removed by the sync() because strategy entry point does not - # exist anymore - strategy2 = objects.Strategy( - self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(), - display_name="Strategy 2", goal_id=goal1.id) - # To be removed by the sync() because the goal has been soft deleted - # and because the strategy entry point does not exist anymore - strategy3 = objects.Strategy( - self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(), - display_name="Original", goal_id=goal2.id) - strategy1.create() - strategy2.create() - strategy3.create() - - # Here we simulate audit_templates that were already created in the - # past and hence saved within the Watcher DB - - # The strategy of this audit template will be dereferenced - # as it does not exist anymore - audit_template1 = objects.AuditTemplate( - self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(), - goal_id=goal1.id, strategy_id=strategy1.id) - # Stale after syncing because the goal has been soft deleted - audit_template2 = objects.AuditTemplate( - self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(), - goal_id=goal2.id, strategy_id=strategy2.id) - - audit_template1.create() - audit_template2.create() - - 
# Should stay unmodified after sync() - audit1 = objects.Audit( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal1.id, strategy_id=strategy1.id, auto_trigger=False) - # Stale after syncing because the goal has been soft deleted - audit2 = objects.Audit( - self.ctx, id=2, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal2.id, strategy_id=strategy2.id, auto_trigger=False) - audit1.create() - audit2.create() - - # Stale after syncing because its related strategy has been be - # soft deleted - action_plan1 = objects.ActionPlan( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_id=audit1.id, strategy_id=strategy1.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because its related goal has been soft deleted - action_plan2 = objects.ActionPlan( - self.ctx, id=2, uuid=utils.generate_uuid(), - audit_id=audit2.id, strategy_id=strategy2.id, - state='DOESNOTMATTER', global_efficacy={}) - - action_plan1.create() - action_plan2.create() - - before_goals = objects.Goal.list(self.ctx) - before_strategies = objects.Strategy.list(self.ctx) - before_audit_templates = objects.AuditTemplate.list(self.ctx) - before_audits = objects.Audit.list(self.ctx) - before_action_plans = objects.ActionPlan.list(self.ctx) - - # ### Action under test ### # - - try: - self.syncer.sync() - except Exception as exc: - self.fail(exc) - - # ### Assertions ### # - - after_goals = objects.Goal.list(self.ctx) - after_strategies = objects.Strategy.list(self.ctx) - after_audit_templates = objects.AuditTemplate.list(self.ctx) - after_audits = objects.Audit.list(self.ctx) - after_action_plans = objects.ActionPlan.list(self.ctx) - - self.assertEqual(2, len(before_goals)) - self.assertEqual(3, len(before_strategies)) - self.assertEqual(2, len(before_audit_templates)) - self.assertEqual(2, len(before_audits)) 
- self.assertEqual(2, len(before_action_plans)) - self.assertEqual(1, len(after_goals)) - self.assertEqual(1, len(after_strategies)) - self.assertEqual(2, len(after_audit_templates)) - self.assertEqual(2, len(after_audits)) - self.assertEqual(2, len(after_action_plans)) - self.assertEqual( - {"dummy_1"}, - set([g.name for g in after_goals])) - self.assertEqual( - {"strategy_1"}, - set([s.name for s in after_strategies])) - - created_goals, modified_goals, unmodified_goals = ( - self._find_created_modified_unmodified_ids( - before_goals, after_goals)) - - created_strategies, modified_strategies, unmodified_strategies = ( - self._find_created_modified_unmodified_ids( - before_strategies, after_strategies)) - - (created_audit_templates, modified_audit_templates, - unmodified_audit_templates) = ( - self._find_created_modified_unmodified_ids( - before_audit_templates, after_audit_templates)) - - created_audits, modified_audits, unmodified_audits = ( - self._find_created_modified_unmodified_ids( - before_audits, after_audits)) - - (created_action_plans, modified_action_plans, - unmodified_action_plans) = ( - self._find_created_modified_unmodified_ids( - before_action_plans, after_action_plans)) - - self.assertEqual(0, len(created_goals)) - self.assertEqual(0, len(created_strategies)) - self.assertEqual(0, len(created_audits)) - self.assertEqual(0, len(created_action_plans)) - - self.assertEqual(set([audit_template2.id]), - set(modified_audit_templates)) - self.assertEqual(set([audit_template1.id]), - set(unmodified_audit_templates)) - - self.assertEqual(set([audit2.id]), set(modified_audits)) - self.assertEqual(set([audit1.id]), set(unmodified_audits)) - - self.assertEqual(set([action_plan2.id]), set(modified_action_plans)) - self.assertTrue( - all(ap.state == objects.action_plan.State.CANCELLED - for ap in modified_action_plans.values())) - self.assertEqual(set([action_plan1.id]), set(unmodified_action_plans)) diff --git a/watcher/tests/fake_policy.py 
b/watcher/tests/fake_policy.py deleted file mode 100644 index bed907c..0000000 --- a/watcher/tests/fake_policy.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -policy_data = """ -{ - "admin_api": "role:admin or role:administrator", - "show_password": "!", - "default": "rule:admin_api", - - "action:detail": "", - "action:get": "", - "action:get_all": "", - - "action_plan:delete": "", - "action_plan:detail": "", - "action_plan:get": "", - "action_plan:get_all": "", - "action_plan:update": "", - - "audit:create": "", - "audit:delete": "", - "audit:detail": "", - "audit:get": "", - "audit:get_all": "", - "audit:update": "", - - "audit_template:create": "", - "audit_template:delete": "", - "audit_template:detail": "", - "audit_template:get": "", - "audit_template:get_all": "", - "audit_template:update": "", - - "goal:detail": "", - "goal:get": "", - "goal:get_all": "", - - "scoring_engine:detail": "", - "scoring_engine:get": "", - "scoring_engine:get_all": "", - - "strategy:detail": "", - "strategy:get": "", - "strategy:get_all": "", - - "service:detail": "", - "service:get": "", - "service:get_all": "" -} -""" - - -policy_data_compat_juno = """ -{ - "admin": "role:admin or role:administrator", - "admin_api": "is_admin:True", - "default": "rule:admin_api" -} -""" - - -def get_policy_data(compat): - if not compat: - return policy_data - elif compat == 'juno': - return policy_data_compat_juno - 
else: - raise Exception('Policy data for %s not available' % compat) diff --git a/watcher/tests/fakes.py b/watcher/tests/fakes.py deleted file mode 100644 index d35b10d..0000000 --- a/watcher/tests/fakes.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419', - 'X-Roles': u'admin, ResellerAdmin, _member_', - 'X-Project-Id': u'5588aebbcdc24e17a061595f80574376', - 'X-Project-Name': 'test', - 'X-User-Name': 'test', - 'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376', - 'X-Forwarded-For': u'10.10.10.10, 11.11.11.11', - 'X-Service-Catalog': u'{test: 12345}', - 'X-Auth-Url': 'fake_auth_url', - 'X-Identity-Status': 'Confirmed', - 'X-User-Domain-Name': 'domain', - 'X-Project-Domain-Id': 'project_domain_id', - 'X-User-Domain-Id': 'user_domain_id', - } - - -class FakePecanRequest(mock.Mock): - - def __init__(self, **kwargs): - super(FakePecanRequest, self).__init__(**kwargs) - self.host_url = 'http://test_url:8080/test' - self.context = {} - self.body = '' - self.content_type = 'text/unicode' - self.params = {} - self.path = '/v1/services' - self.headers = fakeAuthTokenHeaders - self.environ = {} - - def __setitem__(self, index, value): - setattr(self, index, value) - - -class FakePecanResponse(mock.Mock): - - def __init__(self, **kwargs): - super(FakePecanResponse, self).__init__(**kwargs) - self.status = None - - -class FakeApp(object): - pass - - -class 
FakeService(mock.Mock): - def __init__(self, **kwargs): - super(FakeService, self).__init__(**kwargs) - self.__tablename__ = 'service' - self.__resource__ = 'services' - self.user_id = 'fake user id' - self.project_id = 'fake project id' - self.uuid = 'test_uuid' - self.id = 8 - self.name = 'james' - self.service_type = 'not_this' - self.description = 'amazing' - self.tags = ['this', 'and that'] - self.read_only = True - - def as_dict(self): - return dict(service_type=self.service_type, - user_id=self.user_id, - project_id=self.project_id, - uuid=self.uuid, - id=self.id, - name=self.name, - tags=self.tags, - read_only=self.read_only, - description=self.description) - - -class FakeAuthProtocol(mock.Mock): - - def __init__(self, **kwargs): - super(FakeAuthProtocol, self).__init__(**kwargs) - self.app = FakeApp() - self.config = '' diff --git a/watcher/tests/notifications/__init__.py b/watcher/tests/notifications/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/notifications/test_action_notification.py b/watcher/tests/notifications/test_action_notification.py deleted file mode 100644 index 2a4a5b2..0000000 --- a/watcher/tests/notifications/test_action_notification.py +++ /dev/null @@ -1,355 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import exception -from watcher.common import rpc -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestActionNotification(base.DbTestCase): - - def setUp(self): - super(TestActionNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - self.goal = utils.create_test_goal(mock.Mock()) - self.strategy = utils.create_test_strategy(mock.Mock()) - self.audit = utils.create_test_audit(mock.Mock(), - strategy_id=self.strategy.id) - self.action_plan = utils.create_test_action_plan(mock.Mock()) - - def test_send_invalid_action_plan(self): - action_plan = utils.get_test_action_plan( - mock.Mock(), state='DOESNOTMATTER', audit_id=1) - - self.assertRaises( - exception.InvalidActionPlan, - notifications.action_plan.send_update, - mock.MagicMock(), action_plan, host='node0') - - def test_send_action_update(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.ONGOING, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_update( - mock.MagicMock(), action, host='node0', - old_state=objects.action.State.PENDING) - - # The 1st notification is because we created the object. - # The 2nd notification is because we created the action plan object. 
- self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionUpdatePayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state_update': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionStateUpdatePayload', - 'watcher_object.data': { - 'old_state': 'PENDING', - 'state': 'ONGOING' - } - }, - 'state': 'ONGOING', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - }, - payload - ) - - def test_send_action_plan_create(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.PENDING, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_create(mock.MagicMock(), action, - host='node0') - - self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'watcher_object.namespace': 'watcher', 
- 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionCreatePayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'PENDING', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - }, - payload - ) - - def test_send_action_delete(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.DELETED, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_delete(mock.MagicMock(), action, - host='node0') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. 
- self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionDeletePayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'DELETED', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - }, - payload - ) - - def test_send_action_execution(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.PENDING, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_execution_notification( - mock.MagicMock(), action, 'execution', phase='start', host='node0') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. 
- self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'event_type': 'action.execution.start', - 'payload': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionExecutionPayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'fault': None, - 'updated_at': None, - 'state': 'PENDING', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - } - }, - notification - ) - - def test_send_action_execution_with_error(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.FAILED, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - - try: - # This is to load the exception in sys.exc_info() - raise exception.WatcherException("TEST") - except exception.WatcherException: - notifications.action.send_execution_notification( - mock.MagicMock(), action, 'execution', phase='error', - host='node0', priority='error') - - self.assertEqual(1, self.m_notifier.error.call_count) - notification = self.m_notifier.error.call_args[1] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'event_type': 'action.execution.error', - 
'payload': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionExecutionPayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'fault': { - 'watcher_object.data': { - 'exception': u'WatcherException', - 'exception_message': u'TEST', - 'function_name': ( - 'test_send_action_execution_with_error'), - 'module_name': ( - 'watcher.tests.notifications.' - 'test_action_notification') - }, - 'watcher_object.name': 'ExceptionPayload', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0' - }, - 'updated_at': None, - 'state': 'FAILED', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - } - }, - notification - ) diff --git a/watcher/tests/notifications/test_action_plan_notification.py b/watcher/tests/notifications/test_action_plan_notification.py deleted file mode 100644 index 47dce1f..0000000 --- a/watcher/tests/notifications/test_action_plan_notification.py +++ /dev/null @@ -1,429 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import exception -from watcher.common import rpc -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestActionPlanNotification(base.DbTestCase): - - def setUp(self): - super(TestActionPlanNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - self.goal = utils.create_test_goal(mock.Mock()) - self.audit = utils.create_test_audit(mock.Mock(), interval=None) - self.strategy = utils.create_test_strategy(mock.Mock()) - - def test_send_invalid_action_plan(self): - action_plan = utils.get_test_action_plan( - mock.Mock(), state='DOESNOTMATTER', audit_id=1) - - self.assertRaises( - exception.InvalidActionPlan, - notifications.action_plan.send_update, - mock.MagicMock(), action_plan, host='node0') - - def test_send_action_plan_update(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit, strategy=self.strategy) - notifications.action_plan.send_update( - mock.MagicMock(), action_plan, 
host='node0', - old_state=objects.action_plan.State.PENDING) - - # The 1st notification is because we created the object. - # The 2nd notification is because we created the action plan object. - self.assertEqual(3, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "deleted_at": None, - "state": "ONGOING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.name": "ActionPlanStateUpdatePayload" - }, - 
}, - "watcher_object.name": "ActionPlanUpdatePayload" - }, - payload - ) - - def test_send_action_plan_create(self): - action_plan = utils.get_test_action_plan( - mock.Mock(), state=objects.action_plan.State.PENDING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit.as_dict(), strategy=self.strategy.as_dict()) - notifications.action_plan.send_create( - mock.MagicMock(), action_plan, host='node0') - - self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "deleted_at": None, - "state": "PENDING", - "updated_at": None, - "created_at": None, - }, - 
"watcher_object.name": "ActionPlanCreatePayload" - }, - payload - ) - - def test_send_action_plan_delete(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.DELETED, - audit_id=self.audit.id, strategy_id=self.strategy.id) - notifications.action_plan.send_delete( - mock.MagicMock(), action_plan, host='node0') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. - self.assertEqual(3, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "deleted_at": None, - "state": 
"DELETED", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - }, - "watcher_object.name": "ActionPlanDeletePayload" - }, - payload - ) - - def test_send_action_plan_action(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit, strategy=self.strategy) - notifications.action_plan.send_action_notification( - mock.MagicMock(), action_plan, host='node0', - action='execution', phase='start') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. - self.assertEqual(3, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "action_plan.execution.start", - "payload": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": None, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - } - }, - "global_efficacy": {}, - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": 
"cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" - }, - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - } - }, - notification - ) - - def test_send_action_plan_action_with_error(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit, strategy=self.strategy) - - try: - # This is to load the exception in sys.exc_info() - raise exception.WatcherException("TEST") - except exception.WatcherException: - notifications.action_plan.send_action_notification( - mock.MagicMock(), action_plan, host='node0', - action='execution', priority='error', phase='error') - - self.assertEqual(1, self.m_notifier.error.call_count) - notification = self.m_notifier.error.call_args[1] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "action_plan.execution.error", - "payload": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": ( - "test_send_action_plan_action_with_error"), - "module_name": "watcher.tests.notifications." 
- "test_action_plan_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "global_efficacy": {}, - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" - }, - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - } - }, - notification - ) diff --git a/watcher/tests/notifications/test_audit_notification.py b/watcher/tests/notifications/test_audit_notification.py deleted file mode 100644 index 0a5178a..0000000 --- a/watcher/tests/notifications/test_audit_notification.py +++ /dev/null @@ -1,477 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import exception -from watcher.common import rpc -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestAuditNotification(base.DbTestCase): - - def setUp(self): - super(TestAuditNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - self.goal = utils.create_test_goal(mock.Mock()) - self.strategy = utils.create_test_strategy(mock.Mock()) - - def test_send_invalid_audit(self): - audit = utils.get_test_audit( - mock.Mock(), interval=None, state='DOESNOTMATTER', goal_id=1) - - self.assertRaises( - exception.InvalidAudit, - notifications.audit.send_update, - mock.MagicMock(), audit, host='node0') - - def test_send_audit_update_with_strategy(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal, strategy=self.strategy) - notifications.audit.send_update( - mock.MagicMock(), audit, host='node0', - old_state=objects.audit.State.PENDING) - - # The 1st notification is because we created the 
object. - self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "deleted_at": None, - "scope": [], - "state": "ONGOING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.name": "AuditStateUpdatePayload" - }, - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditUpdatePayload" - }, - payload - ) - - def test_send_audit_update_without_strategy(self): - audit = utils.get_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, 
goal=self.goal) - notifications.audit.send_update( - mock.MagicMock(), audit, host='node0', - old_state=objects.audit.State.PENDING) - - self.assertEqual(1, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "strategy_uuid": None, - "strategy": None, - "deleted_at": None, - "scope": [], - "state": "ONGOING", - "updated_at": None, - "created_at": None, - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.name": "AuditStateUpdatePayload" - }, - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditUpdatePayload" - }, - payload - ) - - def test_send_audit_create(self): - audit = utils.get_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.PENDING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal.as_dict(), strategy=self.strategy.as_dict()) - notifications.audit.send_create( - mock.MagicMock(), audit, host='node0') - - self.assertEqual(1, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] 
- - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": None, - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditCreatePayload" - }, - payload - ) - - def test_send_audit_delete(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.DELETED, - goal_id=self.goal.id, strategy_id=self.strategy.id) - notifications.audit.send_delete( - mock.MagicMock(), audit, host='node0') - - # The 1st notification is because we created the object. 
- self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "deleted_at": None, - "scope": [], - "state": "DELETED", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditDeletePayload" - }, - payload - ) - - def test_send_audit_action(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal, strategy=self.strategy) - notifications.audit.send_action_notification( - mock.MagicMock(), audit, host='node0', - action='strategy', phase='start') - - # The 1st notification is because we created the object. 
- self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - notification = self.m_notifier.info.call_args[1] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "audit.strategy.start", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": None, - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test goal", - "efficacy_specification": [], - "name": "TEST", - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "scope": [], - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - } - }, - notification - ) - - def test_send_audit_action_with_error(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal, strategy=self.strategy) - - try: - # This is to load the exception in sys.exc_info() - raise 
exception.WatcherException("TEST") - except exception.WatcherException: - notifications.audit.send_action_notification( - mock.MagicMock(), audit, host='node0', - action='strategy', priority='error', phase='error') - - self.assertEqual(1, self.m_notifier.error.call_count) - notification = self.m_notifier.error.call_args[1] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "audit.strategy.error", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": ( - "test_send_audit_action_with_error"), - "module_name": "watcher.tests.notifications." - "test_audit_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test goal", - "efficacy_specification": [], - "name": "TEST", - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "scope": [], - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - 
"uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - } - }, - notification - ) diff --git a/watcher/tests/notifications/test_notification.py b/watcher/tests/notifications/test_notification.py deleted file mode 100644 index d60a148..0000000 --- a/watcher/tests/notifications/test_notification.py +++ /dev/null @@ -1,355 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections - -import mock -from oslo_versionedobjects import fixture - -from watcher.common import exception -from watcher.common import rpc -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields -from watcher.tests import base as testbase -from watcher.tests.objects import test_objects - - -class TestNotificationBase(testbase.TestCase): - - @base.WatcherObjectRegistry.register_if(False) - class TestObject(base.WatcherObject): - VERSION = '1.0' - fields = { - 'field_1': wfields.StringField(), - 'field_2': wfields.IntegerField(), - 'not_important_field': wfields.IntegerField(), - } - - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationPayload(notificationbase.NotificationPayloadBase): - VERSION = '1.0' - - SCHEMA = { - 'field_1': ('source_field', 'field_1'), - 'field_2': ('source_field', 'field_2'), - } - - fields = { - 'extra_field': wfields.StringField(), # filled by ctor - 'field_1': wfields.StringField(), # filled by the schema - 'field_2': wfields.IntegerField(), # filled by the schema - } - - def populate_schema(self, source_field): - super(TestNotificationBase.TestNotificationPayload, - self).populate_schema(source_field=source_field) - - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationPayloadEmptySchema( - notificationbase.NotificationPayloadBase): - VERSION = '1.0' - - fields = { - 'extra_field': wfields.StringField(), # filled by ctor - } - - @notificationbase.notification_sample('test-update-1.json') - @notificationbase.notification_sample('test-update-2.json') - @base.WatcherObjectRegistry.register_if(False) - class TestNotification(notificationbase.NotificationBase): - VERSION = '1.0' - fields = { - 'payload': wfields.ObjectField('TestNotificationPayload') - } - - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationEmptySchema(notificationbase.NotificationBase): - VERSION = '1.0' - fields = { - 
'payload': wfields.ObjectField( - 'TestNotificationPayloadEmptySchema') - } - - expected_payload = { - 'watcher_object.name': 'TestNotificationPayload', - 'watcher_object.data': { - 'extra_field': 'test string', - 'field_1': 'test1', - 'field_2': 42}, - 'watcher_object.version': '1.0', - 'watcher_object.namespace': 'watcher'} - - def setUp(self): - super(TestNotificationBase, self).setUp() - - self.my_obj = self.TestObject(field_1='test1', - field_2=42, - not_important_field=13) - - self.payload = self.TestNotificationPayload( - extra_field='test string') - self.payload.populate_schema(source_field=self.my_obj) - - self.notification = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE, - phase=wfields.NotificationPhase.START), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - def _verify_notification(self, mock_notifier, mock_context, - expected_event_type, - expected_payload): - mock_notifier.prepare.assert_called_once_with( - publisher_id='watcher-fake:fake-host') - mock_notify = mock_notifier.prepare.return_value.info - self.assertTrue(mock_notify.called) - self.assertEqual(mock_notify.call_args[0][0], mock_context) - self.assertEqual(mock_notify.call_args[1]['event_type'], - expected_event_type) - actual_payload = mock_notify.call_args[1]['payload'] - self.assertEqual(expected_payload, actual_payload) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_emit_notification(self, mock_notifier): - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - self.notification.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update.start', - expected_payload=self.expected_payload) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_no_emit_notifs_disabled(self, mock_notifier): - # Make sure 
notifications aren't emitted when notification_level - # isn't defined, indicating notifications should be disabled - self.config(notification_level=None) - notif = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE, - phase=wfields.NotificationPhase.START), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - mock_context = mock.Mock() - notif.emit(mock_context) - - self.assertFalse(mock_notifier.called) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_no_emit_level_too_low(self, mock_notifier): - # Make sure notification doesn't emit when set notification - # level < config level - self.config(notification_level='warning') - notif = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE, - phase=wfields.NotificationPhase.START), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - mock_context = mock.Mock() - notif.emit(mock_context) - - self.assertFalse(mock_notifier.called) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_emit_event_type_without_phase(self, mock_notifier): - noti = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - noti.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update', - expected_payload=self.expected_payload) - - @mock.patch.object(rpc, 'NOTIFIER') - def 
test_not_possible_to_emit_if_not_populated(self, mock_notifier): - non_populated_payload = self.TestNotificationPayload( - extra_field='test string') - noti = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=non_populated_payload) - - mock_context = mock.Mock() - self.assertRaises(exception.NotificationPayloadError, - noti.emit, mock_context) - self.assertFalse(mock_notifier.called) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_empty_schema(self, mock_notifier): - non_populated_payload = self.TestNotificationPayloadEmptySchema( - extra_field='test string') - noti = self.TestNotificationEmptySchema( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=non_populated_payload) - - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - noti.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update', - expected_payload={ - 'watcher_object.name': 'TestNotificationPayloadEmptySchema', - 'watcher_object.data': {'extra_field': 'test string'}, - 'watcher_object.version': '1.0', - 'watcher_object.namespace': 'watcher'}) - - def test_sample_decorator(self): - self.assertEqual(2, len(self.TestNotification.samples)) - self.assertIn('test-update-1.json', self.TestNotification.samples) - self.assertIn('test-update-2.json', self.TestNotification.samples) - - -expected_notification_fingerprints = { - 'EventType': '1.3-4258a2c86eca79fd34a7dffe1278eab9', - 'ExceptionNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ExceptionPayload': 
'1.0-4516ae282a55fe2fd5c754967ee6248b', - 'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545', - 'TerseAuditPayload': '1.1-19b0e9224c0953366418a30ed785f267', - 'AuditPayload': '1.1-4c59e0cc5d30c42d3b842ce0332709d5', - 'AuditStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'AuditUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditUpdatePayload': '1.1-9b1f725e736051b976571701e5cc1e55', - 'AuditCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditCreatePayload': '1.1-4c59e0cc5d30c42d3b842ce0332709d5', - 'AuditDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditDeletePayload': '1.1-4c59e0cc5d30c42d3b842ce0332709d5', - 'AuditActionNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditActionPayload': '1.1-5a43e7321495c19f98ef5663efa0a821', - 'GoalPayload': '1.0-fa1fecb8b01dd047eef808ded4d50d1a', - 'StrategyPayload': '1.0-94f01c137b083ac236ae82573c1fcfc1', - 'ActionPlanActionPayload': '1.0-d9f134708e06cf2ff2d3b8d522ac2aa8', - 'ActionPlanCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionPlanCreatePayload': '1.0-23d0abbfa43acfd49b2b3097770efdce', - 'ActionPlanDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionPlanDeletePayload': '1.0-23d0abbfa43acfd49b2b3097770efdce', - 'ActionPlanPayload': '1.0-23d0abbfa43acfd49b2b3097770efdce', - 'ActionPlanStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'ActionPlanUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionPlanUpdatePayload': '1.0-3e1a348a0579c6c43c1c3d7257e3f26b', - 'ActionPlanActionNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionCreatePayload': '1.0-519b93b7450319d8928b4b6e6362df31', - 'ActionDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionDeletePayload': '1.0-519b93b7450319d8928b4b6e6362df31', - 'ActionExecutionNotification': '1.0-9b69de0724fda8310d05e18418178866', 
- 'ActionExecutionPayload': '1.0-bff9f820a2abf7bb6d7027b7450157df', - 'ActionPayload': '1.0-519b93b7450319d8928b4b6e6362df31', - 'ActionStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'ActionUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionUpdatePayload': '1.0-03306c7e7f4d49ac328c261eff6b30b8', - 'TerseActionPlanPayload': '1.0-42bf7a5585cc111a9a4dbc008a04c67e', - 'ServiceUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ServicePayload': '1.0-9c5a9bc51e6606e0ec3cf95baf698f4f', - 'ServiceStatusUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'ServiceUpdatePayload': '1.0-e0e9812a45958974693a723a2c820c3f' - -} - - -class TestNotificationObjectVersions(testbase.TestCase): - def setUp(self): - super(TestNotificationObjectVersions, self).setUp() - base.WatcherObjectRegistry.register_notification_objects() - - def test_versions(self): - checker = fixture.ObjectVersionChecker( - test_objects.get_watcher_objects()) - expected_notification_fingerprints.update( - test_objects.expected_object_fingerprints) - expected, actual = checker.test_hashes( - expected_notification_fingerprints) - self.assertEqual(expected, actual, - 'Some notification objects have changed; please make ' - 'sure the versions have been bumped, and then update ' - 'their hashes here.') - - def test_notification_payload_version_depends_on_the_schema(self): - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationPayload( - notificationbase.NotificationPayloadBase): - VERSION = '1.0' - - SCHEMA = { - 'field_1': ('source_field', 'field_1'), - 'field_2': ('source_field', 'field_2'), - } - - fields = { - 'extra_field': wfields.StringField(), # filled by ctor - 'field_1': wfields.StringField(), # filled by the schema - 'field_2': wfields.IntegerField(), # filled by the schema - } - - checker = fixture.ObjectVersionChecker( - {'TestNotificationPayload': (TestNotificationPayload,)}) - - old_hash = 
checker.get_hashes(extra_data_func=get_extra_data) - TestNotificationPayload.SCHEMA['field_3'] = ('source_field', - 'field_3') - new_hash = checker.get_hashes(extra_data_func=get_extra_data) - - self.assertNotEqual(old_hash, new_hash) - - -def get_extra_data(obj_class): - extra_data = tuple() - - # Get the SCHEMA items to add to the fingerprint - # if we are looking at a notification - if issubclass(obj_class, notificationbase.NotificationPayloadBase): - schema_data = collections.OrderedDict( - sorted(obj_class.SCHEMA.items())) - - extra_data += (schema_data,) - - return extra_data diff --git a/watcher/tests/notifications/test_service_notifications.py b/watcher/tests/notifications/test_service_notifications.py deleted file mode 100644 index 538fed0..0000000 --- a/watcher/tests/notifications/test_service_notifications.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import rpc -from watcher import notifications -from watcher.objects import service as w_service -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestActionPlanNotification(base.DbTestCase): - - def setUp(self): - super(TestActionPlanNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - - def test_service_failed(self): - service = utils.get_test_service(mock.Mock(), - created_at=datetime.datetime.utcnow()) - state = w_service.ServiceStatus.FAILED - notifications.service.send_service_update(mock.MagicMock(), - service, - state, - host='node0') - notification = self.m_notifier.warning.call_args[1] - payload = notification['payload'] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual({ - 'watcher_object.data': { - 'last_seen_up': '2016-09-22T08:32:06Z', - 'name': 'watcher-service', - 'sevice_host': 'controller', - 'status_update': { - 'watcher_object.data': { - 'old_state': 'ACTIVE', - 'state': 'FAILED' - }, - 'watcher_object.name': 'ServiceStatusUpdatePayload', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0' - } - }, - 'watcher_object.name': 'ServiceUpdatePayload', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0' - }, - payload - ) diff --git a/watcher/tests/objects/__init__.py b/watcher/tests/objects/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/objects/test_action.py b/watcher/tests/objects/test_action.py deleted file 
mode 100644 index 82c6706..0000000 --- a/watcher/tests/objects/test_action.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import utils as c_utils -from watcher.db.sqlalchemy import api as db_api -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestActionObject(base.DbTestCase): - - action_plan_id = 2 - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_action=utils.get_test_action( - action_plan_id=action_plan_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_action=utils.get_test_action( - action_plan_id=action_plan_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_action=utils.get_test_action( - action_plan_id=action_plan_id, - action_plan=utils.get_test_action_plan(id=action_plan_id)))), - ] - - def setUp(self): - super(TestActionObject, self).setUp() - - p_action_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_notifications = p_action_notifications.start() - self.addCleanup(p_action_notifications.stop) - self.m_send_update = self.m_action_notifications.send_update - - self.fake_action_plan = utils.create_test_action_plan( - id=self.action_plan_id) - - def 
eager_action_assert(self, action): - if self.eager: - self.assertIsNotNone(action.action_plan) - fields_to_check = set( - super(objects.ActionPlan, objects.ActionPlan).fields - ).symmetric_difference(objects.ActionPlan.fields) - db_data = { - k: v for k, v in self.fake_action_plan.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in action.action_plan.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_action_by_id') - def test_get_by_id(self, mock_get_action): - mock_get_action.return_value = self.fake_action - action_id = self.fake_action['id'] - action = objects.Action.get(self.context, action_id, eager=self.eager) - mock_get_action.assert_called_once_with( - self.context, action_id, eager=self.eager) - self.assertEqual(self.context, action._context) - self.eager_action_assert(action) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_get_by_uuid(self, mock_get_action): - mock_get_action.return_value = self.fake_action - uuid = self.fake_action['uuid'] - action = objects.Action.get(self.context, uuid, eager=self.eager) - mock_get_action.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, action._context) - self.assertEqual(0, self.m_send_update.call_count) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.Action.get, self.context, 'not-a-uuid', - eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_action_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_action] - actions = objects.Action.list(self.context, eager=self.eager) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(actions)) - self.assertIsInstance(actions[0], objects.Action) - self.assertEqual(self.context, actions[0]._context) - for action in actions: - 
self.eager_action_assert(action) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(objects.Strategy, 'get') - @mock.patch.object(objects.Audit, 'get') - @mock.patch.object(db_api.Connection, 'update_action') - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_save(self, mock_get_action, mock_update_action, mock_get_audit, - mock_get_strategy): - mock_get_action.return_value = self.fake_action - fake_saved_action = self.fake_action.copy() - mock_get_audit.return_value = mock.PropertyMock( - uuid=c_utils.generate_uuid()) - mock_get_strategy.return_value = mock.PropertyMock( - uuid=c_utils.generate_uuid()) - fake_saved_action['updated_at'] = datetime.datetime.utcnow() - mock_update_action.return_value = fake_saved_action - uuid = self.fake_action['uuid'] - action = objects.Action.get_by_uuid( - self.context, uuid, eager=self.eager) - action.state = objects.action.State.SUCCEEDED - if not self.eager: - self.assertRaises(exception.EagerlyLoadedActionRequired, - action.save) - else: - action.save() - - expected_update_at = fake_saved_action['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - mock_get_action.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_action.assert_called_once_with( - uuid, {'state': objects.action.State.SUCCEEDED}) - self.assertEqual(self.context, action._context) - self.assertEqual(expected_update_at, action.updated_at) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_refresh(self, mock_get_action): - returns = [dict(self.fake_action, state="first state"), - dict(self.fake_action, state="second state")] - mock_get_action.side_effect = returns - uuid = self.fake_action['uuid'] - expected = [mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - action = objects.Action.get(self.context, uuid, eager=self.eager) - self.assertEqual("first state", 
action.state) - action.refresh(eager=self.eager) - self.assertEqual("second state", action.state) - self.assertEqual(expected, mock_get_action.call_args_list) - self.assertEqual(self.context, action._context) - self.eager_action_assert(action) - self.assertEqual(0, self.m_send_update.call_count) - - -class TestCreateDeleteActionObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteActionObject, self).setUp() - self.fake_strategy = utils.create_test_strategy(name="DUMMY") - self.fake_audit = utils.create_test_audit() - self.fake_action_plan = utils.create_test_action_plan() - self.fake_action = utils.get_test_action( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_action') - def test_create(self, mock_create_action): - mock_create_action.return_value = self.fake_action - action = objects.Action(self.context, **self.fake_action) - action.create() - expected_action = self.fake_action.copy() - expected_action['created_at'] = expected_action['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - mock_create_action.assert_called_once_with(expected_action) - self.assertEqual(self.context, action._context) - - @mock.patch.object(notifications.action, 'send_delete') - @mock.patch.object(notifications.action, 'send_update') - @mock.patch.object(db_api.Connection, 'update_action') - @mock.patch.object(db_api.Connection, 'soft_delete_action') - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_soft_delete(self, mock_get_action, - mock_soft_delete_action, mock_update_action, - mock_send_update, mock_send_delete): - mock_get_action.return_value = self.fake_action - fake_deleted_action = self.fake_action.copy() - fake_deleted_action['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete_action.return_value = fake_deleted_action - mock_update_action.return_value = fake_deleted_action - - expected_action = fake_deleted_action.copy() - expected_action['created_at'] = 
expected_action['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_action['deleted_at'] = expected_action['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - del expected_action['action_plan'] - - uuid = self.fake_action['uuid'] - action = objects.Action.get_by_uuid(self.context, uuid) - action.soft_delete() - mock_get_action.assert_called_once_with( - self.context, uuid, eager=False) - mock_soft_delete_action.assert_called_once_with(uuid) - mock_update_action.assert_called_once_with( - uuid, {'state': objects.action.State.DELETED}) - self.assertEqual(self.context, action._context) - self.assertEqual(expected_action, action.as_dict()) - - @mock.patch.object(db_api.Connection, 'destroy_action') - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_destroy(self, mock_get_action, mock_destroy_action): - mock_get_action.return_value = self.fake_action - uuid = self.fake_action['uuid'] - action = objects.Action.get_by_uuid(self.context, uuid) - action.destroy() - - mock_get_action.assert_called_once_with( - self.context, uuid, eager=False) - mock_destroy_action.assert_called_once_with(uuid) - self.assertEqual(self.context, action._context) diff --git a/watcher/tests/objects/test_action_plan.py b/watcher/tests/objects/test_action_plan.py deleted file mode 100644 index a06c948..0000000 --- a/watcher/tests/objects/test_action_plan.py +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import utils as common_utils -from watcher import conf -from watcher.db.sqlalchemy import api as db_api -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - -CONF = conf.CONF - - -class TestActionPlanObject(base.DbTestCase): - - audit_id = 2 - strategy_id = 2 - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_action_plan=utils.get_test_action_plan( - created_at=datetime.datetime.utcnow(), - audit_id=audit_id, - strategy_id=strategy_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_action_plan=utils.get_test_action_plan( - created_at=datetime.datetime.utcnow(), - audit_id=audit_id, - strategy_id=strategy_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_action_plan=utils.get_test_action_plan( - created_at=datetime.datetime.utcnow(), - strategy_id=strategy_id, - strategy=utils.get_test_strategy(id=strategy_id), - audit_id=audit_id, - audit=utils.get_test_audit(id=audit_id)))), - ] - - def setUp(self): - super(TestActionPlanObject, self).setUp() - - p_action_plan_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_plan_notifications = p_action_plan_notifications.start() - self.addCleanup(p_action_plan_notifications.stop) - self.m_send_update = self.m_action_plan_notifications.send_update - - self.fake_audit = utils.create_test_audit(id=self.audit_id) - self.fake_strategy = utils.create_test_strategy( - id=self.strategy_id, name="DUMMY") - - def eager_load_action_plan_assert(self, action_plan): - if self.eager: - self.assertIsNotNone(action_plan.audit) - fields_to_check = set( - super(objects.Audit, objects.Audit).fields - ).symmetric_difference(objects.Audit.fields) - db_data = { - k: v for 
k, v in self.fake_audit.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in action_plan.audit.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_action_plan_by_id') - def test_get_by_id(self, mock_get_action_plan): - mock_get_action_plan.return_value = self.fake_action_plan - action_plan_id = self.fake_action_plan['id'] - action_plan = objects.ActionPlan.get( - self.context, action_plan_id, eager=self.eager) - mock_get_action_plan.assert_called_once_with( - self.context, action_plan_id, eager=self.eager) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') - def test_get_by_uuid(self, mock_get_action_plan): - mock_get_action_plan.return_value = self.fake_action_plan - uuid = self.fake_action_plan['uuid'] - action_plan = objects.ActionPlan.get( - self.context, uuid, eager=self.eager) - mock_get_action_plan.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - self.assertEqual(0, self.m_send_update.call_count) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.ActionPlan.get, self.context, - 'not-a-uuid', eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_action_plan_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_action_plan] - action_plans = objects.ActionPlan.list(self.context, eager=self.eager) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(action_plans)) - self.assertIsInstance(action_plans[0], objects.ActionPlan) - self.assertEqual(self.context, action_plans[0]._context) - for action_plan in action_plans: - 
self.eager_load_action_plan_assert(action_plan) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'update_action_plan') - @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') - def test_save(self, mock_get_action_plan, mock_update_action_plan): - mock_get_action_plan.return_value = self.fake_action_plan - fake_saved_action_plan = self.fake_action_plan.copy() - fake_saved_action_plan['state'] = objects.action_plan.State.SUCCEEDED - fake_saved_action_plan['updated_at'] = datetime.datetime.utcnow() - - mock_update_action_plan.return_value = fake_saved_action_plan - - expected_action_plan = fake_saved_action_plan.copy() - expected_action_plan[ - 'created_at'] = expected_action_plan['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_action_plan[ - 'updated_at'] = expected_action_plan['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - uuid = self.fake_action_plan['uuid'] - action_plan = objects.ActionPlan.get_by_uuid( - self.context, uuid, eager=self.eager) - action_plan.state = objects.action_plan.State.SUCCEEDED - action_plan.save() - - mock_get_action_plan.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_action_plan.assert_called_once_with( - uuid, {'state': objects.action_plan.State.SUCCEEDED}) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - self.m_send_update.assert_called_once_with( - self.context, action_plan, - old_state=self.fake_action_plan['state']) - self.assertEqual( - {k: v for k, v in expected_action_plan.items() - if k not in action_plan.object_fields}, - {k: v for k, v in action_plan.as_dict().items() - if k not in action_plan.object_fields}) - - @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') - def test_refresh(self, mock_get_action_plan): - returns = [dict(self.fake_action_plan, state="first state"), - dict(self.fake_action_plan, state="second state")] - 
mock_get_action_plan.side_effect = returns - uuid = self.fake_action_plan['uuid'] - expected = [mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - action_plan = objects.ActionPlan.get( - self.context, uuid, eager=self.eager) - self.assertEqual("first state", action_plan.state) - action_plan.refresh(eager=self.eager) - self.assertEqual("second state", action_plan.state) - self.assertEqual(expected, mock_get_action_plan.call_args_list) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - - -class TestCreateDeleteActionPlanObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteActionPlanObject, self).setUp() - - p_action_plan_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_plan_notifications = p_action_plan_notifications.start() - self.addCleanup(p_action_plan_notifications.stop) - self.m_send_update = self.m_action_plan_notifications.send_update - - self.fake_strategy = utils.create_test_strategy(name="DUMMY") - self.fake_audit = utils.create_test_audit() - self.fake_action_plan = utils.get_test_action_plan( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_action_plan') - def test_create(self, mock_create_action_plan): - mock_create_action_plan.return_value = self.fake_action_plan - action_plan = objects.ActionPlan( - self.context, **self.fake_action_plan) - action_plan.create() - expected_action_plan = self.fake_action_plan.copy() - expected_action_plan['created_at'] = expected_action_plan[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - mock_create_action_plan.assert_called_once_with(expected_action_plan) - self.assertEqual(self.context, action_plan._context) - - @mock.patch.multiple( - db_api.Connection, - get_action_plan_by_uuid=mock.DEFAULT, - soft_delete_action_plan=mock.DEFAULT, - update_action_plan=mock.DEFAULT, - 
get_efficacy_indicator_list=mock.DEFAULT, - soft_delete_efficacy_indicator=mock.DEFAULT, - ) - def test_soft_delete(self, get_action_plan_by_uuid, - soft_delete_action_plan, update_action_plan, - get_efficacy_indicator_list, - soft_delete_efficacy_indicator): - efficacy_indicator = utils.get_test_efficacy_indicator( - action_plan_id=self.fake_action_plan['id']) - uuid = self.fake_action_plan['uuid'] - m_get_action_plan = get_action_plan_by_uuid - m_soft_delete_action_plan = soft_delete_action_plan - m_get_efficacy_indicator_list = get_efficacy_indicator_list - m_soft_delete_efficacy_indicator = soft_delete_efficacy_indicator - m_update_action_plan = update_action_plan - - m_get_action_plan.return_value = self.fake_action_plan - fake_deleted_action_plan = self.fake_action_plan.copy() - fake_deleted_action_plan['deleted_at'] = datetime.datetime.utcnow() - m_update_action_plan.return_value = fake_deleted_action_plan - m_soft_delete_action_plan.return_value = fake_deleted_action_plan - expected_action_plan = fake_deleted_action_plan.copy() - expected_action_plan['created_at'] = expected_action_plan[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_action_plan['deleted_at'] = expected_action_plan[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - del expected_action_plan['audit'] - del expected_action_plan['strategy'] - - m_get_efficacy_indicator_list.return_value = [efficacy_indicator] - action_plan = objects.ActionPlan.get_by_uuid( - self.context, uuid, eager=False) - action_plan.soft_delete() - - m_get_action_plan.assert_called_once_with( - self.context, uuid, eager=False) - m_get_efficacy_indicator_list.assert_called_once_with( - self.context, filters={"action_plan_uuid": uuid}, - limit=None, marker=None, sort_dir=None, sort_key=None) - m_soft_delete_action_plan.assert_called_once_with(uuid) - m_soft_delete_efficacy_indicator.assert_called_once_with( - efficacy_indicator['uuid']) - m_update_action_plan.assert_called_once_with( - uuid, {'state': 
objects.action_plan.State.DELETED}) - - self.assertEqual(self.context, action_plan._context) - self.assertEqual(expected_action_plan, action_plan.as_dict()) - - @mock.patch.multiple( - db_api.Connection, - get_action_plan_by_uuid=mock.DEFAULT, - destroy_action_plan=mock.DEFAULT, - get_efficacy_indicator_list=mock.DEFAULT, - destroy_efficacy_indicator=mock.DEFAULT, - ) - def test_destroy(self, get_action_plan_by_uuid, destroy_action_plan, - get_efficacy_indicator_list, destroy_efficacy_indicator): - m_get_action_plan = get_action_plan_by_uuid - m_destroy_action_plan = destroy_action_plan - m_get_efficacy_indicator_list = get_efficacy_indicator_list - m_destroy_efficacy_indicator = destroy_efficacy_indicator - efficacy_indicator = utils.get_test_efficacy_indicator( - action_plan_id=self.fake_action_plan['id']) - uuid = self.fake_action_plan['uuid'] - m_get_action_plan.return_value = self.fake_action_plan - m_get_efficacy_indicator_list.return_value = [efficacy_indicator] - action_plan = objects.ActionPlan.get_by_uuid(self.context, uuid) - action_plan.destroy() - - m_get_action_plan.assert_called_once_with( - self.context, uuid, eager=False) - m_get_efficacy_indicator_list.assert_called_once_with( - self.context, filters={"action_plan_uuid": uuid}, - limit=None, marker=None, sort_dir=None, sort_key=None) - m_destroy_action_plan.assert_called_once_with(uuid) - m_destroy_efficacy_indicator.assert_called_once_with( - efficacy_indicator['uuid']) - self.assertEqual(self.context, action_plan._context) - - -@mock.patch.object(notifications.action_plan, 'send_update', mock.Mock()) -class TestStateManager(base.DbTestCase): - - def setUp(self): - super(TestStateManager, self).setUp() - self.state_manager = objects.action_plan.StateManager() - - def test_check_expired(self): - CONF.set_default('action_plan_expiry', 0, - group='watcher_decision_engine') - strategy_1 = utils.create_test_strategy( - uuid=common_utils.generate_uuid()) - audit_1 = utils.create_test_audit( - 
uuid=common_utils.generate_uuid()) - action_plan_1 = utils.create_test_action_plan( - state=objects.action_plan.State.RECOMMENDED, - uuid=common_utils.generate_uuid(), - audit_id=audit_1.id, - strategy_id=strategy_1.id) - - self.state_manager.check_expired(self.context) - - action_plan = objects.action_plan.ActionPlan.get_by_uuid( - self.context, action_plan_1.uuid) - self.assertEqual(objects.action_plan.State.SUPERSEDED, - action_plan.state) diff --git a/watcher/tests/objects/test_audit.py b/watcher/tests/objects/test_audit.py deleted file mode 100644 index 0b01480..0000000 --- a/watcher/tests/objects/test_audit.py +++ /dev/null @@ -1,325 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import rpc -from watcher.common import utils as w_utils -from watcher.db.sqlalchemy import api as db_api -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils -from watcher.tests.objects import utils as objutils - - -class TestAuditObject(base.DbTestCase): - - goal_id = 2 - - goal_data = utils.get_test_goal( - id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_audit=utils.get_test_audit( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_audit=utils.get_test_audit( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_audit=utils.get_test_audit( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id, goal=goal_data))), - ] - - def setUp(self): - super(TestAuditObject, self).setUp() - - p_audit_notifications = mock.patch.object( - notifications, 'audit', autospec=True) - self.m_audit_notifications = p_audit_notifications.start() - self.addCleanup(p_audit_notifications.stop) - self.m_send_update = self.m_audit_notifications.send_update - self.fake_goal = utils.create_test_goal(**self.goal_data) - - def eager_load_audit_assert(self, audit, goal): - if self.eager: - self.assertIsNotNone(audit.goal) - fields_to_check = set( - super(objects.Goal, objects.Goal).fields - ).symmetric_difference(objects.Goal.fields) - db_data = { - k: v for k, v in goal.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in audit.goal.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_audit_by_id') - def test_get_by_id(self, mock_get_audit): - mock_get_audit.return_value = self.fake_audit - 
audit_id = self.fake_audit['id'] - audit = objects.Audit.get(self.context, audit_id, eager=self.eager) - mock_get_audit.assert_called_once_with( - self.context, audit_id, eager=self.eager) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_get_by_uuid(self, mock_get_audit): - mock_get_audit.return_value = self.fake_audit - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get(self.context, uuid, eager=self.eager) - mock_get_audit.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - self.assertEqual(0, self.m_send_update.call_count) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.Audit.get, self.context, - 'not-a-uuid', eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_audit_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_audit] - audits = objects.Audit.list(self.context, eager=self.eager) - mock_get_list.assert_called_once_with( - self.context, eager=self.eager, filters=None, limit=None, - marker=None, sort_dir=None, sort_key=None) - self.assertEqual(1, len(audits)) - self.assertIsInstance(audits[0], objects.Audit) - self.assertEqual(self.context, audits[0]._context) - for audit in audits: - self.eager_load_audit_assert(audit, self.fake_goal) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_save(self, mock_get_audit, mock_update_audit): - mock_get_audit.return_value = self.fake_audit - fake_saved_audit = self.fake_audit.copy() - fake_saved_audit['state'] = objects.audit.State.SUCCEEDED - fake_saved_audit['updated_at'] = 
datetime.datetime.utcnow() - mock_update_audit.return_value = fake_saved_audit - - expected_audit = fake_saved_audit.copy() - expected_audit['created_at'] = expected_audit['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_audit['updated_at'] = expected_audit['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid, eager=self.eager) - audit.state = objects.audit.State.SUCCEEDED - audit.save() - - mock_get_audit.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_audit.assert_called_once_with( - uuid, {'state': objects.audit.State.SUCCEEDED}) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - self.m_send_update.assert_called_once_with( - self.context, audit, old_state=self.fake_audit['state']) - self.assertEqual( - {k: v for k, v in expected_audit.items() - if k not in audit.object_fields}, - {k: v for k, v in audit.as_dict().items() - if k not in audit.object_fields}) - - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_refresh(self, mock_get_audit): - returns = [dict(self.fake_audit, state="first state"), - dict(self.fake_audit, state="second state")] - mock_get_audit.side_effect = returns - uuid = self.fake_audit['uuid'] - expected = [ - mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - audit = objects.Audit.get(self.context, uuid, eager=self.eager) - self.assertEqual("first state", audit.state) - audit.refresh(eager=self.eager) - self.assertEqual("second state", audit.state) - self.assertEqual(expected, mock_get_audit.call_args_list) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - - -class TestCreateDeleteAuditObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteAuditObject, self).setUp() - p_audit_notifications = mock.patch.object( - 
notifications, 'audit', autospec=True) - self.m_audit_notifications = p_audit_notifications.start() - self.addCleanup(p_audit_notifications.stop) - self.m_send_update = self.m_audit_notifications.send_update - - self.goal_id = 1 - self.goal = utils.create_test_goal(id=self.goal_id, name="DUMMY") - self.fake_audit = utils.get_test_audit( - goal_id=self.goal_id, created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_audit') - def test_create(self, mock_create_audit): - mock_create_audit.return_value = self.fake_audit - audit = objects.Audit(self.context, **self.fake_audit) - audit.create() - expected_audit = self.fake_audit.copy() - expected_audit['created_at'] = expected_audit['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - mock_create_audit.assert_called_once_with(expected_audit) - self.assertEqual(self.context, audit._context) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'soft_delete_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_soft_delete(self, mock_get_audit, - mock_soft_delete_audit, mock_update_audit): - mock_get_audit.return_value = self.fake_audit - fake_deleted_audit = self.fake_audit.copy() - fake_deleted_audit['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete_audit.return_value = fake_deleted_audit - mock_update_audit.return_value = fake_deleted_audit - - expected_audit = fake_deleted_audit.copy() - expected_audit['created_at'] = expected_audit['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - del expected_audit['goal'] - del expected_audit['strategy'] - - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid, eager=False) - audit.soft_delete() - mock_get_audit.assert_called_once_with(self.context, uuid, eager=False) - mock_soft_delete_audit.assert_called_once_with(uuid) - 
mock_update_audit.assert_called_once_with(uuid, {'state': 'DELETED'}) - self.assertEqual(self.context, audit._context) - self.assertEqual(expected_audit, audit.as_dict()) - - @mock.patch.object(db_api.Connection, 'destroy_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_destroy(self, mock_get_audit, - mock_destroy_audit): - mock_get_audit.return_value = self.fake_audit - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid) - audit.destroy() - mock_get_audit.assert_called_once_with( - self.context, uuid, eager=False) - mock_destroy_audit.assert_called_once_with(uuid) - self.assertEqual(self.context, audit._context) - - -class TestAuditObjectSendNotifications(base.DbTestCase): - - def setUp(self): - super(TestAuditObjectSendNotifications, self).setUp() - goal_id = 1 - self.fake_goal = utils.create_test_goal(id=goal_id, name="DUMMY") - self.fake_strategy = utils.create_test_strategy( - id=goal_id, name="DUMMY") - self.fake_audit = utils.get_test_audit( - goal_id=goal_id, goal=utils.get_test_goal(id=goal_id), - strategy_id=self.fake_strategy.id, strategy=self.fake_strategy) - - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - self.m_get_notifier = p_get_notifier.start() - self.m_get_notifier.return_value = mock.Mock(name='m_notifier') - self.m_notifier = self.m_get_notifier.return_value - self.addCleanup(p_get_notifier.stop) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_send_update_notification(self, m_get_audit, m_update_audit): - fake_audit = utils.get_test_audit( - goal=self.fake_goal.as_dict(), - strategy_id=self.fake_strategy.id, - strategy=self.fake_strategy.as_dict()) - m_get_audit.return_value = fake_audit - fake_saved_audit = self.fake_audit.copy() - fake_saved_audit['state'] = objects.audit.State.SUCCEEDED - m_update_audit.return_value = fake_saved_audit - uuid = fake_audit['uuid'] - - audit = 
objects.Audit.get_by_uuid(self.context, uuid, eager=True) - audit.state = objects.audit.State.ONGOING - audit.save() - - self.assertEqual(1, self.m_notifier.info.call_count) - self.assertEqual('audit.update', - self.m_notifier.info.call_args[1]['event_type']) - - @mock.patch.object(db_api.Connection, 'create_audit') - def test_send_create_notification(self, m_create_audit): - audit = objutils.get_test_audit( - self.context, - id=1, - goal_id=self.fake_goal.id, - strategy_id=self.fake_strategy.id, - goal=self.fake_goal.as_dict(), - strategy=self.fake_strategy.as_dict()) - m_create_audit.return_value = audit - audit.create() - - self.assertEqual(1, self.m_notifier.info.call_count) - self.assertEqual('audit.create', - self.m_notifier.info.call_args[1]['event_type']) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'soft_delete_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_send_delete_notification( - self, m_get_audit, m_soft_delete_audit, m_update_audit): - fake_audit = utils.get_test_audit( - goal=self.fake_goal.as_dict(), - strategy_id=self.fake_strategy.id, - strategy=self.fake_strategy.as_dict()) - m_get_audit.return_value = fake_audit - fake_deleted_audit = self.fake_audit.copy() - fake_deleted_audit['deleted_at'] = datetime.datetime.utcnow() - expected_audit = fake_deleted_audit.copy() - expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - m_soft_delete_audit.return_value = fake_deleted_audit - m_update_audit.return_value = fake_deleted_audit - uuid = fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid, eager=True) - audit.soft_delete() - - self.assertEqual(2, self.m_notifier.info.call_count) - self.assertEqual( - 'audit.update', - self.m_notifier.info.call_args_list[0][1]['event_type']) - self.assertEqual( - 'audit.delete', - self.m_notifier.info.call_args_list[1][1]['event_type']) diff --git 
a/watcher/tests/objects/test_audit_template.py b/watcher/tests/objects/test_audit_template.py deleted file mode 100644 index 14da9f0..0000000 --- a/watcher/tests/objects/test_audit_template.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestAuditTemplateObject(base.DbTestCase): - - goal_id = 1 - - goal_data = utils.get_test_goal( - id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_audit_template=utils.get_test_audit_template( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_audit_template=utils.get_test_audit_template( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_audit_template=utils.get_test_audit_template( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id, goal=goal_data))), - ] - - def setUp(self): - super(TestAuditTemplateObject, self).setUp() - self.fake_goal = utils.create_test_goal(**self.goal_data) - - def eager_load_audit_template_assert(self, 
audit_template, goal): - if self.eager: - self.assertIsNotNone(audit_template.goal) - fields_to_check = set( - super(objects.Goal, objects.Goal).fields - ).symmetric_difference(objects.Goal.fields) - db_data = { - k: v for k, v in goal.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in audit_template.goal.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_audit_template_by_id') - def test_get_by_id(self, mock_get_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - audit_template_id = self.fake_audit_template['id'] - audit_template = objects.AuditTemplate.get( - self.context, audit_template_id, eager=self.eager) - mock_get_audit_template.assert_called_once_with( - self.context, audit_template_id, eager=self.eager) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_get_by_uuid(self, mock_get_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get( - self.context, uuid, eager=self.eager) - mock_get_audit_template.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 'get_audit_template_by_name') - def test_get_by_name(self, mock_get_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - name = self.fake_audit_template['name'] - audit_template = objects.AuditTemplate.get_by_name( - self.context, name, eager=self.eager) - mock_get_audit_template.assert_called_once_with( - self.context, name, eager=self.eager) - self.assertEqual(self.context, 
audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.AuditTemplate.get, - self.context, 'not-a-uuid', eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_audit_template_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_audit_template] - audit_templates = objects.AuditTemplate.list( - self.context, eager=self.eager) - mock_get_list.assert_called_once_with( - self.context, eager=self.eager, filters=None, limit=None, - marker=None, sort_dir=None, sort_key=None) - self.assertEqual(1, len(audit_templates)) - self.assertIsInstance(audit_templates[0], objects.AuditTemplate) - self.assertEqual(self.context, audit_templates[0]._context) - for audit_template in audit_templates: - self.eager_load_audit_template_assert( - audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 'update_audit_template') - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_save(self, mock_get_audit_template, mock_update_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - fake_saved_audit_template = self.fake_audit_template.copy() - fake_saved_audit_template['updated_at'] = datetime.datetime.utcnow() - mock_update_audit_template.return_value = fake_saved_audit_template - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get_by_uuid( - self.context, uuid, eager=self.eager) - audit_template.goal_id = self.fake_goal.id - audit_template.save() - - mock_get_audit_template.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_audit_template.assert_called_once_with( - uuid, {'goal_id': self.fake_goal.id}) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 
'get_audit_template_by_uuid') - def test_refresh(self, mock_get_audit_template): - returns = [dict(self.fake_audit_template, name="first name"), - dict(self.fake_audit_template, name="second name")] - mock_get_audit_template.side_effect = returns - uuid = self.fake_audit_template['uuid'] - expected = [mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - audit_template = objects.AuditTemplate.get( - self.context, uuid, eager=self.eager) - self.assertEqual("first name", audit_template.name) - audit_template.refresh(eager=self.eager) - self.assertEqual("second name", audit_template.name) - self.assertEqual(expected, mock_get_audit_template.call_args_list) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - -class TestCreateDeleteAuditTemplateObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteAuditTemplateObject, self).setUp() - self.fake_audit_template = utils.get_test_audit_template( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_audit_template') - def test_create(self, mock_create_audit_template): - goal = utils.create_test_goal() - self.fake_audit_template['goal_id'] = goal.id - mock_create_audit_template.return_value = self.fake_audit_template - audit_template = objects.AuditTemplate( - self.context, **self.fake_audit_template) - audit_template.create() - expected_audit_template = self.fake_audit_template.copy() - expected_audit_template['created_at'] = expected_audit_template[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - mock_create_audit_template.assert_called_once_with( - expected_audit_template) - self.assertEqual(self.context, audit_template._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_audit_template') - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_soft_delete(self, m_get_audit_template, - 
m_soft_delete_audit_template): - m_get_audit_template.return_value = self.fake_audit_template - fake_deleted_audit_template = self.fake_audit_template.copy() - fake_deleted_audit_template['deleted_at'] = datetime.datetime.utcnow() - m_soft_delete_audit_template.return_value = fake_deleted_audit_template - - expected_audit_template = fake_deleted_audit_template.copy() - expected_audit_template['created_at'] = expected_audit_template[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_audit_template['deleted_at'] = expected_audit_template[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - del expected_audit_template['goal'] - del expected_audit_template['strategy'] - - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) - audit_template.soft_delete() - m_get_audit_template.assert_called_once_with( - self.context, uuid, eager=False) - m_soft_delete_audit_template.assert_called_once_with(uuid) - self.assertEqual(self.context, audit_template._context) - self.assertEqual(expected_audit_template, audit_template.as_dict()) - - @mock.patch.object(db_api.Connection, 'destroy_audit_template') - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_destroy(self, mock_get_audit_template, - mock_destroy_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) - audit_template.destroy() - mock_get_audit_template.assert_called_once_with( - self.context, uuid, eager=False) - mock_destroy_audit_template.assert_called_once_with(uuid) - self.assertEqual(self.context, audit_template._context) diff --git a/watcher/tests/objects/test_efficacy_indicator.py b/watcher/tests/objects/test_efficacy_indicator.py deleted file mode 100644 index 1e5bfb4..0000000 --- a/watcher/tests/objects/test_efficacy_indicator.py +++ /dev/null @@ -1,148 +0,0 @@ -# 
Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from watcher.common import exception -# from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestEfficacyIndicatorObject(base.DbTestCase): - - def setUp(self): - super(TestEfficacyIndicatorObject, self).setUp() - self.fake_efficacy_indicator = utils.get_test_efficacy_indicator() - - def test_get_by_id(self): - efficacy_indicator_id = self.fake_efficacy_indicator['id'] - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_id', - autospec=True) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - efficacy_indicator = objects.EfficacyIndicator.get( - self.context, efficacy_indicator_id) - mock_get_efficacy_indicator.assert_called_once_with( - self.context, efficacy_indicator_id) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_get_by_uuid(self): - uuid = self.fake_efficacy_indicator['uuid'] - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', - autospec=True) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - efficacy_indicator = objects.EfficacyIndicator.get( - self.context, uuid) - mock_get_efficacy_indicator.assert_called_once_with( - self.context, uuid) - 
self.assertEqual(self.context, efficacy_indicator._context) - - def test_get_bad_id_and_uuid(self): - self.assertRaises( - exception.InvalidIdentity, - objects.EfficacyIndicator.get, self.context, 'not-a-uuid') - - def test_list(self): - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_efficacy_indicator] - efficacy_indicators = objects.EfficacyIndicator.list(self.context) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(efficacy_indicators)) - self.assertIsInstance( - efficacy_indicators[0], objects.EfficacyIndicator) - self.assertEqual(self.context, efficacy_indicators[0]._context) - - def test_create(self): - with mock.patch.object( - self.dbapi, 'create_efficacy_indicator', - autospec=True - ) as mock_create_efficacy_indicator: - mock_create_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - efficacy_indicator = objects.EfficacyIndicator( - self.context, **self.fake_efficacy_indicator) - - efficacy_indicator.create() - mock_create_efficacy_indicator.assert_called_once_with( - self.fake_efficacy_indicator) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_destroy(self): - uuid = self.fake_efficacy_indicator['uuid'] - with mock.patch.object( - self.dbapi, 'get_efficacy_indicator_by_uuid', - autospec=True - ) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - with mock.patch.object( - self.dbapi, 'destroy_efficacy_indicator', - autospec=True - ) as mock_destroy_efficacy_indicator: - efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( - self.context, uuid) - efficacy_indicator.destroy() - mock_get_efficacy_indicator.assert_called_once_with( - self.context, uuid) - mock_destroy_efficacy_indicator.assert_called_once_with(uuid) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_save(self): - uuid = 
self.fake_efficacy_indicator['uuid'] - with mock.patch.object( - self.dbapi, 'get_efficacy_indicator_by_uuid', - autospec=True - ) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - with mock.patch.object( - self.dbapi, 'update_efficacy_indicator', - autospec=True - ) as mock_update_efficacy_indicator: - efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( - self.context, uuid) - efficacy_indicator.description = 'Indicator Description' - efficacy_indicator.save() - - mock_get_efficacy_indicator.assert_called_once_with( - self.context, uuid) - mock_update_efficacy_indicator.assert_called_once_with( - uuid, {'description': 'Indicator Description'}) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_refresh(self): - uuid = self.fake_efficacy_indicator['uuid'] - returns = [dict(self.fake_efficacy_indicator, - description="first description"), - dict(self.fake_efficacy_indicator, - description="second description")] - expected = [mock.call(self.context, uuid), - mock.call(self.context, uuid)] - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', - side_effect=returns, - autospec=True) as mock_get_efficacy_indicator: - efficacy_indicator = objects.EfficacyIndicator.get( - self.context, uuid) - self.assertEqual( - "first description", efficacy_indicator.description) - efficacy_indicator.refresh() - self.assertEqual( - "second description", efficacy_indicator.description) - self.assertEqual( - expected, mock_get_efficacy_indicator.call_args_list) - self.assertEqual(self.context, efficacy_indicator._context) diff --git a/watcher/tests/objects/test_goal.py b/watcher/tests/objects/test_goal.py deleted file mode 100644 index 26c6951..0000000 --- a/watcher/tests/objects/test_goal.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestGoalObject(base.DbTestCase): - - def setUp(self): - super(TestGoalObject, self).setUp() - self.fake_goal = utils.get_test_goal( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'get_goal_by_id') - def test_get_by_id(self, mock_get_goal): - goal_id = self.fake_goal['id'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get(self.context, goal_id) - mock_get_goal.assert_called_once_with(self.context, goal_id) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_get_by_uuid(self, mock_get_goal): - uuid = self.fake_goal['uuid'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get(self.context, uuid) - mock_get_goal.assert_called_once_with(self.context, uuid) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'get_goal_by_name') - def test_get_by_name(self, mock_get_goal): - name = self.fake_goal['name'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get_by_name(self.context, name) - mock_get_goal.assert_called_once_with(self.context, name) - self.assertEqual(self.context, goal._context) - - 
@mock.patch.object(db_api.Connection, 'get_goal_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_goal] - goals = objects.Goal.list(self.context) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(goals)) - self.assertIsInstance(goals[0], objects.Goal) - self.assertEqual(self.context, goals[0]._context) - - @mock.patch.object(db_api.Connection, 'create_goal') - def test_create(self, mock_create_goal): - mock_create_goal.return_value = self.fake_goal - goal = objects.Goal(self.context, **self.fake_goal) - goal.create() - expected_goal = self.fake_goal.copy() - expected_goal['created_at'] = expected_goal['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - mock_create_goal.assert_called_once_with(expected_goal) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'destroy_goal') - @mock.patch.object(db_api.Connection, 'get_goal_by_id') - def test_destroy(self, mock_get_goal, mock_destroy_goal): - goal_id = self.fake_goal['id'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get_by_id(self.context, goal_id) - goal.destroy() - mock_get_goal.assert_called_once_with( - self.context, goal_id) - mock_destroy_goal.assert_called_once_with(goal_id) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'update_goal') - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_save(self, mock_get_goal, mock_update_goal): - mock_get_goal.return_value = self.fake_goal - goal_uuid = self.fake_goal['uuid'] - fake_saved_goal = self.fake_goal.copy() - fake_saved_goal['updated_at'] = datetime.datetime.utcnow() - mock_update_goal.return_value = fake_saved_goal - - goal = objects.Goal.get_by_uuid(self.context, goal_uuid) - goal.display_name = 'DUMMY' - goal.save() - - mock_get_goal.assert_called_once_with(self.context, goal_uuid) - mock_update_goal.assert_called_once_with( - goal_uuid, {'display_name': 'DUMMY'}) - 
self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_refresh(self, mock_get_goal): - fake_goal2 = utils.get_test_goal(name="BALANCE_LOAD") - returns = [self.fake_goal, fake_goal2] - mock_get_goal.side_effect = returns - uuid = self.fake_goal['uuid'] - expected = [mock.call(self.context, uuid), - mock.call(self.context, uuid)] - goal = objects.Goal.get(self.context, uuid) - self.assertEqual("TEST", goal.name) - goal.refresh() - self.assertEqual("BALANCE_LOAD", goal.name) - self.assertEqual(expected, mock_get_goal.call_args_list) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_goal') - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_soft_delete(self, mock_get_goal, mock_soft_delete_goal): - mock_get_goal.return_value = self.fake_goal - fake_deleted_goal = self.fake_goal.copy() - fake_deleted_goal['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete_goal.return_value = fake_deleted_goal - - expected_goal = fake_deleted_goal.copy() - expected_goal['created_at'] = expected_goal['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_goal['deleted_at'] = expected_goal['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - uuid = self.fake_goal['uuid'] - goal = objects.Goal.get_by_uuid(self.context, uuid) - goal.soft_delete() - mock_get_goal.assert_called_once_with(self.context, uuid) - mock_soft_delete_goal.assert_called_once_with(uuid) - self.assertEqual(self.context, goal._context) - self.assertEqual(expected_goal, goal.as_dict()) diff --git a/watcher/tests/objects/test_objects.py b/watcher/tests/objects/test_objects.py deleted file mode 100644 index cc61f46..0000000 --- a/watcher/tests/objects/test_objects.py +++ /dev/null @@ -1,562 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import datetime -import gettext -import iso8601 - -import mock -from oslo_versionedobjects import base as object_base -from oslo_versionedobjects import exception as object_exception -from oslo_versionedobjects import fixture as object_fixture -import six - -from watcher.common import context -from watcher.objects import base -from watcher.objects import fields -from watcher.tests import base as test_base - -gettext.install('watcher') - - -@base.WatcherObjectRegistry.register -class MyObj(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - VERSION = '1.5' - - fields = {'foo': fields.IntegerField(), - 'bar': fields.StringField(), - 'missing': fields.StringField()} - - def obj_load_attr(self, attrname): - setattr(self, attrname, 'loaded!') - - @object_base.remotable_classmethod - def query(cls, context): - obj = cls(context) - obj.foo = 1 - obj.bar = 'bar' - obj.obj_reset_changes() - return obj - - @object_base.remotable - def marco(self, context=None): - return 'polo' - - @object_base.remotable - def update_test(self, context=None): - if context and context.user == 'alternate': - self.bar = 'alternate-context' - else: - self.bar = 'updated' - - @object_base.remotable - def save(self, context=None): - self.obj_reset_changes() - - @object_base.remotable - def refresh(self, context=None): - self.foo = 321 - self.bar = 'refreshed' - self.obj_reset_changes() - - @object_base.remotable - def modify_save_modify(self, context=None): - self.bar = 'meow' - self.save() - self.foo = 42 - - -class MyObj2(object): - 
@classmethod - def obj_name(cls): - return 'MyObj' - - @object_base.remotable_classmethod - def get(cls, *args, **kwargs): - pass - - -@base.WatcherObjectRegistry.register_if(False) -class WatcherTestSubclassedObject(MyObj): - fields = {'new_field': fields.StringField()} - - -class _LocalTest(test_base.TestCase): - def setUp(self): - super(_LocalTest, self).setUp() - # Just in case - base.WatcherObject.indirection_api = None - - -@contextlib.contextmanager -def things_temporarily_local(): - # Temporarily go non-remote so the conductor handles - # this request directly - _api = base.WatcherObject.indirection_api - base.WatcherObject.indirection_api = None - yield - base.WatcherObject.indirection_api = _api - - -class _TestObject(object): - def test_hydration_type_error(self): - primitive = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 'a'}} - self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) - - def test_hydration(self): - primitive = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 1}} - obj = MyObj.obj_from_primitive(primitive) - self.assertEqual(1, obj.foo) - - def test_hydration_bad_ns(self): - primitive = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'foo', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 1}} - self.assertRaises(object_exception.UnsupportedObjectError, - MyObj.obj_from_primitive, primitive) - - def test_dehydration(self): - expected = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 1}} - obj = MyObj(self.context) - obj.foo = 1 - obj.obj_reset_changes() - self.assertEqual(expected, obj.obj_to_primitive()) - - def test_get_updates(self): - obj = MyObj(self.context) - self.assertEqual({}, obj.obj_get_changes()) - 
obj.foo = 123 - self.assertEqual({'foo': 123}, obj.obj_get_changes()) - obj.bar = 'test' - self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) - obj.obj_reset_changes() - self.assertEqual({}, obj.obj_get_changes()) - - def test_object_property(self): - obj = MyObj(self.context, foo=1) - self.assertEqual(1, obj.foo) - - def test_object_property_type_error(self): - obj = MyObj(self.context) - - def fail(): - obj.foo = 'a' - self.assertRaises(ValueError, fail) - - def test_load(self): - obj = MyObj(self.context) - self.assertEqual('loaded!', obj.bar) - - def test_load_in_base(self): - @base.WatcherObjectRegistry.register_if(False) - class Foo(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - fields = {'foobar': fields.IntegerField()} - obj = Foo(self.context) - - self.assertRaisesRegex( - NotImplementedError, "Cannot load 'foobar' in the base class", - getattr, obj, 'foobar') - - def test_loaded_in_primitive(self): - obj = MyObj(self.context) - obj.foo = 1 - obj.obj_reset_changes() - self.assertEqual('loaded!', obj.bar) - expected = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.changes': ['bar'], - 'watcher_object.data': {'foo': 1, - 'bar': 'loaded!'}} - self.assertEqual(expected, obj.obj_to_primitive()) - - def test_changes_in_primitive(self): - obj = MyObj(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - primitive = obj.obj_to_primitive() - self.assertIn('watcher_object.changes', primitive) - obj2 = MyObj.obj_from_primitive(primitive) - self.assertEqual(set(['foo']), obj2.obj_what_changed()) - obj2.obj_reset_changes() - self.assertEqual(set(), obj2.obj_what_changed()) - - def test_unknown_objtype(self): - self.assertRaises(object_exception.UnsupportedObjectError, - base.WatcherObject.obj_class_from_name, 'foo', '1.0') - - def test_with_alternate_context(self): - ctxt1 = 
context.RequestContext('foo', 'foo') - ctxt2 = context.RequestContext(user='alternate') - obj = MyObj.query(ctxt1) - obj.update_test(ctxt2) - self.assertEqual('alternate-context', obj.bar) - - def test_orphaned_object(self): - obj = MyObj.query(self.context) - obj._context = None - self.assertRaises(object_exception.OrphanedObjectError, - obj.update_test) - - def test_changed_1(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.update_test(self.context) - self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) - self.assertEqual(123, obj.foo) - - def test_changed_2(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.save() - self.assertEqual(set([]), obj.obj_what_changed()) - self.assertEqual(123, obj.foo) - - def test_changed_3(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.refresh() - self.assertEqual(set([]), obj.obj_what_changed()) - self.assertEqual(321, obj.foo) - self.assertEqual('refreshed', obj.bar) - - def test_changed_4(self): - obj = MyObj.query(self.context) - obj.bar = 'something' - self.assertEqual(set(['bar']), obj.obj_what_changed()) - obj.modify_save_modify(self.context) - self.assertEqual(set(['foo']), obj.obj_what_changed()) - self.assertEqual(42, obj.foo) - self.assertEqual('meow', obj.bar) - - def test_static_result(self): - obj = MyObj.query(self.context) - self.assertEqual('bar', obj.bar) - result = obj.marco() - self.assertEqual('polo', result) - - def test_updates(self): - obj = MyObj.query(self.context) - self.assertEqual(1, obj.foo) - obj.update_test() - self.assertEqual('updated', obj.bar) - - def test_base_attributes(self): - dt = datetime.datetime(1955, 11, 5, 0, 0, tzinfo=iso8601.iso8601.Utc()) - datatime = fields.DateTimeField() - obj = MyObj(self.context) - obj.created_at = dt - obj.updated_at = dt - expected = 
{'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.changes': - ['created_at', 'updated_at'], - 'watcher_object.data': - {'created_at': datatime.stringify(dt), - 'updated_at': datatime.stringify(dt), - } - } - actual = obj.obj_to_primitive() - # watcher_object.changes is built from a set and order is undefined - self.assertEqual(sorted(expected['watcher_object.changes']), - sorted(actual['watcher_object.changes'])) - del expected[ - 'watcher_object.changes'], actual['watcher_object.changes'] - self.assertEqual(expected, actual) - - def test_contains(self): - obj = MyObj(self.context) - self.assertNotIn('foo', obj) - obj.foo = 1 - self.assertIn('foo', obj) - self.assertNotIn('does_not_exist', obj) - - def test_obj_attr_is_set(self): - obj = MyObj(self.context, foo=1) - self.assertTrue(obj.obj_attr_is_set('foo')) - self.assertFalse(obj.obj_attr_is_set('bar')) - self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') - - def test_get(self): - obj = MyObj(self.context, foo=1) - # Foo has value, should not get the default - self.assertEqual(obj.get('foo', 2), 1) - # Foo has value, should return the value without error - self.assertEqual(obj.get('foo'), 1) - # Bar is not loaded, so we should get the default - self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') - # Bar without a default should lazy-load - self.assertEqual(obj.get('bar'), 'loaded!') - # Bar now has a default, but loaded value should be returned - self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') - # Invalid attribute should raise AttributeError - self.assertRaises(AttributeError, obj.get, 'nothing') - # ...even with a default - self.assertRaises(AttributeError, obj.get, 'nothing', 3) - - def test_object_inheritance(self): - base_fields = ( - list(base.WatcherObject.fields) + - list(base.WatcherPersistentObject.fields)) - myobj_fields = ['foo', 'bar', 'missing'] + base_fields - myobj3_fields = ['new_field'] - 
self.assertTrue(issubclass(WatcherTestSubclassedObject, MyObj)) - self.assertEqual(len(myobj_fields), len(MyObj.fields)) - self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) - self.assertEqual(len(myobj_fields) + len(myobj3_fields), - len(WatcherTestSubclassedObject.fields)) - self.assertEqual(set(myobj_fields) | set(myobj3_fields), - set(WatcherTestSubclassedObject.fields.keys())) - - def test_get_changes(self): - obj = MyObj(self.context) - self.assertEqual({}, obj.obj_get_changes()) - obj.foo = 123 - self.assertEqual({'foo': 123}, obj.obj_get_changes()) - obj.bar = 'test' - self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) - obj.obj_reset_changes() - self.assertEqual({}, obj.obj_get_changes()) - - def test_obj_fields(self): - @base.WatcherObjectRegistry.register_if(False) - class TestObj(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - fields = {'foo': fields.IntegerField()} - obj_extra_fields = ['bar'] - - @property - def bar(self): - return 'this is bar' - - obj = TestObj(self.context) - self.assertEqual(set(['created_at', 'updated_at', 'deleted_at', - 'foo', 'bar']), - set(obj.obj_fields)) - - def test_refresh_object(self): - @base.WatcherObjectRegistry.register_if(False) - class TestObj(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - fields = {'foo': fields.IntegerField(), - 'bar': fields.StringField()} - - obj = TestObj(self.context) - current_obj = TestObj(self.context) - obj.foo = 10 - obj.bar = 'obj.bar' - current_obj.foo = 2 - current_obj.bar = 'current.bar' - obj.obj_refresh(current_obj) - self.assertEqual(obj.foo, 2) - self.assertEqual(obj.bar, 'current.bar') - - def test_obj_constructor(self): - obj = MyObj(self.context, foo=123, bar='abc') - self.assertEqual(123, obj.foo) - self.assertEqual('abc', obj.bar) - self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) - - def test_assign_value_without_DictCompat(self): - class 
TestObj(base.WatcherObject): - fields = {'foo': fields.IntegerField(), - 'bar': fields.StringField()} - obj = TestObj(self.context) - obj.foo = 10 - err_message = '' - try: - obj['bar'] = 'value' - except TypeError as e: - err_message = six.text_type(e) - finally: - self.assertIn("'TestObj' object does not support item assignment", - err_message) - - -class TestObject(_LocalTest, _TestObject): - pass - - -# The hashes are help developers to check if the change of objects need a -# version bump. It is md5 hash of object fields and remotable methods. -# The fingerprint values should only be changed if there is a version bump. -expected_object_fingerprints = { - 'Goal': '1.0-93881622db05e7b67a65ca885b4a022e', - 'Strategy': '1.1-73f164491bdd4c034f48083a51bdeb7b', - 'AuditTemplate': '1.1-b291973ffc5efa2c61b24fe34fdccc0b', - 'Audit': '1.3-f47ffb1ee79d8248eb991674bda565ce', - 'ActionPlan': '2.0-394f1abbf5d73d7b6675a118fe1a0284', - 'Action': '2.0-1dd4959a7e7ac30c62ef170fe08dd935', - 'EfficacyIndicator': '1.0-655b71234a82bc7478aff964639c4bb0', - 'ScoringEngine': '1.0-4abbe833544000728e17bd9e83f97576', - 'Service': '1.0-4b35b99ada9677a882c9de2b30212f35', - 'MyObj': '1.5-23c516d1e842f365f694e688d34e47c3', -} - - -def get_watcher_objects(): - """Get Watcher versioned objects - - This returns a dict of versioned objects which are - in the Watcher project namespace only. 
ie excludes - objects from os-vif and other 3rd party modules - :return: a dict mapping class names to lists of versioned objects - """ - all_classes = base.WatcherObjectRegistry.obj_classes() - watcher_classes = {} - for name in all_classes: - objclasses = all_classes[name] - if (objclasses[0].OBJ_PROJECT_NAMESPACE != - base.WatcherObject.OBJ_PROJECT_NAMESPACE): - continue - watcher_classes[name] = objclasses - return watcher_classes - - -class TestObjectVersions(test_base.TestCase): - - def test_object_version_check(self): - classes = base.WatcherObjectRegistry.obj_classes() - checker = object_fixture.ObjectVersionChecker(obj_classes=classes) - # Compute the difference between actual fingerprints and - # expect fingerprints. expect = actual = {} if there is no change. - expect, actual = checker.test_hashes(expected_object_fingerprints) - self.assertEqual(expect, actual, - "Some objects fields or remotable methods have been " - "modified. Please make sure the version of those " - "objects have been bumped and then update " - "expected_object_fingerprints with the new hashes. 
") - - -class TestObjectSerializer(test_base.TestCase): - - def test_object_serialization(self): - ser = base.WatcherObjectSerializer() - obj = MyObj(self.context) - primitive = ser.serialize_entity(self.context, obj) - self.assertIn('watcher_object.name', primitive) - obj2 = ser.deserialize_entity(self.context, primitive) - self.assertIsInstance(obj2, MyObj) - self.assertEqual(self.context, obj2._context) - - def test_object_serialization_iterables(self): - ser = base.WatcherObjectSerializer() - obj = MyObj(self.context) - for iterable in (list, tuple, set): - thing = iterable([obj]) - primitive = ser.serialize_entity(self.context, thing) - self.assertEqual(1, len(primitive)) - for item in primitive: - self.assertFalse(isinstance(item, base.WatcherObject)) - thing2 = ser.deserialize_entity(self.context, primitive) - self.assertEqual(1, len(thing2)) - for item in thing2: - self.assertIsInstance(item, MyObj) - - @mock.patch('watcher.objects.base.WatcherObject.indirection_api') - def _test_deserialize_entity_newer(self, obj_version, backported_to, - mock_indirection_api, - my_version='1.6'): - ser = base.WatcherObjectSerializer() - mock_indirection_api.object_backport_versions.return_value \ - = 'backported' - - @base.WatcherObjectRegistry.register - class MyTestObj(MyObj): - VERSION = my_version - - obj = MyTestObj(self.context) - obj.VERSION = obj_version - primitive = obj.obj_to_primitive() - result = ser.deserialize_entity(self.context, primitive) - if backported_to is None: - self.assertFalse( - mock_indirection_api.object_backport_versions.called) - else: - self.assertEqual('backported', result) - versions = object_base.obj_tree_get_versions('MyTestObj') - mock_indirection_api.object_backport_versions.assert_called_with( - self.context, primitive, versions) - - def test_deserialize_entity_newer_version_backports(self): - "Test object with unsupported (newer) version" - self._test_deserialize_entity_newer('1.25', '1.6') - - def 
test_deserialize_entity_same_revision_does_not_backport(self): - "Test object with supported revision" - self._test_deserialize_entity_newer('1.6', None) - - def test_deserialize_entity_newer_revision_does_not_backport_zero(self): - "Test object with supported revision" - self._test_deserialize_entity_newer('1.6.0', None) - - def test_deserialize_entity_newer_revision_does_not_backport(self): - "Test object with supported (newer) revision" - self._test_deserialize_entity_newer('1.6.1', None) - - def test_deserialize_entity_newer_version_passes_revision(self): - "Test object with unsupported (newer) version and revision" - self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') - - -class TestRegistry(test_base.TestCase): - - @mock.patch('watcher.objects.base.objects') - def test_hook_chooses_newer_properly(self, mock_objects): - reg = base.WatcherObjectRegistry() - reg.registration_hook(MyObj, 0) - - class MyNewerObj(object): - VERSION = '1.123' - - @classmethod - def obj_name(cls): - return 'MyObj' - - self.assertEqual(MyObj, mock_objects.MyObj) - reg.registration_hook(MyNewerObj, 0) - self.assertEqual(MyNewerObj, mock_objects.MyObj) - - @mock.patch('watcher.objects.base.objects') - def test_hook_keeps_newer_properly(self, mock_objects): - reg = base.WatcherObjectRegistry() - reg.registration_hook(MyObj, 0) - - class MyOlderObj(object): - VERSION = '1.1' - - @classmethod - def obj_name(cls): - return 'MyObj' - - self.assertEqual(MyObj, mock_objects.MyObj) - reg.registration_hook(MyOlderObj, 0) - self.assertEqual(MyObj, mock_objects.MyObj) diff --git a/watcher/tests/objects/test_scoring_engine.py b/watcher/tests/objects/test_scoring_engine.py deleted file mode 100644 index e27fbcf..0000000 --- a/watcher/tests/objects/test_scoring_engine.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2016 Intel -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestScoringEngineObject(base.DbTestCase): - - def setUp(self): - super(TestScoringEngineObject, self).setUp() - self.fake_scoring_engine = utils.get_test_scoring_engine( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_get_by_id(self, mock_get_scoring_engine): - scoring_engine_id = self.fake_scoring_engine['id'] - mock_get_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine.get_by_id( - self.context, scoring_engine_id) - mock_get_scoring_engine.assert_called_once_with( - self.context, scoring_engine_id) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') - def test_get_by_uuid(self, mock_get_scoring_engine): - se_uuid = self.fake_scoring_engine['uuid'] - mock_get_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine.get_by_uuid( - self.context, se_uuid) - mock_get_scoring_engine.assert_called_once_with( - self.context, se_uuid) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') - def 
test_get_by_name(self, mock_get_scoring_engine): - scoring_engine_uuid = self.fake_scoring_engine['uuid'] - mock_get_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine.get( - self.context, scoring_engine_uuid) - mock_get_scoring_engine.assert_called_once_with( - self.context, scoring_engine_uuid) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_scoring_engine] - scoring_engines = objects.ScoringEngine.list(self.context) - self.assertEqual(1, mock_get_list.call_count, 1) - self.assertEqual(1, len(scoring_engines)) - self.assertIsInstance(scoring_engines[0], objects.ScoringEngine) - self.assertEqual(self.context, scoring_engines[0]._context) - - @mock.patch.object(db_api.Connection, 'create_scoring_engine') - def test_create(self, mock_create_scoring_engine): - mock_create_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine( - self.context, **self.fake_scoring_engine) - scoring_engine.create() - expected_scoring_engine = self.fake_scoring_engine.copy() - expected_scoring_engine['created_at'] = expected_scoring_engine[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - mock_create_scoring_engine.assert_called_once_with( - expected_scoring_engine) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'destroy_scoring_engine') - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_destroy(self, mock_get_scoring_engine, - mock_destroy_scoring_engine): - mock_get_scoring_engine.return_value = self.fake_scoring_engine - _id = self.fake_scoring_engine['id'] - scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) - scoring_engine.destroy() - mock_get_scoring_engine.assert_called_once_with(self.context, _id) - 
mock_destroy_scoring_engine.assert_called_once_with(_id) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'update_scoring_engine') - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') - def test_save(self, mock_get_scoring_engine, mock_update_scoring_engine): - mock_get_scoring_engine.return_value = self.fake_scoring_engine - fake_saved_scoring_engine = self.fake_scoring_engine.copy() - fake_saved_scoring_engine['updated_at'] = datetime.datetime.utcnow() - mock_update_scoring_engine.return_value = fake_saved_scoring_engine - - uuid = self.fake_scoring_engine['uuid'] - scoring_engine = objects.ScoringEngine.get_by_uuid(self.context, uuid) - scoring_engine.description = 'UPDATED DESCRIPTION' - scoring_engine.save() - - mock_get_scoring_engine.assert_called_once_with(self.context, uuid) - mock_update_scoring_engine.assert_called_once_with( - uuid, {'description': 'UPDATED DESCRIPTION'}) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_refresh(self, mock_get_scoring_engine): - returns = [ - dict(self.fake_scoring_engine, description="first description"), - dict(self.fake_scoring_engine, description="second description")] - mock_get_scoring_engine.side_effect = returns - _id = self.fake_scoring_engine['id'] - expected = [mock.call(self.context, _id), - mock.call(self.context, _id)] - scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) - self.assertEqual("first description", scoring_engine.description) - scoring_engine.refresh() - self.assertEqual("second description", scoring_engine.description) - self.assertEqual(expected, mock_get_scoring_engine.call_args_list) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_scoring_engine') - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_soft_delete(self, 
mock_get_scoring_engine, mock_soft_delete): - mock_get_scoring_engine.return_value = self.fake_scoring_engine - fake_deleted_scoring_engine = self.fake_scoring_engine.copy() - fake_deleted_scoring_engine['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete.return_value = fake_deleted_scoring_engine - - expected_scoring_engine = fake_deleted_scoring_engine.copy() - expected_scoring_engine['created_at'] = expected_scoring_engine[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_scoring_engine['deleted_at'] = expected_scoring_engine[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - - _id = self.fake_scoring_engine['id'] - scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) - scoring_engine.soft_delete() - mock_get_scoring_engine.assert_called_once_with(self.context, _id) - mock_soft_delete.assert_called_once_with(_id) - self.assertEqual(self.context, scoring_engine._context) - self.assertEqual(expected_scoring_engine, scoring_engine.as_dict()) diff --git a/watcher/tests/objects/test_service.py b/watcher/tests/objects/test_service.py deleted file mode 100644 index 0dcef21..0000000 --- a/watcher/tests/objects/test_service.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import iso8601 -import mock - -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestServiceObject(base.DbTestCase): - - def setUp(self): - super(TestServiceObject, self).setUp() - self.fake_service = utils.get_test_service( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_get_by_id(self, mock_get_service): - service_id = self.fake_service['id'] - mock_get_service.return_value = self.fake_service - service = objects.Service.get(self.context, service_id) - mock_get_service.assert_called_once_with(self.context, service_id) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'get_service_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_service] - services = objects.Service.list(self.context) - self.assertEqual(1, mock_get_list.call_count, 1) - self.assertEqual(1, len(services)) - self.assertIsInstance(services[0], objects.Service) - self.assertEqual(self.context, services[0]._context) - - @mock.patch.object(db_api.Connection, 'create_service') - def test_create(self, mock_create_service): - mock_create_service.return_value = self.fake_service - service = objects.Service(self.context, **self.fake_service) - - service.create() - expected_service = self.fake_service.copy() - expected_service['created_at'] = expected_service[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - - mock_create_service.assert_called_once_with(expected_service) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'update_service') - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_save(self, mock_get_service, mock_update_service): - mock_get_service.return_value = self.fake_service - fake_saved_service = self.fake_service.copy() - 
fake_saved_service['updated_at'] = datetime.datetime.utcnow() - mock_update_service.return_value = fake_saved_service - _id = self.fake_service['id'] - service = objects.Service.get(self.context, _id) - service.name = 'UPDATED NAME' - service.save() - - mock_get_service.assert_called_once_with(self.context, _id) - mock_update_service.assert_called_once_with( - _id, {'name': 'UPDATED NAME'}) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_refresh(self, mock_get_service): - returns = [dict(self.fake_service, name="first name"), - dict(self.fake_service, name="second name")] - mock_get_service.side_effect = returns - _id = self.fake_service['id'] - expected = [mock.call(self.context, _id), - mock.call(self.context, _id)] - service = objects.Service.get(self.context, _id) - self.assertEqual("first name", service.name) - service.refresh() - self.assertEqual("second name", service.name) - self.assertEqual(expected, mock_get_service.call_args_list) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_service') - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_soft_delete(self, mock_get_service, mock_soft_delete): - mock_get_service.return_value = self.fake_service - fake_deleted_service = self.fake_service.copy() - fake_deleted_service['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete.return_value = fake_deleted_service - - expected_service = fake_deleted_service.copy() - expected_service['created_at'] = expected_service[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_service['deleted_at'] = expected_service[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - - _id = self.fake_service['id'] - service = objects.Service.get(self.context, _id) - service.soft_delete() - mock_get_service.assert_called_once_with(self.context, _id) - mock_soft_delete.assert_called_once_with(_id) - 
self.assertEqual(self.context, service._context) - self.assertEqual(expected_service, service.as_dict()) diff --git a/watcher/tests/objects/test_strategy.py b/watcher/tests/objects/test_strategy.py deleted file mode 100644 index 3d1a47d..0000000 --- a/watcher/tests/objects/test_strategy.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock - -from watcher.common import exception -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestStrategyObject(base.DbTestCase): - - goal_id = 2 - - scenarios = [ - ('non_eager', dict( - eager=False, fake_strategy=utils.get_test_strategy( - goal_id=goal_id))), - ('eager_with_non_eager_load', dict( - eager=True, fake_strategy=utils.get_test_strategy( - goal_id=goal_id))), - ('eager_with_eager_load', dict( - eager=True, fake_strategy=utils.get_test_strategy( - goal_id=goal_id, goal=utils.get_test_goal(id=goal_id)))), - ] - - def setUp(self): - super(TestStrategyObject, self).setUp() - self.fake_goal = utils.create_test_goal(id=self.goal_id) - - def eager_load_strategy_assert(self, strategy): - if self.eager: - self.assertIsNotNone(strategy.goal) - fields_to_check = set( - super(objects.Goal, objects.Goal).fields - ).symmetric_difference(objects.Goal.fields) - db_data = { - k: v for k, v in self.fake_goal.as_dict().items() 
- if k in fields_to_check} - object_data = { - k: v for k, v in strategy.goal.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_get_by_id(self, mock_get_strategy): - strategy_id = self.fake_strategy['id'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get( - self.context, strategy_id, eager=self.eager) - mock_get_strategy.assert_called_once_with( - self.context, strategy_id, eager=self.eager) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - @mock.patch.object(db_api.Connection, 'get_strategy_by_uuid') - def test_get_by_uuid(self, mock_get_strategy): - uuid = self.fake_strategy['uuid'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get(self.context, uuid, eager=self.eager) - mock_get_strategy.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - def test_get_bad_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.Strategy.get, self.context, 'not-a-uuid') - - @mock.patch.object(db_api.Connection, 'get_strategy_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_strategy] - strategies = objects.Strategy.list(self.context, eager=self.eager) - self.assertEqual(1, mock_get_list.call_count, 1) - self.assertEqual(1, len(strategies)) - self.assertIsInstance(strategies[0], objects.Strategy) - self.assertEqual(self.context, strategies[0]._context) - for strategy in strategies: - self.eager_load_strategy_assert(strategy) - - @mock.patch.object(db_api.Connection, 'update_strategy') - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_save(self, mock_get_strategy, mock_update_strategy): - _id = self.fake_strategy['id'] - mock_get_strategy.return_value = 
self.fake_strategy - strategy = objects.Strategy.get_by_id( - self.context, _id, eager=self.eager) - strategy.name = 'UPDATED NAME' - strategy.save() - - mock_get_strategy.assert_called_once_with( - self.context, _id, eager=self.eager) - mock_update_strategy.assert_called_once_with( - _id, {'name': 'UPDATED NAME'}) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_refresh(self, mock_get_strategy): - _id = self.fake_strategy['id'] - returns = [dict(self.fake_strategy, name="first name"), - dict(self.fake_strategy, name="second name")] - mock_get_strategy.side_effect = returns - expected = [mock.call(self.context, _id, eager=self.eager), - mock.call(self.context, _id, eager=self.eager)] - strategy = objects.Strategy.get(self.context, _id, eager=self.eager) - self.assertEqual("first name", strategy.name) - strategy.refresh(eager=self.eager) - self.assertEqual("second name", strategy.name) - self.assertEqual(expected, mock_get_strategy.call_args_list) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - -class TestCreateDeleteStrategyObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteStrategyObject, self).setUp() - self.fake_goal = utils.create_test_goal() - self.fake_strategy = utils.get_test_strategy(goal_id=self.fake_goal.id) - - @mock.patch.object(db_api.Connection, 'create_strategy') - def test_create(self, mock_create_strategy): - mock_create_strategy.return_value = self.fake_strategy - strategy = objects.Strategy(self.context, **self.fake_strategy) - strategy.create() - mock_create_strategy.assert_called_once_with(self.fake_strategy) - self.assertEqual(self.context, strategy._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_strategy') - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_soft_delete(self, mock_get_strategy, mock_soft_delete): - 
_id = self.fake_strategy['id'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get_by_id(self.context, _id) - strategy.soft_delete() - mock_get_strategy.assert_called_once_with( - self.context, _id, eager=False) - mock_soft_delete.assert_called_once_with(_id) - self.assertEqual(self.context, strategy._context) - - @mock.patch.object(db_api.Connection, 'destroy_strategy') - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_destroy(self, mock_get_strategy, mock_destroy_strategy): - _id = self.fake_strategy['id'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get_by_id(self.context, _id) - strategy.destroy() - mock_get_strategy.assert_called_once_with( - self.context, _id, eager=False) - mock_destroy_strategy.assert_called_once_with(_id) - self.assertEqual(self.context, strategy._context) diff --git a/watcher/tests/objects/utils.py b/watcher/tests/objects/utils.py deleted file mode 100644 index 18ec4aa..0000000 --- a/watcher/tests/objects/utils.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright 2014 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Watcher object test utilities.""" - -from watcher import objects -from watcher.tests.db import utils as db_utils - - -def _load_related_objects(context, cls, db_data): - """Replace the DB data with its object counterpart""" - obj_data = db_data.copy() - for name, (obj_cls, _) in cls.object_fields.items(): - if obj_data.get(name): - obj_data[name] = obj_cls(context, **obj_data.get(name).as_dict()) - else: - del obj_data[name] - - return obj_data - - -def _load_test_obj(context, cls, obj_data, **kw): - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del obj_data['id'] - obj = cls(context) - for key in obj_data: - setattr(obj, key, obj_data[key]) - return obj - - -def get_test_audit_template(context, **kw): - """Return a AuditTemplate object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.AuditTemplate - db_data = db_utils.get_test_audit_template(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_audit_template(context, **kw): - """Create and return a test audit_template object. - - Create a audit template in the DB and return an AuditTemplate object - with appropriate attributes. - """ - audit_template = get_test_audit_template(context, **kw) - audit_template.create() - return audit_template - - -def get_test_audit(context, **kw): - """Return a Audit object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Audit - db_data = db_utils.get_test_audit(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_audit(context, **kw): - """Create and return a test audit object. 
- - Create a audit in the DB and return an Audit object with appropriate - attributes. - """ - audit = get_test_audit(context, **kw) - audit.create() - return audit - - -def get_test_action_plan(context, **kw): - """Return a ActionPlan object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.ActionPlan - db_data = db_utils.get_test_action_plan(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_action_plan(context, **kw): - """Create and return a test action_plan object. - - Create a action plan in the DB and return a ActionPlan object with - appropriate attributes. - """ - action_plan = get_test_action_plan(context, **kw) - action_plan.create() - return action_plan - - -def get_test_action(context, **kw): - """Return a Action object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Action - db_data = db_utils.get_test_action(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_action(context, **kw): - """Create and return a test action object. - - Create a action in the DB and return a Action object with appropriate - attributes. - """ - action = get_test_action(context, **kw) - action.create() - return action - - -def get_test_goal(context, **kw): - """Return a Goal object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. 
- """ - obj_cls = objects.Goal - db_data = db_utils.get_test_goal(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_goal(context, **kw): - """Create and return a test goal object. - - Create a goal in the DB and return a Goal object with appropriate - attributes. - """ - goal = get_test_goal(context, **kw) - goal.create() - return goal - - -def get_test_scoring_engine(context, **kw): - """Return a ScoringEngine object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.ScoringEngine - db_data = db_utils.get_test_scoring_engine(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_scoring_engine(context, **kw): - """Create and return a test scoring engine object. - - Create a scoring engine in the DB and return a ScoringEngine object with - appropriate attributes. - """ - scoring_engine = get_test_scoring_engine(context, **kw) - scoring_engine.create() - return scoring_engine - - -def get_test_service(context, **kw): - """Return a Service object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Service - db_data = db_utils.get_test_service(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_service(context, **kw): - """Create and return a test service object. - - Create a service in the DB and return a Service object with - appropriate attributes. - """ - service = get_test_service(context, **kw) - service.create() - return service - - -def get_test_strategy(context, **kw): - """Return a Strategy object with appropriate attributes. 
- - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Strategy - db_data = db_utils.get_test_strategy(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_strategy(context, **kw): - """Create and return a test strategy object. - - Create a strategy in the DB and return a Strategy object with appropriate - attributes. - """ - strategy = get_test_strategy(context, **kw) - strategy.create() - return strategy - - -def get_test_efficacy_indicator(context, **kw): - """Return a EfficacyIndicator object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.EfficacyIndicator - db_data = db_utils.get_test_efficacy_indicator(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_efficacy_indicator(context, **kw): - """Create and return a test efficacy indicator object. - - Create a efficacy indicator in the DB and return a EfficacyIndicator object - with appropriate attributes. - """ - efficacy_indicator = get_test_efficacy_indicator(context, **kw) - efficacy_indicator.create() - return efficacy_indicator diff --git a/watcher/tests/policy_fixture.py b/watcher/tests/policy_fixture.py deleted file mode 100644 index 8a5b4e8..0000000 --- a/watcher/tests/policy_fixture.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import fixtures -from oslo_config import cfg -from oslo_policy import _parser -from oslo_policy import opts as policy_opts - -from watcher.common import policy as watcher_policy -from watcher.tests import fake_policy - -CONF = cfg.CONF - - -class PolicyFixture(fixtures.Fixture): - - def _setUp(self): - self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file_name = os.path.join(self.policy_dir.path, - 'policy.json') - with open(self.policy_file_name, 'w') as policy_file: - policy_file.write(fake_policy.policy_data) - policy_opts.set_defaults(CONF) - CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') - watcher_policy._ENFORCER = None - self.addCleanup(watcher_policy.init().clear) - - def set_rules(self, rules): - policy = watcher_policy._ENFORCER - policy.set_rules({k: _parser.parse_rule(v) - for k, v in rules.items()}) diff --git a/watcher/version.py b/watcher/version.py deleted file mode 100644 index 42c81b0..0000000 --- a/watcher/version.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - -version_info = pbr.version.VersionInfo('python-watcher') -version_string = version_info.version_string diff --git a/watcher_tempest_plugin/README.rst b/watcher_tempest_plugin/README.rst deleted file mode 100644 index 1fd805f..0000000 --- a/watcher_tempest_plugin/README.rst +++ /dev/null @@ -1,158 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _tempest_tests: - -Tempest tests -============= - -The following procedure gets you started with Tempest testing but you can also -refer to the `Tempest documentation`_ for more details. - -.. _Tempest documentation: https://docs.openstack.org/tempest/latest - - -Tempest installation --------------------- - -To install Tempest you can issue the following commands:: - - $ git clone https://github.com/openstack/tempest/ - $ cd tempest/ - $ pip install . - -The folder you are into now will be called ```` from now onwards. - -Please note that although it is fully working outside a virtual environment, it -is recommended to install within a `venv`. - - -Watcher Tempest testing setup ------------------------------ - -You can now install Watcher alongside it in development mode by issuing the -following command:: - - $ pip install -e - -Then setup a local working environment (here ``watcher-cloud``) for running -Tempest for Watcher which shall contain the configuration for your OpenStack -integration platform. 
- -In a virtual environment, you can do so by issuing the following command:: - - $ cd - $ tempest init watcher-cloud - -Otherwise, if you are not using a virtualenv:: - - $ cd - $ tempest init --config-dir ./etc watcher-cloud - -By default the configuration file is empty so before starting, you need to -issue the following commands:: - - $ cd /watcher-cloud/etc - $ cp tempest.conf.sample tempest.conf - -At this point you need to edit the ``watcher-cloud/etc/tempest.conf`` -file as described in the `Tempest configuration guide`_. -Shown below is a minimal configuration you need to set within your -``tempest.conf`` configuration file which can get you started. - -For Keystone V3:: - - [identity] - uri_v3 = http://:/v3 - auth_version = v3 - - [auth] - admin_username = - admin_password = - admin_tenant_name = - admin_domain_name = - - [identity-feature-enabled] - api_v2 = false - api_v3 = true - -For Keystone V2:: - - [identity] - uri = http://:/v2.0 - auth_version = v2 - - [auth] - admin_tenant_name = - admin_username = - admin_password = - -In both cases:: - - [network] - public_network_id = - -You now have the minimum configuration for running Watcher Tempest tests on a -single node. - -Since deploying Watcher with only a single compute node is not very useful, a -few more configuration have to be set in your ``tempest.conf`` file in order to -enable the execution of multi-node scenarios:: - - [compute] - # To indicate Tempest test that you have provided enough compute nodes - min_compute_nodes = 2 - - # Image UUID you can get using the "glance image-list" command - image_ref = - - -For more information, please refer to: - -- Keystone connection: https://docs.openstack.org/tempest/latest/configuration.html#keystone-connection-info -- Dynamic Keystone Credentials: https://docs.openstack.org/tempest/latest/configuration.html#dynamic-credentials - -.. _virtual environment: http://docs.python-guide.org/en/latest/dev/virtualenvs/ -.. 
_Tempest configuration guide: http://docs.openstack.org/tempest/latest/configuration.html - - -Watcher Tempest tests execution -------------------------------- - -To list all Watcher Tempest cases, you can issue the following commands:: - - $ cd - $ testr list-tests watcher - -To run only these tests in Tempest, you can then issue these commands:: - - $ ./run_tempest.sh --config watcher-cloud/etc/tempest.conf -N -- watcher - -Or alternatively the following commands if you are:: - - $ cd /watcher-cloud - $ ../run_tempest.sh -N -- watcher - -To run a single test case, go to Tempest directory, then run with test case -name, e.g.:: - - $ cd - $ ./run_tempest.sh --config watcher-cloud/etc/tempest.conf -N \ - -- watcher_tempest_plugin.tests.api.admin.test_audit_template.TestCreateDeleteAuditTemplate.test_create_audit_template - -Alternatively, you can also run the Watcher Tempest plugin tests using tox. But -before you can do so, you need to follow the Tempest explanation on running -`tox with plugins`_. Then, run:: - - $ export TEMPEST_CONFIG_DIR=/watcher-cloud/etc/ - $ tox -eall-plugin watcher - -.. _tox with plugins: https://docs.openstack.org/tempest/latest/plugin.html#notes-for-using-plugins-with-virtualenvs - -And, to run a specific test:: - - $ export TEMPEST_CONFIG_DIR=/watcher-cloud/etc/ - $ tox -eall-plugin watcher_tempest_plugin.tests.api.admin.test_audit_template.TestCreateDeleteAuditTemplate.test_create_audit_template diff --git a/watcher_tempest_plugin/__init__.py b/watcher_tempest_plugin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/config.py b/watcher_tempest_plugin/config.py deleted file mode 100644 index 426399d..0000000 --- a/watcher_tempest_plugin/config.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - - -service_option = cfg.BoolOpt("watcher", - default=True, - help="Whether or not watcher is expected to be " - "available") diff --git a/watcher_tempest_plugin/infra_optim_clients.py b/watcher_tempest_plugin/infra_optim_clients.py deleted file mode 100644 index edf2091..0000000 --- a/watcher_tempest_plugin/infra_optim_clients.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc - -import six -from tempest import clients -from tempest.common import credentials_factory as creds_factory -from tempest import config - -from watcher_tempest_plugin.services.infra_optim.v1.json import client as ioc - -CONF = config.CONF - - -@six.add_metaclass(abc.ABCMeta) -class BaseManager(clients.Manager): - - def __init__(self, credentials): - super(BaseManager, self).__init__(credentials) - self.io_client = ioc.InfraOptimClientJSON( - self.auth_provider, 'infra-optim', CONF.identity.region) - - -class AdminManager(BaseManager): - def __init__(self): - super(AdminManager, self).__init__( - creds_factory.get_configured_admin_credentials(), - ) diff --git a/watcher_tempest_plugin/plugin.py b/watcher_tempest_plugin/plugin.py deleted file mode 100644 index 560c544..0000000 --- a/watcher_tempest_plugin/plugin.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import os - -from tempest.test_discover import plugins - -from watcher_tempest_plugin import config as watcher_config - - -class WatcherTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(__file__)))[0] - test_dir = "watcher_tempest_plugin/tests" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - conf.register_opt(watcher_config.service_option, - group='service_available') - - def get_opt_lists(self): - return [('service_available', [watcher_config.service_option])] diff --git a/watcher_tempest_plugin/services/__init__.py b/watcher_tempest_plugin/services/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/__init__.py b/watcher_tempest_plugin/services/infra_optim/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/base.py b/watcher_tempest_plugin/services/infra_optim/base.py deleted file mode 100644 index d248774..0000000 --- a/watcher_tempest_plugin/services/infra_optim/base.py +++ /dev/null @@ -1,211 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import functools - -import six -import six.moves.urllib.parse as urlparse - -from tempest.lib.common import rest_client - - -def handle_errors(f): - """A decorator that allows to ignore certain types of errors.""" - - @functools.wraps(f) - def wrapper(*args, **kwargs): - param_name = 'ignore_errors' - ignored_errors = kwargs.get(param_name, tuple()) - - if param_name in kwargs: - del kwargs[param_name] - - try: - return f(*args, **kwargs) - except ignored_errors: - # Silently ignore errors - pass - - return wrapper - - -@six.add_metaclass(abc.ABCMeta) -class BaseInfraOptimClient(rest_client.RestClient): - """Base Tempest REST client for Watcher API.""" - - URI_PREFIX = '' - - @abc.abstractmethod - def serialize(self, object_dict): - """Serialize an Watcher object.""" - raise NotImplementedError() - - @abc.abstractmethod - def deserialize(self, object_str): - """Deserialize an Watcher object.""" - raise NotImplementedError() - - def _get_uri(self, resource_name, uuid=None, permanent=False): - """Get URI for a specific resource or object. - - :param resource_name: The name of the REST resource, e.g., 'audits'. - :param uuid: The unique identifier of an object in UUID format. - :return: Relative URI for the resource or object. - """ - - prefix = self.URI_PREFIX if not permanent else '' - - return '{pref}/{res}{uuid}'.format(pref=prefix, - res=resource_name, - uuid='/%s' % uuid if uuid else '') - - def _make_patch(self, allowed_attributes, **kw): - """Create a JSON patch according to RFC 6902. - - :param allowed_attributes: An iterable object that contains a set of - allowed attributes for an object. - :param **kw: Attributes and new values for them. - :return: A JSON path that sets values of the specified attributes to - the new ones. 
- """ - - def get_change(kw, path='/'): - for name, value in kw.items(): - if isinstance(value, dict): - for ch in get_change(value, path + '%s/' % name): - yield ch - else: - if value is None: - yield {'path': path + name, - 'op': 'remove'} - else: - yield {'path': path + name, - 'value': value, - 'op': 'replace'} - - patch = [ch for ch in get_change(kw) - if ch['path'].lstrip('/') in allowed_attributes] - - return patch - - def _list_request(self, resource, permanent=False, **kwargs): - """Get the list of objects of the specified type. - - :param resource: The name of the REST resource, e.g., 'audits'. - "param **kw: Parameters for the request. - :return: A tuple with the server response and deserialized JSON list - of objects - """ - - uri = self._get_uri(resource, permanent=permanent) - if kwargs: - uri += "?%s" % urlparse.urlencode(kwargs) - - resp, body = self.get(uri) - self.expected_success(200, int(resp['status'])) - - return resp, self.deserialize(body) - - def _show_request(self, resource, uuid, permanent=False, **kwargs): - """Gets a specific object of the specified type. - - :param uuid: Unique identifier of the object in UUID format. - :return: Serialized object as a dictionary. - """ - - if 'uri' in kwargs: - uri = kwargs['uri'] - else: - uri = self._get_uri(resource, uuid=uuid, permanent=permanent) - resp, body = self.get(uri) - self.expected_success(200, int(resp['status'])) - - return resp, self.deserialize(body) - - def _create_request(self, resource, object_dict): - """Create an object of the specified type. - - :param resource: The name of the REST resource, e.g., 'audits'. - :param object_dict: A Python dict that represents an object of the - specified type. - :return: A tuple with the server response and the deserialized created - object. 
- """ - - body = self.serialize(object_dict) - uri = self._get_uri(resource) - - resp, body = self.post(uri, body=body) - self.expected_success(201, int(resp['status'])) - - return resp, self.deserialize(body) - - def _delete_request(self, resource, uuid): - """Delete specified object. - - :param resource: The name of the REST resource, e.g., 'audits'. - :param uuid: The unique identifier of an object in UUID format. - :return: A tuple with the server response and the response body. - """ - - uri = self._get_uri(resource, uuid) - - resp, body = self.delete(uri) - self.expected_success(204, int(resp['status'])) - return resp, body - - def _patch_request(self, resource, uuid, patch_object): - """Update specified object with JSON-patch. - - :param resource: The name of the REST resource, e.g., 'audits'. - :param uuid: The unique identifier of an object in UUID format. - :return: A tuple with the server response and the serialized patched - object. - """ - - uri = self._get_uri(resource, uuid) - patch_body = self.serialize(patch_object) - - resp, body = self.patch(uri, body=patch_body) - self.expected_success(200, int(resp['status'])) - return resp, self.deserialize(body) - - @handle_errors - def get_api_description(self): - """Retrieves all versions of the Watcher API.""" - - return self._list_request('', permanent=True) - - @handle_errors - def get_version_description(self, version='v1'): - """Retrieves the description of the API. - - :param version: The version of the API. Default: 'v1'. - :return: Serialized description of API resources. 
- """ - - return self._list_request(version, permanent=True) - - def _put_request(self, resource, put_object): - """Update specified object with JSON-patch.""" - - uri = self._get_uri(resource) - put_body = self.serialize(put_object) - - resp, body = self.put(uri, body=put_body) - self.expected_success(202, int(resp['status'])) - return resp, body diff --git a/watcher_tempest_plugin/services/infra_optim/v1/__init__.py b/watcher_tempest_plugin/services/infra_optim/v1/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/v1/json/__init__.py b/watcher_tempest_plugin/services/infra_optim/v1/json/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/v1/json/client.py b/watcher_tempest_plugin/services/infra_optim/v1/json/client.py deleted file mode 100644 index 2ee27f5..0000000 --- a/watcher_tempest_plugin/services/infra_optim/v1/json/client.py +++ /dev/null @@ -1,331 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_serialization import jsonutils -from watcher.common import utils -from watcher_tempest_plugin.services.infra_optim import base - - -class InfraOptimClientJSON(base.BaseInfraOptimClient): - """Base Tempest REST client for Watcher API v1.""" - - URI_PREFIX = 'v1' - - def serialize(self, object_dict): - """Serialize an Watcher object.""" - return jsonutils.dumps(object_dict) - - def deserialize(self, object_str): - """Deserialize an Watcher object.""" - return jsonutils.loads(object_str.decode('utf-8')) - - # ### AUDIT TEMPLATES ### # - - @base.handle_errors - def list_audit_templates(self, **kwargs): - """List all existing audit templates.""" - return self._list_request('audit_templates', **kwargs) - - @base.handle_errors - def list_audit_templates_detail(self, **kwargs): - """Lists details of all existing audit templates.""" - return self._list_request('/audit_templates/detail', **kwargs) - - @base.handle_errors - def show_audit_template(self, audit_template_uuid): - """Gets a specific audit template. - - :param audit_template_uuid: Unique identifier of the audit template - :return: Serialized audit template as a dictionary. - """ - return self._show_request('audit_templates', audit_template_uuid) - - @base.handle_errors - def create_audit_template(self, **kwargs): - """Creates an audit template with the specified parameters. - - :param name: The name of the audit template. - :param description: The description of the audit template. - :param goal_uuid: The related Goal UUID associated. - :param strategy_uuid: The related Strategy UUID associated. - :param audit_scope: Scope the audit should apply to. - :return: A tuple with the server response and the created audit - template. 
- """ - - parameters = {k: v for k, v in kwargs.items() if v is not None} - # This name is unique to avoid the DB unique constraint on names - unique_name = 'Tempest Audit Template %s' % utils.generate_uuid() - - audit_template = { - 'name': parameters.get('name', unique_name), - 'description': parameters.get('description'), - 'goal': parameters.get('goal'), - 'strategy': parameters.get('strategy'), - 'scope': parameters.get('scope', []), - } - - return self._create_request('audit_templates', audit_template) - - @base.handle_errors - def delete_audit_template(self, audit_template_uuid): - """Deletes an audit template having the specified UUID. - - :param audit_template_uuid: The unique identifier of the audit template - :return: A tuple with the server response and the response body. - """ - - return self._delete_request('audit_templates', audit_template_uuid) - - @base.handle_errors - def update_audit_template(self, audit_template_uuid, patch): - """Update the specified audit template. - - :param audit_template_uuid: The unique identifier of the audit template - :param patch: List of dicts representing json patches. - :return: A tuple with the server response and the updated audit - template. - """ - - return self._patch_request('audit_templates', - audit_template_uuid, patch) - - # ### AUDITS ### # - - @base.handle_errors - def list_audits(self, **kwargs): - """List all existing audit templates.""" - return self._list_request('audits', **kwargs) - - @base.handle_errors - def list_audits_detail(self, **kwargs): - """Lists details of all existing audit templates.""" - return self._list_request('/audits/detail', **kwargs) - - @base.handle_errors - def show_audit(self, audit_uuid): - """Gets a specific audit template. 
- - :param audit_uuid: Unique identifier of the audit template - :return: Serialized audit template as a dictionary - """ - return self._show_request('audits', audit_uuid) - - @base.handle_errors - def create_audit(self, audit_template_uuid, **kwargs): - """Create an audit with the specified parameters - - :param audit_template_uuid: Audit template ID used by the audit - :return: A tuple with the server response and the created audit - """ - audit = {'audit_template_uuid': audit_template_uuid} - audit.update(kwargs) - if not audit['state']: - del audit['state'] - - return self._create_request('audits', audit) - - @base.handle_errors - def delete_audit(self, audit_uuid): - """Deletes an audit having the specified UUID - - :param audit_uuid: The unique identifier of the audit - :return: A tuple with the server response and the response body - """ - - return self._delete_request('audits', audit_uuid) - - @base.handle_errors - def update_audit(self, audit_uuid, patch): - """Update the specified audit. - - :param audit_uuid: The unique identifier of the audit - :param patch: List of dicts representing json patches. 
- :return: Tuple with the server response and the updated audit - """ - - return self._patch_request('audits', audit_uuid, patch) - - # ### ACTION PLANS ### # - - @base.handle_errors - def list_action_plans(self, **kwargs): - """List all existing action plan""" - return self._list_request('action_plans', **kwargs) - - @base.handle_errors - def list_action_plans_detail(self, **kwargs): - """Lists details of all existing action plan""" - return self._list_request('/action_plans/detail', **kwargs) - - @base.handle_errors - def show_action_plan(self, action_plan_uuid): - """Gets a specific action plan - - :param action_plan_uuid: Unique identifier of the action plan - :return: Serialized action plan as a dictionary - """ - return self._show_request('/action_plans', action_plan_uuid) - - @base.handle_errors - def delete_action_plan(self, action_plan_uuid): - """Deletes an action plan having the specified UUID - - :param action_plan_uuid: The unique identifier of the action_plan - :return: A tuple with the server response and the response body - """ - - return self._delete_request('/action_plans', action_plan_uuid) - - @base.handle_errors - def delete_action_plans_by_audit(self, audit_uuid): - """Deletes an action plan having the specified UUID - - :param audit_uuid: The unique identifier of the related Audit - """ - - action_plans = self.list_action_plans(audit_uuid=audit_uuid)[1] - - for action_plan in action_plans: - self.delete_action_plan(action_plan['uuid']) - - @base.handle_errors - def update_action_plan(self, action_plan_uuid, patch): - """Update the specified action plan - - :param action_plan_uuid: The unique identifier of the action_plan - :param patch: List of dicts representing json patches. 
- :return: Tuple with the server response and the updated action_plan - """ - - return self._patch_request('/action_plans', action_plan_uuid, patch) - - @base.handle_errors - def start_action_plan(self, action_plan_uuid): - """Start the specified action plan - - :param action_plan_uuid: The unique identifier of the action_plan - :return: Tuple with the server response and the updated action_plan - """ - - return self._patch_request( - '/action_plans', action_plan_uuid, - [{'path': '/state', 'op': 'replace', 'value': 'PENDING'}]) - - # ### GOALS ### # - - @base.handle_errors - def list_goals(self, **kwargs): - """List all existing goals""" - return self._list_request('/goals', **kwargs) - - @base.handle_errors - def list_goals_detail(self, **kwargs): - """Lists details of all existing goals""" - return self._list_request('/goals/detail', **kwargs) - - @base.handle_errors - def show_goal(self, goal): - """Gets a specific goal - - :param goal: UUID or Name of the goal - :return: Serialized goal as a dictionary - """ - return self._show_request('/goals', goal) - - # ### ACTIONS ### # - - @base.handle_errors - def list_actions(self, **kwargs): - """List all existing actions""" - return self._list_request('/actions', **kwargs) - - @base.handle_errors - def list_actions_detail(self, **kwargs): - """Lists details of all existing actions""" - return self._list_request('/actions/detail', **kwargs) - - @base.handle_errors - def show_action(self, action_uuid): - """Gets a specific action - - :param action_uuid: Unique identifier of the action - :return: Serialized action as a dictionary - """ - return self._show_request('/actions', action_uuid) - - # ### STRATEGIES ### # - - @base.handle_errors - def list_strategies(self, **kwargs): - """List all existing strategies""" - return self._list_request('/strategies', **kwargs) - - @base.handle_errors - def list_strategies_detail(self, **kwargs): - """Lists details of all existing strategies""" - return 
self._list_request('/strategies/detail', **kwargs) - - @base.handle_errors - def show_strategy(self, strategy): - """Gets a specific strategy - - :param strategy_id: Name of the strategy - :return: Serialized strategy as a dictionary - """ - return self._show_request('/strategies', strategy) - - # ### SCORING ENGINE ### # - - @base.handle_errors - def list_scoring_engines(self, **kwargs): - """List all existing scoring_engines""" - return self._list_request('/scoring_engines', **kwargs) - - @base.handle_errors - def list_scoring_engines_detail(self, **kwargs): - """Lists details of all existing scoring_engines""" - return self._list_request('/scoring_engines/detail', **kwargs) - - @base.handle_errors - def show_scoring_engine(self, scoring_engine): - """Gets a specific scoring_engine - - :param scoring_engine: UUID or Name of the scoring_engine - :return: Serialized scoring_engine as a dictionary - """ - return self._show_request('/scoring_engines', scoring_engine) - - # ### SERVICES ### # - - @base.handle_errors - def list_services(self, **kwargs): - """List all existing services""" - return self._list_request('/services', **kwargs) - - @base.handle_errors - def list_services_detail(self, **kwargs): - """Lists details of all existing services""" - return self._list_request('/services/detail', **kwargs) - - @base.handle_errors - def show_service(self, service): - """Gets a specific service - - :param service: Name of the strategy - :return: Serialized strategy as a dictionary - """ - return self._show_request('/services', service) diff --git a/watcher_tempest_plugin/tests/__init__.py b/watcher_tempest_plugin/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/api/__init__.py b/watcher_tempest_plugin/tests/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/api/admin/__init__.py b/watcher_tempest_plugin/tests/api/admin/__init__.py deleted file mode 100644 
index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/api/admin/base.py b/watcher_tempest_plugin/tests/api/admin/base.py deleted file mode 100644 index 5373623..0000000 --- a/watcher_tempest_plugin/tests/api/admin/base.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools - -from tempest.lib.common.utils import data_utils -from tempest.lib.common.utils import test_utils -from tempest import test - -from watcher_tempest_plugin import infra_optim_clients as clients - - -class BaseInfraOptimTest(test.BaseTestCase): - """Base class for Infrastructure Optimization API tests.""" - - # States where the object is waiting for some event to perform a transition - IDLE_STATES = ('RECOMMENDED', - 'FAILED', - 'SUCCEEDED', - 'CANCELLED', - 'SUSPENDED') - # States where the object can only be DELETED (end of its life-cycle) - FINISHED_STATES = ('FAILED', - 'SUCCEEDED', - 'CANCELLED', - 'SUPERSEDED') - - @classmethod - def setup_credentials(cls): - super(BaseInfraOptimTest, cls).setup_credentials() - cls.mgr = clients.AdminManager() - - @classmethod - def setup_clients(cls): - super(BaseInfraOptimTest, cls).setup_clients() - cls.client = cls.mgr.io_client - - @classmethod - def resource_setup(cls): - super(BaseInfraOptimTest, cls).resource_setup() - - # Set of all created audit templates UUIDs - cls.created_audit_templates = set() - # Set of all created audit UUIDs - 
cls.created_audits = set() - # Set of all created audit UUIDs. We use it to build the list of - # action plans to delete (including potential orphan one(s)) - cls.created_action_plans_audit_uuids = set() - - @classmethod - def resource_cleanup(cls): - """Ensure that all created objects get destroyed.""" - try: - action_plans_to_be_deleted = set() - # Phase 1: Make sure all objects are in an idle state - for audit_uuid in cls.created_audits: - test_utils.call_until_true( - func=functools.partial( - cls.is_audit_idle, audit_uuid), - duration=30, - sleep_for=.5 - ) - - for audit_uuid in cls.created_action_plans_audit_uuids: - _, action_plans = cls.client.list_action_plans( - audit_uuid=audit_uuid) - action_plans_to_be_deleted.update( - ap['uuid'] for ap in action_plans['action_plans']) - - for action_plan in action_plans['action_plans']: - try: - test_utils.call_until_true( - func=functools.partial( - cls.is_action_plan_idle, action_plan['uuid']), - duration=30, - sleep_for=.5 - ) - except Exception: - action_plans_to_be_deleted.remove( - action_plan['uuid']) - - # Phase 2: Delete them all - for action_plan_uuid in action_plans_to_be_deleted: - cls.delete_action_plan(action_plan_uuid) - - for audit_uuid in cls.created_audits.copy(): - cls.delete_audit(audit_uuid) - - for audit_template_uuid in cls.created_audit_templates.copy(): - cls.delete_audit_template(audit_template_uuid) - - finally: - super(BaseInfraOptimTest, cls).resource_cleanup() - - def validate_self_link(self, resource, uuid, link): - """Check whether the given self link formatted correctly.""" - expected_link = "{base}/{pref}/{res}/{uuid}".format( - base=self.client.base_url, - pref=self.client.URI_PREFIX, - res=resource, - uuid=uuid - ) - self.assertEqual(expected_link, link) - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - # Check if not expected keys/values exists in actual response body - for key, value in expected.items(): - if key not in keys: - 
self.assertIn(key, actual) - self.assertEqual(value, actual[key]) - - # ### AUDIT TEMPLATES ### # - - @classmethod - def create_audit_template(cls, goal, name=None, description=None, - strategy=None, scope=None): - """Wrapper utility for creating a test audit template - - :param goal: Goal UUID or name related to the audit template. - :param name: The name of the audit template. Default: My Audit Template - :param description: The description of the audit template. - :param strategy: Strategy UUID or name related to the audit template. - :param scope: Scope that will be applied on all derived audits. - :return: A tuple with The HTTP response and its body - """ - description = description or data_utils.rand_name( - 'test-audit_template') - resp, body = cls.client.create_audit_template( - name=name, description=description, - goal=goal, strategy=strategy, scope=scope) - - cls.created_audit_templates.add(body['uuid']) - - return resp, body - - @classmethod - def delete_audit_template(cls, uuid): - """Deletes a audit_template having the specified UUID - - :param uuid: The unique identifier of the audit template - :return: Server response - """ - resp, _ = cls.client.delete_audit_template(uuid) - - if uuid in cls.created_audit_templates: - cls.created_audit_templates.remove(uuid) - - return resp - - # ### AUDITS ### # - - @classmethod - def create_audit(cls, audit_template_uuid, audit_type='ONESHOT', - state=None, interval=None): - """Wrapper utility for creating a test audit - - :param audit_template_uuid: Audit Template UUID this audit will use - :param audit_type: Audit type (either ONESHOT or CONTINUOUS) - :param state: Audit state (str) - :param interval: Audit interval in seconds or cron syntax (str) - :return: A tuple with The HTTP response and its body - """ - resp, body = cls.client.create_audit( - audit_template_uuid=audit_template_uuid, audit_type=audit_type, - state=state, interval=interval) - - cls.created_audits.add(body['uuid']) - 
cls.created_action_plans_audit_uuids.add(body['uuid']) - - return resp, body - - @classmethod - def delete_audit(cls, audit_uuid): - """Deletes an audit having the specified UUID - - :param audit_uuid: The unique identifier of the audit. - :return: the HTTP response - """ - resp, _ = cls.client.delete_audit(audit_uuid) - - if audit_uuid in cls.created_audits: - cls.created_audits.remove(audit_uuid) - - return resp - - @classmethod - def has_audit_succeeded(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') == 'SUCCEEDED' - - @classmethod - def has_audit_finished(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') in cls.FINISHED_STATES - - @classmethod - def is_audit_idle(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') in cls.IDLE_STATES - - # ### ACTION PLANS ### # - - @classmethod - def create_action_plan(cls, audit_template_uuid, **audit_kwargs): - """Wrapper utility for creating a test action plan - - :param audit_template_uuid: Audit template UUID to use - :param audit_kwargs: Dict of audit properties to set - :return: The action plan as dict - """ - _, audit = cls.create_audit(audit_template_uuid, **audit_kwargs) - audit_uuid = audit['uuid'] - - assert test_utils.call_until_true( - func=functools.partial(cls.has_audit_finished, audit_uuid), - duration=30, - sleep_for=.5 - ) - - _, action_plans = cls.client.list_action_plans(audit_uuid=audit_uuid) - if len(action_plans['action_plans']) == 0: - return - - return action_plans['action_plans'][0] - - @classmethod - def delete_action_plan(cls, action_plan_uuid): - """Deletes an action plan having the specified UUID - - :param action_plan_uuid: The unique identifier of the action plan. 
- :return: the HTTP response - """ - resp, _ = cls.client.delete_action_plan(action_plan_uuid) - - if action_plan_uuid in cls.created_action_plans_audit_uuids: - cls.created_action_plans_audit_uuids.remove(action_plan_uuid) - - return resp - - @classmethod - def has_action_plan_finished(cls, action_plan_uuid): - _, action_plan = cls.client.show_action_plan(action_plan_uuid) - return action_plan.get('state') in cls.FINISHED_STATES - - @classmethod - def is_action_plan_idle(cls, action_plan_uuid): - """This guard makes sure your action plan is not running""" - _, action_plan = cls.client.show_action_plan(action_plan_uuid) - return action_plan.get('state') in cls.IDLE_STATES diff --git a/watcher_tempest_plugin/tests/api/admin/test_action.py b/watcher_tempest_plugin/tests/api/admin/test_action.py deleted file mode 100644 index 3fa2d94..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_action.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import collections -import functools - -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListAction(base.BaseInfraOptimTest): - """Tests for actions""" - - @classmethod - def resource_setup(cls): - super(TestShowListAction, cls).resource_setup() - _, cls.goal = cls.client.show_goal("DUMMY") - _, cls.audit_template = cls.create_audit_template(cls.goal['uuid']) - _, cls.audit = cls.create_audit(cls.audit_template['uuid']) - - assert test_utils.call_until_true( - func=functools.partial(cls.has_audit_finished, cls.audit['uuid']), - duration=30, - sleep_for=.5 - ) - _, action_plans = cls.client.list_action_plans( - audit_uuid=cls.audit['uuid']) - cls.action_plan = action_plans['action_plans'][0] - - @decorators.attr(type='smoke') - def test_show_one_action(self): - _, body = self.client.list_actions( - action_plan_uuid=self.action_plan["uuid"]) - actions = body['actions'] - - _, action = self.client.show_action(actions[0]["uuid"]) - - self.assertEqual(self.action_plan["uuid"], action['action_plan_uuid']) - self.assertEqual("PENDING", action['state']) - - @decorators.attr(type='smoke') - def test_show_action_with_links(self): - _, body = self.client.list_actions( - action_plan_uuid=self.action_plan["uuid"]) - actions = body['actions'] - - _, action = self.client.show_action(actions[0]["uuid"]) - - self.assertIn('links', action.keys()) - self.assertEqual(2, len(action['links'])) - self.assertIn(action['uuid'], action['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_actions(self): - _, body = self.client.list_actions() - - # Verify self links. 
- for action in body['actions']: - self.validate_self_link('actions', action['uuid'], - action['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_actions_by_action_plan(self): - _, body = self.client.list_actions( - action_plan_uuid=self.action_plan["uuid"]) - - for item in body['actions']: - self.assertEqual(self.action_plan["uuid"], - item['action_plan_uuid']) - - action_counter = collections.Counter( - act['action_type'] for act in body['actions']) - - # A dummy strategy generates 2 "nop" actions and 1 "sleep" action - self.assertEqual(3, len(body['actions'])) - self.assertEqual(2, action_counter.get("nop")) - self.assertEqual(1, action_counter.get("sleep")) - - @decorators.attr(type="smoke") - def test_list_actions_by_audit(self): - _, body = self.client.list_actions(audit_uuid=self.audit["uuid"]) - - for item in body['actions']: - self.assertEqual(self.action_plan["uuid"], - item['action_plan_uuid']) - - action_counter = collections.Counter( - act['action_type'] for act in body['actions']) - - # A dummy strategy generates 2 "nop" actions and 1 "sleep" action - self.assertEqual(3, len(body['actions'])) - self.assertEqual(2, action_counter.get("nop")) - self.assertEqual(1, action_counter.get("sleep")) diff --git a/watcher_tempest_plugin/tests/api/admin/test_action_plan.py b/watcher_tempest_plugin/tests/api/admin/test_action_plan.py deleted file mode 100644 index b31b5df..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_action_plan.py +++ /dev/null @@ -1,176 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import functools - -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestCreateDeleteExecuteActionPlan(base.BaseInfraOptimTest): - """Tests for action plans""" - - @decorators.attr(type='smoke') - def test_create_action_plan(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - self.assertEqual(audit['uuid'], action_plan['audit_uuid']) - self.assertEqual('RECOMMENDED', action_plan['state']) - - @decorators.attr(type='smoke') - def test_delete_action_plan(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = 
self.client.show_action_plan(action_plan['uuid']) - - self.client.delete_action_plan(action_plan['uuid']) - - self.assertRaises(exceptions.NotFound, self.client.show_action_plan, - action_plan['uuid']) - - @decorators.attr(type='smoke') - def test_execute_dummy_action_plan(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - if action_plan['state'] in ['SUPERSEDED', 'SUCCEEDED']: - # This means the action plan is superseded so we cannot trigger it, - # or it is empty. - return - - # Execute the action by changing its state to PENDING - _, updated_ap = self.client.start_action_plan(action_plan['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_action_plan_finished, action_plan['uuid']), - duration=30, - sleep_for=.5 - )) - _, finished_ap = self.client.show_action_plan(action_plan['uuid']) - - self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING')) - self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED')) - - -class TestShowListActionPlan(base.BaseInfraOptimTest): - """Tests for action_plan.""" - - @classmethod - def resource_setup(cls): - super(TestShowListActionPlan, cls).resource_setup() - _, cls.goal = cls.client.show_goal("dummy") - _, cls.audit_template = cls.create_audit_template(cls.goal['uuid']) - _, cls.audit = cls.create_audit(cls.audit_template['uuid']) - - assert test_utils.call_until_true( - func=functools.partial(cls.has_audit_finished, cls.audit['uuid']), - duration=30, - sleep_for=.5 - ) - _, action_plans = cls.client.list_action_plans( - 
audit_uuid=cls.audit['uuid']) - if len(action_plans['action_plans']) > 0: - cls.action_plan = action_plans['action_plans'][0] - - @decorators.attr(type='smoke') - def test_show_action_plan(self): - _, action_plan = self.client.show_action_plan( - self.action_plan['uuid']) - - self.assert_expected(self.action_plan, action_plan) - - @decorators.attr(type='smoke') - def test_show_action_plan_detail(self): - _, action_plans = self.client.list_action_plans_detail( - audit_uuid=self.audit['uuid']) - - action_plan = action_plans['action_plans'][0] - - self.assert_expected(self.action_plan, action_plan) - - @decorators.attr(type='smoke') - def test_show_action_plan_with_links(self): - _, action_plan = self.client.show_action_plan( - self.action_plan['uuid']) - self.assertIn('links', action_plan.keys()) - self.assertEqual(2, len(action_plan['links'])) - self.assertIn(action_plan['uuid'], - action_plan['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_action_plans(self): - _, body = self.client.list_action_plans() - self.assertIn(self.action_plan['uuid'], - [i['uuid'] for i in body['action_plans']]) - # Verify self links. 
- for action_plan in body['action_plans']: - self.validate_self_link('action_plans', action_plan['uuid'], - action_plan['links'][0]['href']) - - @decorators.attr(type='smoke') - def test_list_with_limit(self): - # We create 3 extra audits to exceed the limit we fix - for _ in range(3): - self.create_action_plan(self.audit_template['uuid']) - - _, body = self.client.list_action_plans(limit=3) - - next_marker = body['action_plans'][-1]['uuid'] - - self.assertEqual(3, len(body['action_plans'])) - self.assertIn(next_marker, body['next']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_api_discovery.py b/watcher_tempest_plugin/tests/api/admin/test_api_discovery.py deleted file mode 100644 index f30cb4b..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_api_discovery.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestApiDiscovery(base.BaseInfraOptimTest): - """Tests for API discovery features.""" - - @decorators.attr(type='smoke') - def test_api_versions(self): - _, descr = self.client.get_api_description() - expected_versions = ('v1',) - versions = [version['id'] for version in descr['versions']] - - for v in expected_versions: - self.assertIn(v, versions) - - @decorators.attr(type='smoke') - def test_default_version(self): - _, descr = self.client.get_api_description() - default_version = descr['default_version'] - self.assertEqual('v1', default_version['id']) - - @decorators.attr(type='smoke') - def test_version_1_resources(self): - _, descr = self.client.get_version_description(version='v1') - expected_resources = ('audit_templates', 'audits', 'action_plans', - 'actions', 'links', 'media_types') - - for res in expected_resources: - self.assertIn(res, descr) diff --git a/watcher_tempest_plugin/tests/api/admin/test_audit.py b/watcher_tempest_plugin/tests/api/admin/test_audit.py deleted file mode 100644 index 13a187e..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_audit.py +++ /dev/null @@ -1,221 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import functools - -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestCreateUpdateDeleteAudit(base.BaseInfraOptimTest): - """Tests for audit.""" - - audit_states = ['ONGOING', 'SUCCEEDED', 'FAILED', - 'CANCELLED', 'DELETED', 'PENDING', 'SUSPENDED'] - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', - 'deleted_at', 'state')): - super(TestCreateUpdateDeleteAudit, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_create_audit_oneshot(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - audit_type='ONESHOT', - ) - - _, body = self.create_audit(**audit_params) - audit_params.pop('audit_template_uuid') - audit_params['goal_uuid'] = goal['uuid'] - self.assert_expected(audit_params, body) - - _, audit = self.client.show_audit(body['uuid']) - self.assert_expected(audit, body) - - @decorators.attr(type='smoke') - def test_create_audit_continuous(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - audit_type='CONTINUOUS', - interval='7200', - ) - - _, body = self.create_audit(**audit_params) - audit_params.pop('audit_template_uuid') - audit_params['goal_uuid'] = goal['uuid'] - self.assert_expected(audit_params, body) - - _, audit = self.client.show_audit(body['uuid']) - self.assert_expected(audit, body) - - @decorators.attr(type='smoke') - def test_create_audit_with_wrong_audit_template(self): - audit_params = dict( - audit_template_uuid='INVALID', - audit_type='ONESHOT', - ) - - self.assertRaises( - exceptions.BadRequest, self.create_audit, 
**audit_params) - - @decorators.attr(type='smoke') - def test_create_audit_with_invalid_state(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - state='INVALID', - ) - - self.assertRaises( - exceptions.BadRequest, self.create_audit, **audit_params) - - @decorators.attr(type='smoke') - def test_create_audit_with_no_state(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - state='', - ) - - _, body = self.create_audit(**audit_params) - audit_params.pop('audit_template_uuid') - audit_params['goal_uuid'] = goal['uuid'] - self.assert_expected(audit_params, body) - - _, audit = self.client.show_audit(body['uuid']) - - initial_audit_state = audit.pop('state') - self.assertIn(initial_audit_state, self.audit_states) - - self.assert_expected(audit, body) - - @decorators.attr(type='smoke') - def test_delete_audit(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, body = self.create_audit(audit_template['uuid']) - audit_uuid = body['uuid'] - - test_utils.call_until_true( - func=functools.partial( - self.is_audit_idle, audit_uuid), - duration=10, - sleep_for=.5 - ) - - def is_audit_deleted(uuid): - try: - return not bool(self.client.show_audit(uuid)) - except exceptions.NotFound: - return True - - self.delete_audit(audit_uuid) - - test_utils.call_until_true( - func=functools.partial(is_audit_deleted, audit_uuid), - duration=5, - sleep_for=1 - ) - - self.assertTrue(is_audit_deleted(audit_uuid)) - - -class TestShowListAudit(base.BaseInfraOptimTest): - """Tests for audit.""" - - audit_states = ['ONGOING', 'SUCCEEDED', 'FAILED', - 'CANCELLED', 'DELETED', 'PENDING', 'SUSPENDED'] - - @classmethod - def resource_setup(cls): - super(TestShowListAudit, 
cls).resource_setup() - _, cls.goal = cls.client.show_goal("dummy") - _, cls.audit_template = cls.create_audit_template(cls.goal['uuid']) - _, cls.audit = cls.create_audit(cls.audit_template['uuid']) - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', - 'deleted_at', 'state')): - super(TestShowListAudit, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_audit(self): - _, audit = self.client.show_audit( - self.audit['uuid']) - - initial_audit = self.audit.copy() - del initial_audit['state'] - audit_state = audit['state'] - actual_audit = audit.copy() - del actual_audit['state'] - - self.assertIn(audit_state, self.audit_states) - self.assert_expected(initial_audit, actual_audit) - - @decorators.attr(type='smoke') - def test_show_audit_with_links(self): - _, audit = self.client.show_audit( - self.audit['uuid']) - self.assertIn('links', audit.keys()) - self.assertEqual(2, len(audit['links'])) - self.assertIn(audit['uuid'], - audit['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_audits(self): - _, body = self.client.list_audits() - self.assertIn(self.audit['uuid'], - [i['uuid'] for i in body['audits']]) - # Verify self links. 
- for audit in body['audits']: - self.validate_self_link('audits', audit['uuid'], - audit['links'][0]['href']) - - @decorators.attr(type='smoke') - def test_list_with_limit(self): - # We create 3 extra audits to exceed the limit we fix - for _ in range(3): - self.create_audit(self.audit_template['uuid']) - - _, body = self.client.list_audits(limit=3) - - next_marker = body['audits'][-1]['uuid'] - self.assertEqual(3, len(body['audits'])) - self.assertIn(next_marker, body['next']) - - @decorators.attr(type='smoke') - def test_list_audits_related_to_given_audit_template(self): - _, body = self.client.list_audits( - goal=self.goal['uuid']) - self.assertIn(self.audit['uuid'], [n['uuid'] for n in body['audits']]) diff --git a/watcher_tempest_plugin/tests/api/admin/test_audit_template.py b/watcher_tempest_plugin/tests/api/admin/test_audit_template.py deleted file mode 100644 index 75ac80a..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_audit_template.py +++ /dev/null @@ -1,226 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from oslo_utils import uuidutils - -from tempest.lib import decorators -from tempest.lib import exceptions - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestCreateDeleteAuditTemplate(base.BaseInfraOptimTest): - """Tests on audit templates""" - - @decorators.attr(type='smoke') - def test_create_audit_template(self): - goal_name = "dummy" - _, goal = self.client.show_goal(goal_name) - - params = { - 'name': 'my at name %s' % uuidutils.generate_uuid(), - 'description': 'my at description', - 'goal': goal['uuid']} - expected_data = { - 'name': params['name'], - 'description': params['description'], - 'goal_uuid': params['goal'], - 'goal_name': goal_name, - 'strategy_uuid': None, - 'strategy_name': None} - - _, body = self.create_audit_template(**params) - self.assert_expected(expected_data, body) - - _, audit_template = self.client.show_audit_template(body['uuid']) - self.assert_expected(audit_template, body) - - @decorators.attr(type='smoke') - def test_create_audit_template_unicode_description(self): - goal_name = "dummy" - _, goal = self.client.show_goal(goal_name) - # Use a unicode string for testing: - params = { - 'name': 'my at name %s' % uuidutils.generate_uuid(), - 'description': 'my àt déscrïptïôn', - 'goal': goal['uuid']} - - expected_data = { - 'name': params['name'], - 'description': params['description'], - 'goal_uuid': params['goal'], - 'goal_name': goal_name, - 'strategy_uuid': None, - 'strategy_name': None} - - _, body = self.create_audit_template(**params) - self.assert_expected(expected_data, body) - - _, audit_template = self.client.show_audit_template(body['uuid']) - self.assert_expected(audit_template, body) - - @decorators.attr(type='smoke') - def test_delete_audit_template(self): - _, goal = self.client.show_goal("dummy") - _, body = self.create_audit_template(goal=goal['uuid']) - audit_uuid = body['uuid'] - - self.delete_audit_template(audit_uuid) - - 
self.assertRaises(exceptions.NotFound, self.client.show_audit_template, - audit_uuid) - - -class TestAuditTemplate(base.BaseInfraOptimTest): - """Tests for audit_template.""" - - @classmethod - def resource_setup(cls): - super(TestAuditTemplate, cls).resource_setup() - _, cls.goal = cls.client.show_goal("dummy") - _, cls.strategy = cls.client.show_strategy("dummy") - _, cls.audit_template = cls.create_audit_template( - goal=cls.goal['uuid'], strategy=cls.strategy['uuid']) - - @decorators.attr(type='smoke') - def test_show_audit_template(self): - _, audit_template = self.client.show_audit_template( - self.audit_template['uuid']) - - self.assert_expected(self.audit_template, audit_template) - - @decorators.attr(type='smoke') - def test_filter_audit_template_by_goal_uuid(self): - _, audit_templates = self.client.list_audit_templates( - goal=self.audit_template['goal_uuid']) - - audit_template_uuids = [ - at["uuid"] for at in audit_templates['audit_templates']] - self.assertIn(self.audit_template['uuid'], audit_template_uuids) - - @decorators.attr(type='smoke') - def test_filter_audit_template_by_strategy_uuid(self): - _, audit_templates = self.client.list_audit_templates( - strategy=self.audit_template['strategy_uuid']) - - audit_template_uuids = [ - at["uuid"] for at in audit_templates['audit_templates']] - self.assertIn(self.audit_template['uuid'], audit_template_uuids) - - @decorators.attr(type='smoke') - def test_show_audit_template_with_links(self): - _, audit_template = self.client.show_audit_template( - self.audit_template['uuid']) - self.assertIn('links', audit_template.keys()) - self.assertEqual(2, len(audit_template['links'])) - self.assertIn(audit_template['uuid'], - audit_template['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_audit_templates(self): - _, body = self.client.list_audit_templates() - self.assertIn(self.audit_template['uuid'], - [i['uuid'] for i in body['audit_templates']]) - # Verify self links. 
- for audit_template in body['audit_templates']: - self.validate_self_link('audit_templates', audit_template['uuid'], - audit_template['links'][0]['href']) - - @decorators.attr(type='smoke') - def test_list_with_limit(self): - # We create 3 extra audit templates to exceed the limit we fix - for _ in range(3): - self.create_audit_template(self.goal['uuid']) - - _, body = self.client.list_audit_templates(limit=3) - - next_marker = body['audit_templates'][-1]['uuid'] - self.assertEqual(3, len(body['audit_templates'])) - self.assertIn(next_marker, body['next']) - - @decorators.attr(type='smoke') - def test_update_audit_template_replace(self): - _, new_goal = self.client.show_goal("server_consolidation") - _, new_strategy = self.client.show_strategy("basic") - - params = {'name': 'my at name %s' % uuidutils.generate_uuid(), - 'description': 'my at description', - 'goal': self.goal['uuid']} - - _, body = self.create_audit_template(**params) - - new_name = 'my at new name %s' % uuidutils.generate_uuid() - new_description = 'my new at description' - - patch = [{'path': '/name', - 'op': 'replace', - 'value': new_name}, - {'path': '/description', - 'op': 'replace', - 'value': new_description}, - {'path': '/goal', - 'op': 'replace', - 'value': new_goal['uuid']}, - {'path': '/strategy', - 'op': 'replace', - 'value': new_strategy['uuid']}] - - self.client.update_audit_template(body['uuid'], patch) - - _, body = self.client.show_audit_template(body['uuid']) - self.assertEqual(new_name, body['name']) - self.assertEqual(new_description, body['description']) - self.assertEqual(new_goal['uuid'], body['goal_uuid']) - self.assertEqual(new_strategy['uuid'], body['strategy_uuid']) - - @decorators.attr(type='smoke') - def test_update_audit_template_remove(self): - description = 'my at description' - name = 'my at name %s' % uuidutils.generate_uuid() - params = {'name': name, - 'description': description, - 'goal': self.goal['uuid']} - - _, audit_template = 
self.create_audit_template(**params) - - # Removing the description - self.client.update_audit_template( - audit_template['uuid'], - [{'path': '/description', 'op': 'remove'}]) - - _, body = self.client.show_audit_template(audit_template['uuid']) - self.assertIsNone(body.get('description')) - - # Assert nothing else was changed - self.assertEqual(name, body['name']) - self.assertIsNone(body['description']) - self.assertEqual(self.goal['uuid'], body['goal_uuid']) - - @decorators.attr(type='smoke') - def test_update_audit_template_add(self): - params = {'name': 'my at name %s' % uuidutils.generate_uuid(), - 'goal': self.goal['uuid']} - - _, body = self.create_audit_template(**params) - - patch = [{'path': '/description', 'op': 'add', 'value': 'description'}] - - self.client.update_audit_template(body['uuid'], patch) - - _, body = self.client.show_audit_template(body['uuid']) - self.assertEqual('description', body['description']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_goal.py b/watcher_tempest_plugin/tests/api/admin/test_goal.py deleted file mode 100644 index 2cf228e..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_goal.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListGoal(base.BaseInfraOptimTest): - """Tests for goals""" - - DUMMY_GOAL = "dummy" - - @classmethod - def resource_setup(cls): - super(TestShowListGoal, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListGoal, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_goal(self): - _, goal = self.client.show_goal(self.DUMMY_GOAL) - - self.assertEqual(self.DUMMY_GOAL, goal['name']) - expected_fields = { - 'created_at', 'deleted_at', 'display_name', - 'efficacy_specification', 'links', 'name', - 'updated_at', 'uuid'} - self.assertEqual(expected_fields, set(goal.keys())) - - @decorators.attr(type='smoke') - def test_show_goal_with_links(self): - _, goal = self.client.show_goal(self.DUMMY_GOAL) - self.assertIn('links', goal.keys()) - self.assertEqual(2, len(goal['links'])) - self.assertIn(goal['uuid'], - goal['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_goals(self): - _, body = self.client.list_goals() - self.assertIn(self.DUMMY_GOAL, - [i['name'] for i in body['goals']]) - - # Verify self links. - for goal in body['goals']: - self.validate_self_link('goals', goal['uuid'], - goal['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_scoring_engine.py b/watcher_tempest_plugin/tests/api/admin/test_scoring_engine.py deleted file mode 100644 index 466fe41..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_scoring_engine.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListScoringEngine(base.BaseInfraOptimTest): - """Tests for scoring engines""" - - DUMMY_SCORING_ENGINE = "dummy_scorer" - - @classmethod - def resource_setup(cls): - super(TestShowListScoringEngine, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListScoringEngine, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_scoring_engine(self): - _, scoring_engine = self.client.show_scoring_engine( - self.DUMMY_SCORING_ENGINE) - - self.assertEqual(self.DUMMY_SCORING_ENGINE, scoring_engine['name']) - - expected_fields = {'metainfo', 'description', 'name', 'uuid', 'links'} - self.assertEqual(expected_fields, set(scoring_engine.keys())) - - @decorators.attr(type='smoke') - def test_show_scoring_engine_with_links(self): - _, scoring_engine = self.client.show_scoring_engine( - self.DUMMY_SCORING_ENGINE) - self.assertIn('links', scoring_engine.keys()) - self.assertEqual(2, len(scoring_engine['links'])) - self.assertIn(scoring_engine['uuid'], - scoring_engine['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_scoring_engines(self): - _, body = self.client.list_scoring_engines() - self.assertIn(self.DUMMY_SCORING_ENGINE, - [i['name'] for i in body['scoring_engines']]) - - # Verify self links. 
- for scoring_engine in body['scoring_engines']: - self.validate_self_link('scoring_engines', scoring_engine['uuid'], - scoring_engine['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_service.py b/watcher_tempest_plugin/tests/api/admin/test_service.py deleted file mode 100644 index 948d8b1..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_service.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListService(base.BaseInfraOptimTest): - """Tests for services""" - - DECISION_ENGINE = "watcher-decision-engine" - APPLIER = "watcher-applier" - - @classmethod - def resource_setup(cls): - super(TestShowListService, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListService, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_service(self): - _, body = self.client.list_services() - self.assertIn('services', body) - services = body['services'] - self.assertIn(self.DECISION_ENGINE, - [i['name'] for i in body['services']]) - - service_id = filter(lambda x: self.DECISION_ENGINE == x['name'], - services)[0]['id'] - _, service = self.client.show_service(service_id) - - self.assertEqual(self.DECISION_ENGINE, service['name']) - self.assertIn("host", service.keys()) - self.assertIn("last_seen_up", service.keys()) - self.assertIn("status", service.keys()) - - @decorators.attr(type='smoke') - def test_show_service_with_links(self): - _, body = self.client.list_services() - self.assertIn('services', body) - services = body['services'] - self.assertIn(self.DECISION_ENGINE, - [i['name'] for i in body['services']]) - - service_id = filter(lambda x: self.DECISION_ENGINE == x['name'], - services)[0]['id'] - _, service = self.client.show_service(service_id) - - self.assertIn('links', service.keys()) - self.assertEqual(2, len(service['links'])) - self.assertIn(str(service['id']), - service['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_services(self): - _, body = self.client.list_services() - self.assertIn('services', body) - services = body['services'] - self.assertIn(self.DECISION_ENGINE, - [i['name'] for i in body['services']]) - - for service in services: - 
self.assertTrue( - all(val is not None for key, val in service.items() - if key in ['id', 'name', 'host', 'status', - 'last_seen_up'])) - - # Verify self links. - for service in body['services']: - self.validate_self_link('services', service['id'], - service['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_strategy.py b/watcher_tempest_plugin/tests/api/admin/test_strategy.py deleted file mode 100644 index 73eefd7..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_strategy.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListStrategy(base.BaseInfraOptimTest): - """Tests for strategies""" - - DUMMY_STRATEGY = "dummy" - - @classmethod - def resource_setup(cls): - super(TestShowListStrategy, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListStrategy, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_strategy(self): - _, strategy = self.client.show_strategy(self.DUMMY_STRATEGY) - - self.assertEqual(self.DUMMY_STRATEGY, strategy['name']) - self.assertIn("display_name", strategy.keys()) - - @decorators.attr(type='smoke') - def test_show_strategy_with_links(self): - _, strategy = self.client.show_strategy(self.DUMMY_STRATEGY) - self.assertIn('links', strategy.keys()) - self.assertEqual(2, len(strategy['links'])) - self.assertIn(strategy['uuid'], - strategy['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_strategies(self): - _, body = self.client.list_strategies() - self.assertIn('strategies', body) - strategies = body['strategies'] - self.assertIn(self.DUMMY_STRATEGY, - [i['name'] for i in body['strategies']]) - - for strategy in strategies: - self.assertTrue( - all(val is not None for key, val in strategy.items() - if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) - - # Verify self links. 
- for strategy in body['strategies']: - self.validate_self_link('strategies', strategy['uuid'], - strategy['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/scenario/__init__.py b/watcher_tempest_plugin/tests/scenario/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/scenario/base.py b/watcher_tempest_plugin/tests/scenario/base.py deleted file mode 100644 index 8b7e268..0000000 --- a/watcher_tempest_plugin/tests/scenario/base.py +++ /dev/null @@ -1,170 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from __future__ import unicode_literals - -import time - -from oslo_log import log -from tempest import config -from tempest import exceptions -from tempest.lib.common.utils import data_utils - -from watcher_tempest_plugin import infra_optim_clients as clients -from watcher_tempest_plugin.tests.scenario import manager - -LOG = log.getLogger(__name__) -CONF = config.CONF - - -class BaseInfraOptimScenarioTest(manager.ScenarioTest): - """Base class for Infrastructure Optimization API tests.""" - - # States where the object is waiting for some event to perform a transition - IDLE_STATES = ('RECOMMENDED', 'FAILED', 'SUCCEEDED', 'CANCELLED') - # States where the object can only be DELETED (end of its life-cycle) - FINISHED_STATES = ('FAILED', 'SUCCEEDED', 'CANCELLED', 'SUPERSEDED') - - @classmethod - def setup_credentials(cls): - cls._check_network_config() - super(BaseInfraOptimScenarioTest, cls).setup_credentials() - cls.mgr = clients.AdminManager() - - @classmethod - def setup_clients(cls): - super(BaseInfraOptimScenarioTest, cls).setup_clients() - cls.client = cls.mgr.io_client - - @classmethod - def resource_setup(cls): - super(BaseInfraOptimScenarioTest, cls).resource_setup() - - @classmethod - def resource_cleanup(cls): - """Ensure that all created objects get destroyed.""" - super(BaseInfraOptimScenarioTest, cls).resource_cleanup() - - @classmethod - def wait_for(cls, condition, timeout=30): - start_time = time.time() - while time.time() - start_time < timeout: - if condition(): - break - time.sleep(.5) - - @classmethod - def _check_network_config(cls): - if not CONF.network.public_network_id: - msg = 'public network not defined.' - LOG.error(msg) - raise exceptions.InvalidConfiguration(msg) - - # ### AUDIT TEMPLATES ### # - - def create_audit_template(self, goal, name=None, description=None, - strategy=None): - """Wrapper utility for creating a test audit template - - :param goal: Goal UUID or name related to the audit template. 
- :param name: The name of the audit template. Default: My Audit Template - :param description: The description of the audit template. - :param strategy: Strategy UUID or name related to the audit template. - :return: A tuple with The HTTP response and its body - """ - description = description or data_utils.rand_name( - 'test-audit_template') - resp, body = self.client.create_audit_template( - name=name, description=description, goal=goal, strategy=strategy) - - self.addCleanup( - self.delete_audit_template, - audit_template_uuid=body["uuid"] - ) - - return resp, body - - def delete_audit_template(self, audit_template_uuid): - """Deletes a audit_template having the specified UUID - - :param audit_template_uuid: The unique identifier of the audit template - :return: Server response - """ - resp, _ = self.client.delete_audit_template(audit_template_uuid) - return resp - - # ### AUDITS ### # - - def create_audit(self, audit_template_uuid, audit_type='ONESHOT', - state=None, parameters=None): - """Wrapper utility for creating a test audit - - :param audit_template_uuid: Audit Template UUID this audit will use - :param audit_type: Audit type (either ONESHOT or CONTINUOUS) - :param state: Audit state - :param parameters: Input parameters of the audit - :return: A tuple with The HTTP response and its body - """ - resp, body = self.client.create_audit( - audit_template_uuid=audit_template_uuid, audit_type=audit_type, - state=state, parameters=parameters) - - self.addCleanup(self.delete_audit, audit_uuid=body["uuid"]) - return resp, body - - def delete_audit(self, audit_uuid): - """Deletes an audit having the specified UUID - - :param audit_uuid: The unique identifier of the audit. 
- :return: the HTTP response - """ - - _, action_plans = self.client.list_action_plans(audit_uuid=audit_uuid) - for action_plan in action_plans.get("action_plans", []): - self.delete_action_plan(action_plan_uuid=action_plan["uuid"]) - - resp, _ = self.client.delete_audit(audit_uuid) - return resp - - def has_audit_succeeded(self, audit_uuid): - _, audit = self.client.show_audit(audit_uuid) - if audit.get('state') in ('FAILED', 'CANCELLED'): - raise ValueError() - - return audit.get('state') == 'SUCCEEDED' - - @classmethod - def has_audit_finished(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') in cls.FINISHED_STATES - - # ### ACTION PLANS ### # - - def delete_action_plan(self, action_plan_uuid): - """Deletes an action plan having the specified UUID - - :param action_plan_uuid: The unique identifier of the action plan. - :return: the HTTP response - """ - resp, _ = self.client.delete_action_plan(action_plan_uuid) - return resp - - def has_action_plan_finished(self, action_plan_uuid): - _, action_plan = self.client.show_action_plan(action_plan_uuid) - return action_plan.get('state') in ('FAILED', 'SUCCEEDED', 'CANCELLED', - 'SUPERSEDED') diff --git a/watcher_tempest_plugin/tests/scenario/manager.py b/watcher_tempest_plugin/tests/scenario/manager.py deleted file mode 100644 index 5364525..0000000 --- a/watcher_tempest_plugin/tests/scenario/manager.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -from tempest.common import compute -from tempest.common import waiters -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib.common.utils import test_utils -from tempest.lib import exceptions as lib_exc -import tempest.test - -CONF = config.CONF - -LOG = log.getLogger(__name__) - - -class ScenarioTest(tempest.test.BaseTestCase): - """Base class for scenario tests. Uses tempest own clients. """ - - credentials = ['primary'] - - @classmethod - def setup_clients(cls): - super(ScenarioTest, cls).setup_clients() - # Clients (in alphabetical order) - cls.flavors_client = cls.os_primary.flavors_client - cls.compute_floating_ips_client = ( - cls.os_primary.compute_floating_ips_client) - if CONF.service_available.glance: - # Check if glance v1 is available to determine which client to use. - if CONF.image_feature_enabled.api_v1: - cls.image_client = cls.os_primary.image_client - elif CONF.image_feature_enabled.api_v2: - cls.image_client = cls.os_primary.image_client_v2 - else: - raise lib_exc.InvalidConfiguration( - 'Either api_v1 or api_v2 must be True in ' - '[image-feature-enabled].') - # Compute image client - cls.compute_images_client = cls.os_primary.compute_images_client - cls.keypairs_client = cls.os_primary.keypairs_client - # Nova security groups client - cls.compute_security_groups_client = ( - cls.os_primary.compute_security_groups_client) - cls.compute_security_group_rules_client = ( - cls.os_primary.compute_security_group_rules_client) - cls.servers_client = cls.os_primary.servers_client - cls.interface_client = cls.os_primary.interfaces_client - # Neutron network client - cls.networks_client = cls.os_primary.networks_client - cls.ports_client = cls.os_primary.ports_client - cls.routers_client = cls.os_primary.routers_client - cls.subnets_client = cls.os_primary.subnets_client - 
cls.floating_ips_client = cls.os_primary.floating_ips_client - cls.security_groups_client = cls.os_primary.security_groups_client - cls.security_group_rules_client = ( - cls.os_primary.security_group_rules_client) - - if CONF.volume_feature_enabled.api_v2: - cls.volumes_client = cls.os_primary.volumes_v2_client - cls.snapshots_client = cls.os_primary.snapshots_v2_client - else: - cls.volumes_client = cls.os_primary.volumes_client - cls.snapshots_client = cls.os_primary.snapshots_client - - # ## Test functions library - # - # The create_[resource] functions only return body and discard the - # resp part which is not used in scenario tests - - def _create_port(self, network_id, client=None, namestart='port-quotatest', - **kwargs): - if not client: - client = self.ports_client - name = data_utils.rand_name(namestart) - result = client.create_port( - name=name, - network_id=network_id, - **kwargs) - self.assertIsNotNone(result, 'Unable to allocate port') - port = result['port'] - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - client.delete_port, port['id']) - return port - - def create_keypair(self, client=None): - if not client: - client = self.keypairs_client - name = data_utils.rand_name(self.__class__.__name__) - # We don't need to create a keypair by pubkey in scenario - body = client.create_keypair(name=name) - self.addCleanup(client.delete_keypair, name) - return body['keypair'] - - def create_server(self, name=None, image_id=None, flavor=None, - validatable=False, wait_until='ACTIVE', - clients=None, **kwargs): - """Wrapper utility that returns a test server. - - This wrapper utility calls the common create test server and - returns a test server. The purpose of this wrapper is to minimize - the impact on the code of the tests already using this - function. 
- """ - - # NOTE(jlanoux): As a first step, ssh checks in the scenario - # tests need to be run regardless of the run_validation and - # validatable parameters and thus until the ssh validation job - # becomes voting in CI. The test resources management and IP - # association are taken care of in the scenario tests. - # Therefore, the validatable parameter is set to false in all - # those tests. In this way create_server just return a standard - # server and the scenario tests always perform ssh checks. - - # Needed for the cross_tenant_traffic test: - if clients is None: - clients = self.os_primary - - if name is None: - name = data_utils.rand_name(self.__class__.__name__ + "-server") - - vnic_type = CONF.network.port_vnic_type - - # If vnic_type is configured create port for - # every network - if vnic_type: - ports = [] - - create_port_body = {'binding:vnic_type': vnic_type, - 'namestart': 'port-smoke'} - if kwargs: - # Convert security group names to security group ids - # to pass to create_port - if 'security_groups' in kwargs: - security_groups = \ - clients.security_groups_client.list_security_groups( - ).get('security_groups') - sec_dict = dict([(s['name'], s['id']) - for s in security_groups]) - - sec_groups_names = [s['name'] for s in kwargs.pop( - 'security_groups')] - security_groups_ids = [sec_dict[s] - for s in sec_groups_names] - - if security_groups_ids: - create_port_body[ - 'security_groups'] = security_groups_ids - networks = kwargs.pop('networks', []) - else: - networks = [] - - # If there are no networks passed to us we look up - # for the project's private networks and create a port. 
- # The same behaviour as we would expect when passing - # the call to the clients with no networks - if not networks: - networks = clients.networks_client.list_networks( - **{'router:external': False, 'fields': 'id'})['networks'] - - # It's net['uuid'] if networks come from kwargs - # and net['id'] if they come from - # clients.networks_client.list_networks - for net in networks: - net_id = net.get('uuid', net.get('id')) - if 'port' not in net: - port = self._create_port(network_id=net_id, - client=clients.ports_client, - **create_port_body) - ports.append({'port': port['id']}) - else: - ports.append({'port': net['port']}) - if ports: - kwargs['networks'] = ports - self.ports = ports - - tenant_network = self.get_tenant_network() - - body, servers = compute.create_test_server( - clients, - tenant_network=tenant_network, - wait_until=wait_until, - name=name, flavor=flavor, - image_id=image_id, **kwargs) - - self.addCleanup(waiters.wait_for_server_termination, - clients.servers_client, body['id']) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - clients.servers_client.delete_server, body['id']) - server = clients.servers_client.show_server(body['id'])['server'] - return server diff --git a/watcher_tempest_plugin/tests/scenario/test_execute_basic_optim.py b/watcher_tempest_plugin/tests/scenario/test_execute_basic_optim.py deleted file mode 100644 index b4b5e76..0000000 --- a/watcher_tempest_plugin/tests/scenario/test_execute_basic_optim.py +++ /dev/null @@ -1,191 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import functools - -from tempest import config -from tempest.lib.common.utils import test_utils - -from watcher_tempest_plugin.tests.scenario import base - -CONF = config.CONF - - -class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest): - """Tests for action plans""" - - GOAL_NAME = "server_consolidation" - - @classmethod - def skip_checks(cls): - super(TestExecuteBasicStrategy, cls).skip_checks() - - @classmethod - def resource_setup(cls): - super(TestExecuteBasicStrategy, cls).resource_setup() - if CONF.compute.min_compute_nodes < 2: - raise cls.skipException( - "Less than 2 compute nodes, skipping multinode tests.") - if not CONF.compute_feature_enabled.live_migration: - raise cls.skipException("Live migration is not enabled") - - cls.initial_compute_nodes_setup = cls.get_compute_nodes_setup() - enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup - if cn.get('status') == 'enabled'] - - cls.wait_for_compute_node_setup() - - if len(enabled_compute_nodes) < 2: - raise cls.skipException( - "Less than 2 compute nodes are enabled, " - "skipping multinode tests.") - - @classmethod - def get_compute_nodes_setup(cls): - services_client = cls.mgr.services_client - available_services = services_client.list_services()['services'] - - return [srv for srv in available_services - if srv.get('binary') == 'nova-compute'] - - @classmethod - def wait_for_compute_node_setup(cls): - - def _are_compute_nodes_setup(): - try: - hypervisors_client = cls.mgr.hypervisor_client - hypervisors = 
hypervisors_client.list_hypervisors( - detail=True)['hypervisors'] - available_hypervisors = set( - hyp['hypervisor_hostname'] for hyp in hypervisors) - available_services = set( - service['host'] - for service in cls.get_compute_nodes_setup()) - - return ( - available_hypervisors == available_services and - len(hypervisors) >= 2) - except Exception: - return False - - assert test_utils.call_until_true( - func=_are_compute_nodes_setup, - duration=600, - sleep_for=2 - ) - - @classmethod - def rollback_compute_nodes_status(cls): - current_compute_nodes_setup = cls.get_compute_nodes_setup() - for cn_setup in current_compute_nodes_setup: - cn_hostname = cn_setup.get('host') - matching_cns = [ - cns for cns in cls.initial_compute_nodes_setup - if cns.get('host') == cn_hostname - ] - initial_cn_setup = matching_cns[0] # Should return a single result - if cn_setup.get('status') != initial_cn_setup.get('status'): - if initial_cn_setup.get('status') == 'enabled': - rollback_func = cls.mgr.services_client.enable_service - else: - rollback_func = cls.mgr.services_client.disable_service - rollback_func(binary='nova-compute', host=cn_hostname) - - def _create_one_instance_per_host(self): - """Create 1 instance per compute node - - This goes up to the min_compute_nodes threshold so that things don't - get crazy if you have 1000 compute nodes but set min to 3. - """ - host_client = self.mgr.hosts_client - all_hosts = host_client.list_hosts()['hosts'] - compute_nodes = [x for x in all_hosts if x['service'] == 'compute'] - - for idx, _ in enumerate( - compute_nodes[:CONF.compute.min_compute_nodes], start=1): - # by getting to active state here, this means this has - # landed on the host in question. 
- self.create_server( - name="instance-%d" % idx, - image_id=CONF.compute.image_ref, - wait_until='ACTIVE', - clients=self.mgr) - - def test_execute_basic_action_plan(self): - """Execute an action plan based on the BASIC strategy - - - create an audit template with the basic strategy - - run the audit to create an action plan - - get the action plan - - run the action plan - - get results and make sure it succeeded - """ - self.addCleanup(self.rollback_compute_nodes_status) - self._create_one_instance_per_host() - - _, goal = self.client.show_goal(self.GOAL_NAME) - _, strategy = self.client.show_strategy("basic") - _, audit_template = self.create_audit_template( - goal['uuid'], strategy=strategy['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - try: - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_audit_finished, audit['uuid']), - duration=600, - sleep_for=2 - )) - except ValueError: - self.fail("The audit has failed!") - - _, finished_audit = self.client.show_audit(audit['uuid']) - if finished_audit.get('state') in ('FAILED', 'CANCELLED', 'SUSPENDED'): - self.fail("The audit ended in unexpected state: %s!" - % finished_audit.get('state')) - - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - if action_plan['state'] in ('SUPERSEDED', 'SUCCEEDED'): - # This means the action plan is superseded so we cannot trigger it, - # or it is empty. 
- return - - # Execute the action by changing its state to PENDING - _, updated_ap = self.client.start_action_plan(action_plan['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_action_plan_finished, action_plan['uuid']), - duration=600, - sleep_for=2 - )) - _, finished_ap = self.client.show_action_plan(action_plan['uuid']) - _, action_list = self.client.list_actions( - action_plan_uuid=finished_ap["uuid"]) - - self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING')) - self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED')) - - for action in action_list['actions']: - self.assertEqual('SUCCEEDED', action.get('state')) diff --git a/watcher_tempest_plugin/tests/scenario/test_execute_dummy_optim.py b/watcher_tempest_plugin/tests/scenario/test_execute_dummy_optim.py deleted file mode 100644 index 33b108a..0000000 --- a/watcher_tempest_plugin/tests/scenario/test_execute_dummy_optim.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import collections -import functools - -from tempest.lib.common.utils import test_utils - -from watcher_tempest_plugin.tests.scenario import base - - -class TestExecuteDummyStrategy(base.BaseInfraOptimScenarioTest): - """Tests for action plans""" - - def test_execute_dummy_action_plan(self): - """Execute an action plan based on the 'dummy' strategy - - - create an audit template with the 'dummy' strategy - - run the audit to create an action plan - - get the action plan - - run the action plan - - get results and make sure it succeeded - """ - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - - self.assertTrue(self.has_audit_succeeded(audit['uuid'])) - - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - if action_plan['state'] in ['SUPERSEDED', 'SUCCEEDED']: - # This means the action plan is superseded so we cannot trigger it, - # or it is empty. 
- return - - # Execute the action by changing its state to PENDING - _, updated_ap = self.client.start_action_plan(action_plan['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_action_plan_finished, action_plan['uuid']), - duration=30, - sleep_for=.5 - )) - _, finished_ap = self.client.show_action_plan(action_plan['uuid']) - _, action_list = self.client.list_actions( - action_plan_uuid=finished_ap["uuid"]) - - action_counter = collections.Counter( - act['action_type'] for act in action_list['actions']) - - self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING')) - self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED')) - - # A dummy strategy generates 2 "nop" actions and 1 "sleep" action - self.assertEqual(3, len(action_list['actions'])) - self.assertEqual(2, action_counter.get("nop")) - self.assertEqual(1, action_counter.get("sleep")) diff --git a/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py b/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py deleted file mode 100644 index 8594e94..0000000 --- a/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import functools - -from oslo_log import log -from tempest import config -from tempest.lib.common.utils import test_utils - -from watcher_tempest_plugin.tests.scenario import base - -CONF = config.CONF -LOG = log.getLogger(__name__) - - -class TestExecuteWorkloadBalancingStrategy(base.BaseInfraOptimScenarioTest): - """Tests for action plans""" - - GOAL = "workload_balancing" - - @classmethod - def skip_checks(cls): - super(TestExecuteWorkloadBalancingStrategy, cls).skip_checks() - - @classmethod - def resource_setup(cls): - super(TestExecuteWorkloadBalancingStrategy, cls).resource_setup() - if CONF.compute.min_compute_nodes < 2: - raise cls.skipException( - "Less than 2 compute nodes, skipping multinode tests.") - if not CONF.compute_feature_enabled.live_migration: - raise cls.skipException("Live migration is not enabled") - - cls.initial_compute_nodes_setup = cls.get_compute_nodes_setup() - enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup - if cn.get('status') == 'enabled'] - - cls.wait_for_compute_node_setup() - - if len(enabled_compute_nodes) < 2: - raise cls.skipException( - "Less than 2 compute nodes are enabled, " - "skipping multinode tests.") - - @classmethod - def get_hypervisors_setup(cls): - hypervisors_client = cls.mgr.hypervisor_client - hypervisors = hypervisors_client.list_hypervisors( - detail=True)['hypervisors'] - return hypervisors - - @classmethod - def get_compute_nodes_setup(cls): - services_client = cls.mgr.services_client - available_services = services_client.list_services()['services'] - - return [srv for srv in available_services - if srv.get('binary') == 'nova-compute'] - - def _migrate_server_to(self, server_id, dest_host, volume_backed=False): - kwargs = dict() - kwargs['disk_over_commit'] = False - block_migration = (CONF.compute_feature_enabled. 
- block_migration_for_live_migration and - not volume_backed) - body = self.mgr.servers_client.live_migrate_server( - server_id, host=dest_host, block_migration=block_migration, - **kwargs) - return body - - @classmethod - def wait_for_compute_node_setup(cls): - - def _are_compute_nodes_setup(): - try: - hypervisors = cls.get_hypervisors_setup() - available_hypervisors = set( - hyp['hypervisor_hostname'] for hyp in hypervisors - if hyp['state'] == 'up') - available_services = set( - service['host'] - for service in cls.get_compute_nodes_setup() - if service['state'] == 'up') - return ( - len(available_hypervisors) == len(available_services) and - len(hypervisors) >= 2) - except Exception as exc: - LOG.exception(exc) - return False - - assert test_utils.call_until_true( - func=_are_compute_nodes_setup, - duration=600, - sleep_for=2 - ) - - @classmethod - def rollback_compute_nodes_status(cls): - current_compute_nodes_setup = cls.get_compute_nodes_setup() - for cn_setup in current_compute_nodes_setup: - cn_hostname = cn_setup.get('host') - matching_cns = [ - cns for cns in cls.initial_compute_nodes_setup - if cns.get('host') == cn_hostname - ] - initial_cn_setup = matching_cns[0] # Should return a single result - if cn_setup.get('status') != initial_cn_setup.get('status'): - if initial_cn_setup.get('status') == 'enabled': - rollback_func = cls.mgr.services_client.enable_service - else: - rollback_func = cls.mgr.services_client.disable_service - rollback_func(binary='nova-compute', host=cn_hostname) - - def _create_one_instance_per_host(self): - """Create 1 instance per compute node - - This goes up to the min_compute_nodes threshold so that things don't - get crazy if you have 1000 compute nodes but set min to 3. 
- """ - host_client = self.mgr.hosts_client - all_hosts = host_client.list_hosts()['hosts'] - compute_nodes = [x for x in all_hosts if x['service'] == 'compute'] - - created_instances = [] - for _ in compute_nodes[:CONF.compute.min_compute_nodes]: - # by getting to active state here, this means this has - # landed on the host in question. - created_instances.append( - self.create_server(image_id=CONF.compute.image_ref, - wait_until='ACTIVE', clients=self.mgr)) - return created_instances - - def _pack_all_created_instances_on_one_host(self, instances): - hypervisors = [ - hyp['hypervisor_hostname'] for hyp in self.get_hypervisors_setup() - if hyp['state'] == 'up'] - node = hypervisors[0] - for instance in instances: - if instance.get('OS-EXT-SRV-ATTR:hypervisor_hostname') != node: - self._migrate_server_to(instance['id'], node) - - def test_execute_workload_stabilization(self): - """Execute an action plan using the workload_stabilization strategy""" - self.addCleanup(self.rollback_compute_nodes_status) - instances = self._create_one_instance_per_host() - self._pack_all_created_instances_on_one_host(instances) - - audit_parameters = { - "metrics": ["cpu_util"], - "thresholds": {"cpu_util": 0.2}, - "weights": {"cpu_util_weight": 1.0}, - "instance_metrics": {"cpu_util": "compute.node.cpu.percent"}} - - _, goal = self.client.show_goal(self.GOAL) - _, strategy = self.client.show_strategy("workload_stabilization") - _, audit_template = self.create_audit_template( - goal['uuid'], strategy=strategy['uuid']) - _, audit = self.create_audit( - audit_template['uuid'], parameters=audit_parameters) - - try: - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_audit_finished, audit['uuid']), - duration=600, - sleep_for=2 - )) - except ValueError: - self.fail("The audit has failed!") - - _, finished_audit = self.client.show_audit(audit['uuid']) - if finished_audit.get('state') in ('FAILED', 'CANCELLED'): - self.fail("The audit ended in unexpected 
state: %s!" % - finished_audit.get('state')) - - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - _, action_list = self.client.list_actions( - action_plan_uuid=action_plan["uuid"])