diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 2850371..0000000 --- a/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Add patterns in here to exclude files created by tools integrated with this -# repository, such as test frameworks from the project's recommended workflow, -# rendered documentation and package builds. -# -# Don't add patterns to exclude files created by preferred personal tools -# (editors, IDEs, your operating system itself even). These should instead be -# maintained outside the repository, for example in a ~/.gitignore file added -# with: -# -# git config --global core.excludesfile '~/.gitignore' - -pkg/ -Gemfile.lock -vendor/ -spec/fixtures/ -.vagrant/ -.bundle/ -coverage/ -.idea/ -*.iml -/openstack -/log - -# Files created by releasenotes build -releasenotes/build -.tox diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 60ca981..0000000 --- a/.zuul.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- project: - templates: - - puppet-openstack-check-jobs - - puppet-openstack-module-unit-jobs - - release-notes-jobs-python3 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index ea5c7cf..0000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -The source repository for this project can be found at: - - https://opendev.org/openstack/puppet-monasca - -Pull requests submitted through GitHub are not monitored. - -To start contributing to OpenStack, follow the steps in the contribution guide -to set up and use Gerrit: - - https://docs.openstack.org/contributors/code-and-documentation/quick-start.html - -Bugs should be filed on Launchpad: - - https://bugs.launchpad.net/puppet-monasca - -For more specific information about contributing to this repository, see the -Puppet OpenStack contributor guide: - - https://docs.openstack.org/puppet-openstack-guide/latest/contributor/contributing.html diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 3f46561..0000000 --- a/Gemfile +++ /dev/null @@ -1,36 +0,0 @@ -source ENV['GEM_SOURCE'] || "https://rubygems.org" - -group :development, :test, :system_tests do - spec_helper_dir = '/home/zuul/src/opendev.org/openstack/puppet-openstack_spec_helper' - if File.directory?(spec_helper_dir) - if ENV['ZUUL_PROJECT'] == 'openstack/puppet-openstack_spec_helper' - gem 'puppet-openstack_spec_helper', - :path => '../..', - :require => 'false' - else - gem 'puppet-openstack_spec_helper', - :path => spec_helper_dir, - :require => 'false' - end - else - spec_helper_version = ENV['ZUUL_BRANCH'] || "master" - gem 'puppet-openstack_spec_helper', - :git => 'https://opendev.org/openstack/puppet-openstack_spec_helper', - :ref => spec_helper_version, - :require => 'false' - end -end - -if facterversion = ENV['FACTER_GEM_VERSION'] - gem 'facter', facterversion, :require => false -else - gem 'facter', :require => false -end - -if puppetversion = ENV['PUPPET_GEM_VERSION'] - gem 'puppet', puppetversion, :require => false -else - gem 'puppet', :require => false -end - -# vim:ft=ruby diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a..0000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/Puppetfile_extras b/Puppetfile_extras deleted file mode 100644 index 581b7d6..0000000 --- a/Puppetfile_extras +++ /dev/null @@ -1,7 +0,0 @@ -mod 'wget', - :git => "https://github.com/maestrodev/puppet-wget.git", - :tag => 'v1.7.3' - -mod 'epel', - :git => "https://github.com/stahnma/puppet-module-epel", - :tag => '1.2.2' diff --git a/README.md b/README.md deleted file mode 100644 index d7cd143..0000000 --- a/README.md +++ /dev/null @@ -1,121 +0,0 @@ -puppet-monasca -============== - -#### Table of Contents - -1. [Overview - What is the monasca module?](#overview) -2. [Module Description - What does the module do?](#module-description) -3. [Setup - The basics of getting started with monasca](#setup) -4. [Implementation - An under-the-hood peek at what the module is doing](#implementation) -5. [Limitations - OS compatibility, etc.](#limitations) -6. [Development - Guide for contributing to the module](#development) -7. [Contributors - Those with commits](#contributors) - -Overview --------- - -The monasca module is a part of [OpenStack](https://opendev.org/openstack), and is meant to assist with the installation and configuration of monasca itself, and its dependent services (mentioned below). 
- -Module Description ------------------ - -Setup ----- - -**What the monasca module affects:** - -* monasca, monitoring as a service for OpenStack. -* storm, Apache's distributed realtime computational system. -* kafka, Apache's publish-subscribe messaging system. -* influxdb, a stand-alone open-source distributed time series database. - -Implementation -------------- - -### monasca - -monasca is a combination of Puppet manifests that configure the monasca client and server configuration, as well as monasca's dependent services. - -### Types - -#### monasca_config - -The `monasca_config` provider is a child of the ini_setting provider. It allows one to write an entry in the `/etc/monasca/monasca.conf` file. - -```puppet -monasca_config { 'DEFAULT/debug' : - value => true, -} -``` - -This will write `debug=true` in the `[DEFAULT]` section. - -##### name - -Section/setting name to manage from `monasca.conf` - -##### value - -The value of the setting to be defined. - -##### secret - -Whether to hide the value from Puppet logs. Defaults to `false`. - -##### ensure_absent_val - -If value is equal to ensure_absent_val then the resource will behave as if `ensure => absent` was specified. Defaults to `` - -#### agent_config - -The `agent_config` provider is a child of the ini_setting provider. It allows one to write an entry in the `/etc/monasca/agent/agent.conf` file. - -```puppet -agent_config { 'DEFAULT/verbose' : - value => true, -} -``` - -This will write `verbose=true` in the `[DEFAULT]` section. - -##### name - -Section/setting name to manage from `agent.conf` - -##### value - -The value of the setting to be defined. - -##### secret - -Whether to hide the value from Puppet logs. Defaults to `false`. - -##### ensure_absent_val - -If value is equal to ensure_absent_val then the resource will behave as if `ensure => absent` was specified. Defaults to `` - -Limitations ----------- -This module currently only supports Debian-based installs. - -Development ----------- - -Developer documentation for the entire puppet-openstack project. - -* http://docs.openstack.org/developer/puppet-openstack-guide/ - -Contributors ------------ - -* https://github.com/openstack/puppet-monasca/graphs/contributors - -Release Notes ------------- - -* https://docs.openstack.org/releasenotes/puppet-monasca - -Repository ---------- - -* https://opendev.org/openstack/puppet-monasca diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..543210d --- /dev/null +++ b/README.rst @@ -0,0 +1,8 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git source code +management system. To see the contents of this repository before it reached its +end of life, please check out the previous commit with "git checkout HEAD^1". + +For any further questions, please email openstack-discuss@lists.openstack.org +or join #openstack-dev on OFTC. diff --git a/Rakefile b/Rakefile deleted file mode 100644 index 168d108..0000000 --- a/Rakefile +++ /dev/null @@ -1 +0,0 @@ -require 'puppet-openstack_spec_helper/rake_tasks' diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 01b2ca6..0000000 --- a/bindep.txt +++ /dev/null @@ -1,12 +0,0 @@ -# This is a cross-platform list tracking distribution packages needed by tests; -# see http://docs.openstack.org/infra/bindep/ for additional information.
- -libxml2-devel [test platform:rpm] -libxml2-dev [test platform:dpkg] -libxslt-devel [test platform:rpm] -libxslt1-dev [test platform:dpkg] -ruby-devel [test platform:rpm] -ruby-dev [test platform:dpkg] -zlib1g-dev [test platform:dpkg] -zlib-devel [test platform:rpm] -puppet [build] diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 559dc80..0000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -# This is required for the docs build jobs -sphinx>=3.5.1 # BSD -openstackdocstheme>=2.2.1 # Apache-2.0 - -# This is required for the releasenotes build jobs -reno>=3.1.0 # Apache-2.0 diff --git a/examples/apiserver.pp b/examples/apiserver.pp deleted file mode 100644 index 4c27699..0000000 --- a/examples/apiserver.pp +++ /dev/null @@ -1 +0,0 @@ -include monasca::apiserver diff --git a/examples/init.pp b/examples/init.pp deleted file mode 100644 index c2153d4..0000000 --- a/examples/init.pp +++ /dev/null @@ -1,12 +0,0 @@ -# The baseline for module testing used by Puppet Inc. is that each manifest -# should have a corresponding test manifest that declares that class or defined -# type. -# -# Tests are then run by using puppet apply --noop (to check for compilation -# errors and view a log of events) or by fully applying the test in a virtual -# environment (to compare the resulting system state to the desired state). -# -# Learn more about module testing here: -# https://puppet.com/docs/puppet/latest/bgtm.html#testing-your-module -# -include monasca diff --git a/examples/storm.pp b/examples/storm.pp deleted file mode 100644 index c955ddc..0000000 --- a/examples/storm.pp +++ /dev/null @@ -1 +0,0 @@ -include monasca::storm diff --git a/files/agent_requirements.txt b/files/agent_requirements.txt deleted file mode 100644 index 84d1e17..0000000 --- a/files/agent_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -monasca-agent \ No newline at end of file diff --git a/files/monasca-api.conf b/files/monasca-api.conf deleted file mode 100644 index 9a671f0..0000000 --- a/files/monasca-api.conf +++ /dev/null @@ -1,14 +0,0 @@ -# Startup script for the Monasca API - -description "Monasca API java app" -start on runlevel [2345] - -console log -respawn - -setgid monasca -setuid monasca_api -exec /usr/bin/java -Xmx8g -cp \ - /opt/monasca/monasca-api.jar:/opt/vertica/java/lib/vertica_jdbc.jar \ - monasca.api.MonApiApplication server /etc/monasca/api-config.yml \ - > /dev/null diff --git a/files/monasca-thresh b/files/monasca-thresh deleted file mode 100644 index 9742f9f..0000000 --- a/files/monasca-thresh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -### BEGIN INIT INFO -# Provides: monasca-thresh -# Required-Start: $nimbus -# Required-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: -# Short-Description: Monitoring threshold engine running under storm -# Description: -### END INIT INFO - -case "$1" in - start) - $0 status - if [ $? -ne 0 ]; then - sudo -Hu thresh /opt/storm/current/bin/storm jar /opt/monasca/monasca-thresh.jar monasca.thresh.ThresholdingEngine /etc/monasca/thresh-config.yml thresh-cluster - exit $? - else - echo "monasca-thresh is already running" - exit 0 - fi - ;; - stop) - # On system shutdown storm is being shutdown also and this will hang so skip shutting down thresh in that case - if [ -e '/sbin/runlevel' ]; then # upstart/sysV case - if [ $(runlevel | cut -d\ -f 2) == 0 ]; then - exit 0 - fi - else # systemd case - systemctl list-units --type=target |grep shutdown.target - if [ $? 
-eq 0 ]; then - exit 0 - fi - fi - sudo -Hu thresh /opt/storm/current/bin/storm kill thresh-cluster - # The above command returns but actually takes awhile loop watching status - while true; do - sudo -Hu thresh /opt/storm/current/bin/storm list |grep thresh-cluster - if [ $? -ne 0 ]; then break; fi - sleep 1 - done - ;; - status) - sudo -Hu thresh /opt/storm/current/bin/storm list |grep thresh-cluster - ;; - restart) - $0 stop - $0 start - ;; -esac diff --git a/files/monasca_stack.sh b/files/monasca_stack.sh deleted file mode 100644 index 1e6e1a7..0000000 --- a/files/monasca_stack.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/bin/bash - -MIRROR_FILE="/etc/monasca/monasca-persister-mirror.yml" -STORM_FILE="/opt/storm/current/conf/storm.yaml" -INFLUXDB_FILE="/etc/opt/influxdb/influxdb.conf" -INCLUDE_THRESH="include_thresh_flag" - -# -# Get the list of monasca services in the order they should be -# started in. Note that we intentionally don't stop/start -# verticad -- vertica doesn't like that. Use adminTools -# for the entire cluster instead. -# -get_up_list() { - - - if [ -e $INFLUXDB_FILE ] - then - echo "influxdb" - fi - - echo "zookeeper kafka storm-supervisor storm-nimbus storm-ui" - - if [ "$1" = "$INCLUDE_THRESH" ] - then - echo "monasca-thresh" - fi - - if [ -e $MIRROR_FILE ] - then - echo "monasca-persister-mirror" - fi - - echo "monasca-persister monasca-notification monasca-api" -} - -# -# Get the list of monasca services in the order they should be -# stopped in. -# -get_down_list() { - - echo "monasca-api monasca-notification monasca-persister" - - if [ -e $MIRROR_FILE ] - then - echo "monasca-persister-mirror" - fi - - if [ "$1" = "$INCLUDE_THRESH" ] - then - echo "monasca-thresh" - fi - - echo "storm-ui storm-nimbus storm-supervisor kafka zookeeper" - - if [ -e $INFLUXDB_FILE ] - then - echo "influxdb" - fi -} - -status() { - for x in $(get_up_list $INCLUDE_THRESH) - do - service $x status - done -} - -start() { - for x in $(get_up_list $1) - do - STATUS=$(is_service_running $x) - # - # Only start a service if it isn't currently running - # - if [ $STATUS != 0 ] - then - service $x start - # - # Many of these services are java -- give them - # some time to come up before starting a dependent - # service. - # - sleep 10 - fi - STATUS=$(is_service_running $x) - if [ $STATUS != 0 ] - then - echo "$x did not start -- diagnose and try starting the stack again!" - exit 1 - fi - done -} - -is_service_running() { - STATUS=$(service $1 status 2>&1) - if [ $? != 0 ] || [[ "$STATUS" == *"stop/waiting"* ]] - then - echo "1" - else - echo "0" - fi -} - -stop() { - for x in $(get_down_list $1) - do - service $x stop - # - # Give the service time to clean up and stop before - # moving on. - # - sleep 10 - STATUS=$(is_service_running $x) - if [ $STATUS != 1 ] - then - echo "$x did not stop -- diagnose and try stopping the stack again!" 
- exit 1 - fi - done -} - -tail_logs() { - /usr/bin/tail -f /opt/storm/current/logs/*log \ - /var/log/monasca/*log \ - /var/log/influxdb/*log \ - /opt/vertica/log/*log \ - /var/log/kafka/*log \ - /opt/kafka/logs/*log -} - -tail_metrics() { - /usr/bin/tail -f /tmp/kafka-logs/metr*/*log | /usr/bin/strings -} - -lag() { - # - # Print the consumer lag - # - /opt/kafka/bin/kafka-run-class.sh kafka.admin.ConsumerGroupCommand \ - --zookeeper localhost:2181 \ - --group $1 --describe 2>&1 -} - -case "$1" in - status) - status - ;; - start) - start - ;; - start-cluster) - start $INCLUDE_THRESH - ;; - stop) - stop - ;; - stop-cluster) - stop $INCLUDE_THRESH - ;; - restart) - stop - sleep 2 - start - ;; - restart-cluster) - stop $INCLUDE_THRESH - sleep 2 - start $INCLUDE_THRESH - ;; - tail-logs) - tail_logs - ;; - tail-metrics) - tail_metrics - ;; - local-lag) - lag '1_metrics' - ;; - mirror-lag) - lag '2_metrics' - ;; - *) - echo "Usage: "$1" {status|start|start-cluster|stop|stop-cluster|restart|restart-cluster|tail-logs|tail-metrics|local-lag|mirror-lag}" - exit 1 -esac diff --git a/files/vertica/mon_alarms_schema.sql b/files/vertica/mon_alarms_schema.sql deleted file mode 100644 index 9f8de41..0000000 --- a/files/vertica/mon_alarms_schema.sql +++ /dev/null @@ -1,14 +0,0 @@ -CREATE SCHEMA MonAlarms; - -CREATE TABLE MonAlarms.StateHistory( - id AUTO_INCREMENT, - tenant_id VARCHAR, - alarm_id VARCHAR, - metrics VARCHAR (65000), - old_state VARCHAR, - new_state VARCHAR, - sub_alarms VARCHAR (65000), - reason VARCHAR(65000), - reason_data VARCHAR(65000), - time_stamp TIMESTAMP NOT NULL -) PARTITION BY EXTRACT('year' FROM time_stamp)*100 + EXTRACT('month' FROM time_stamp); diff --git a/files/vertica/mon_grants.sql b/files/vertica/mon_grants.sql deleted file mode 100644 index 02cb476..0000000 --- a/files/vertica/mon_grants.sql +++ /dev/null @@ -1,15 +0,0 @@ -GRANT USAGE ON SCHEMA MonMetrics TO mon_persister; -GRANT USAGE ON SCHEMA MonAlarms TO mon_persister; -GRANT ALL ON TABLE MonMetrics.Measurements TO mon_persister; -GRANT ALL ON TABLE MonMetrics.Definitions TO mon_persister; -GRANT ALL ON TABLE MonMetrics.Dimensions TO mon_persister; -GRANT ALL ON TABLE MonMetrics.DefinitionDimensions TO mon_persister; -GRANT ALL ON TABLE MonAlarms.StateHistory TO mon_persister; - -GRANT USAGE ON SCHEMA MonMetrics TO mon_api; -GRANT USAGE ON SCHEMA MonAlarms TO mon_api; -GRANT SELECT ON TABLE MonMetrics.Measurements TO mon_api; -GRANT SELECT ON TABLE MonMetrics.Definitions TO mon_api; -GRANT SELECT ON TABLE MonMetrics.Dimensions TO mon_api; -GRANT SELECT ON TABLE MonMetrics.DefinitionDimensions TO mon_api; -GRANT ALL ON TABLE MonAlarms.StateHistory TO mon_api; diff --git a/files/vertica/mon_metrics_schema.sql b/files/vertica/mon_metrics_schema.sql deleted file mode 100644 index 9583ca2..0000000 --- a/files/vertica/mon_metrics_schema.sql +++ /dev/null @@ -1,152 +0,0 @@ -DROP SCHEMA MonMetrics CASCADE; - -CREATE SCHEMA MonMetrics; - -CREATE TABLE MonMetrics.Measurements ( - definition_dimensions_id BINARY(20) NOT NULL, - time_stamp TIMESTAMP NOT NULL, - value FLOAT NOT NULL, - value_meta VARCHAR(8192) -) PARTITION BY EXTRACT('year' FROM time_stamp)*100 + EXTRACT('month' FROM time_stamp); - -CREATE TABLE MonMetrics.Definitions( - id BINARY(20) NOT NULL, - name VARCHAR(255) NOT NULL, - tenant_id VARCHAR(255) NOT NULL, - region VARCHAR(255) NOT NULL, - PRIMARY KEY(id), - CONSTRAINT MetricsDefinitionsConstraint UNIQUE(name, tenant_id, region) -); - -CREATE TABLE MonMetrics.Dimensions ( - dimension_set_id BINARY(20) NOT 
NULL, - name VARCHAR(255) NOT NULL, - value VARCHAR(255) NOT NULL, - CONSTRAINT MetricsDimensionsConstraint UNIQUE(dimension_set_id, name, value) -); - -CREATE TABLE MonMetrics.DefinitionDimensions ( - id BINARY(20) NOT NULL, - definition_id BINARY(20) NOT NULL, - dimension_set_id BINARY(20) NOT NULL, - CONSTRAINT MetricsDefinitionDimensionsConstraint UNIQUE(definition_id, dimension_set_id) - ); - --- Projections --- ** These are for a single node system with no k safety - -CREATE PROJECTION Measurements_DBD_1_rep_MonMetrics /*+createtype(D)*/ -( - definition_dimensions_id ENCODING RLE, - time_stamp ENCODING COMMONDELTA_COMP, - value ENCODING AUTO, - value_meta ENCODING AUTO -) -AS - SELECT definition_dimensions_id, - time_stamp, - value, - value_meta - FROM MonMetrics.Measurements - ORDER BY definition_dimensions_id, - time_stamp -UNSEGMENTED ALL NODES; - -CREATE PROJECTION Measurements_time_order_rep_MonMetrics /*+createtype(D)*/ -( - definition_dimensions_id ENCODING RLE, - time_stamp ENCODING COMMONDELTA_COMP, - value ENCODING AUTO, - value_meta ENCODING AUTO -) -AS - SELECT definition_dimensions_id, - time_stamp, - value, - value_meta - FROM MonMetrics.Measurements - ORDER BY time_stamp, - definition_dimensions_id -UNSEGMENTED ALL NODES; - -CREATE PROJECTION Definitions_DBD_2_rep_MonMetrics /*+createtype(D)*/ -( - id ENCODING AUTO, - name ENCODING AUTO, - tenant_id ENCODING RLE, - region ENCODING RLE -) -AS - SELECT id, - name, - tenant_id, - region - FROM MonMetrics.Definitions - ORDER BY region, - tenant_id, - name -UNSEGMENTED ALL NODES; - -CREATE PROJECTION Dimensions_DBD_3_rep_MonMetrics /*+createtype(D)*/ -( - dimension_set_id ENCODING AUTO, - name ENCODING RLE, - value ENCODING AUTO -) -AS - SELECT dimension_set_id, - name, - value - FROM MonMetrics.Dimensions - ORDER BY name, - dimension_set_id, - value -UNSEGMENTED ALL NODES; - -CREATE PROJECTION MonMetrics.Dimensions_rep_set_id /*+createtype(D)*/ -( -dimension_set_id, -name ENCODING RLE, -value -) -AS -SELECT Dimensions.dimension_set_id, - Dimensions.name, - Dimensions.value -FROM MonMetrics.Dimensions -ORDER BY Dimensions.dimension_set_id, - Dimensions.name, - Dimensions.value -UNSEGMENTED ALL NODES; - -CREATE PROJECTION DefinitionDimensions_DBD_4_rep_MonMetrics /*+createtype(D)*/ -( - id ENCODING AUTO, - definition_id ENCODING RLE, - dimension_set_id ENCODING AUTO -) -AS - SELECT id, - definition_id, - dimension_set_id - FROM MonMetrics.DefinitionDimensions - ORDER BY definition_id, - dimension_set_id -UNSEGMENTED ALL NODES; - -CREATE PROJECTION MonMetrics.DefinitionDimensions_rep_set_id /*+createtype(D)*/ -( -id ENCODING AUTO, -definition_id ENCODING AUTO, -dimension_set_id ENCODING RLE -) -AS -SELECT id, - definition_id, - dimension_set_id -FROM MonMetrics.DefinitionDimensions -ORDER BY dimension_set_id, - definition_id -UNSEGMENTED ALL NODES; - -select refresh('MonMetrics.Measurements, MonMetrics.Definitions, MonMetrics.Dimensions, MonMetrics.DefinitionDimensions'); diff --git a/files/vertica/update_vertica_stats.sh b/files/vertica/update_vertica_stats.sh deleted file mode 100644 index 5748e7f..0000000 --- a/files/vertica/update_vertica_stats.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -MONASCA_TABLES="\ - MonMetrics.Dimensions \ - MonMetrics.Definitions \ - MonMetrics.DefinitionDimensions \ - MonMetrics.Measurements \ - MonAlarms.StateHistory" - - -for table in $MONASCA_TABLES -do - /usr/sbin/vsql -c "select analyze_statistics('$table');" -done diff --git a/lib/puppet/provider/agent_config/ini_setting.rb 
b/lib/puppet/provider/agent_config/ini_setting.rb deleted file mode 100644 index 5ef7af3..0000000 --- a/lib/puppet/provider/agent_config/ini_setting.rb +++ /dev/null @@ -1,10 +0,0 @@ -Puppet::Type.type(:agent_config).provide( - :ini_setting, - :parent => Puppet::Type.type(:openstack_config).provider(:ini_setting) -) do - - def self.file_path - '/etc/monasca/agent/agent.conf' - end - -end diff --git a/lib/puppet/provider/monasca_config/ini_setting.rb b/lib/puppet/provider/monasca_config/ini_setting.rb deleted file mode 100644 index a4e54d7..0000000 --- a/lib/puppet/provider/monasca_config/ini_setting.rb +++ /dev/null @@ -1,10 +0,0 @@ -Puppet::Type.type(:monasca_config).provide( - :ini_setting, - :parent => Puppet::Type.type(:openstack_config).provider(:ini_setting) -) do - - def self.file_path - '/etc/monasca/monasca.conf' - end - -end diff --git a/lib/puppet/provider/monasca_ini/ini_setting.rb b/lib/puppet/provider/monasca_ini/ini_setting.rb deleted file mode 100644 index 0d276f9..0000000 --- a/lib/puppet/provider/monasca_ini/ini_setting.rb +++ /dev/null @@ -1,10 +0,0 @@ -Puppet::Type.type(:monasca_ini).provide( - :ini_setting, - :parent => Puppet::Type.type(:openstack_config).provider(:ini_setting) -) do - - def self.file_path - '/etc/monasca/monasca.ini' - end - -end diff --git a/lib/puppet/type/agent_config.rb b/lib/puppet/type/agent_config.rb deleted file mode 100644 index ec0490e..0000000 --- a/lib/puppet/type/agent_config.rb +++ /dev/null @@ -1,49 +0,0 @@ -Puppet::Type.newtype(:agent_config) do - - ensurable - - newparam(:name, :namevar => true) do - desc 'Section/setting name to manage from agent.conf' - newvalues(/\S+\/\S+/) - end - - newproperty(:value) do - desc 'The value of the setting to be defined.' - munge do |value| - value = value.to_s.strip - value.capitalize! if value =~ /^(true|false)$/i - value - end - newvalues(/^[\S ]*$/) - - def is_to_s( currentvalue ) - if resource.secret? - return '[old secret redacted]' - else - return currentvalue - end - end - - def should_to_s( newvalue ) - if resource.secret? - return '[new secret redacted]' - else - return newvalue - end - end - end - - newparam(:secret, :boolean => true) do - desc 'Whether to hide the value from Puppet logs. Defaults to `false`.' - - newvalues(:true, :false) - - defaultto false - end - - newparam(:ensure_absent_val) do - desc 'A value that is specified as the value property will behave as if ensure => absent was specified' - defaultto('') - end - -end diff --git a/lib/puppet/type/monasca_config.rb b/lib/puppet/type/monasca_config.rb deleted file mode 100644 index 4a52eab..0000000 --- a/lib/puppet/type/monasca_config.rb +++ /dev/null @@ -1,49 +0,0 @@ -Puppet::Type.newtype(:monasca_config) do - - ensurable - - newparam(:name, :namevar => true) do - desc 'Section/setting name to manage from monasca.conf' - newvalues(/\S+\/\S+/) - end - - newproperty(:value) do - desc 'The value of the setting to be defined.' - munge do |value| - value = value.to_s.strip - value.capitalize! if value =~ /^(true|false)$/i - value - end - newvalues(/^[\S ]*$/) - - def is_to_s( currentvalue ) - if resource.secret? - return '[old secret redacted]' - else - return currentvalue - end - end - - def should_to_s( newvalue ) - if resource.secret? - return '[new secret redacted]' - else - return newvalue - end - end - end - - newparam(:secret, :boolean => true) do - desc 'Whether to hide the value from Puppet logs. Defaults to `false`.' 
- - newvalues(:true, :false) - - defaultto false - end - - newparam(:ensure_absent_val) do - desc 'A value that is specified as the value property will behave as if ensure => absent was specified' - defaultto('') - end - -end diff --git a/lib/puppet/type/monasca_ini.rb b/lib/puppet/type/monasca_ini.rb deleted file mode 100644 index 8bbe7b3..0000000 --- a/lib/puppet/type/monasca_ini.rb +++ /dev/null @@ -1,48 +0,0 @@ -Puppet::Type.newtype(:monasca_ini) do - - ensurable - - newparam(:name, :namevar => true) do - desc 'Section/setting name to manage from monasca.ini' - newvalues(/\S+\/\S+/) - end - - newproperty(:value) do - desc 'The value of the setting to be defined.' - munge do |value| - value = value.to_s.strip - value.capitalize! if value =~ /^(true|false)$/i - value - end - - def is_to_s( currentvalue ) - if resource.secret? - return '[old secret redacted]' - else - return currentvalue - end - end - - def should_to_s( newvalue ) - if resource.secret? - return '[new secret redacted]' - else - return newvalue - end - end - end - - newparam(:secret, :boolean => true) do - desc 'Whether to hide the value from Puppet logs. Defaults to `false`.' - - newvalues(:true, :false) - - defaultto false - end - - newparam(:ensure_absent_val) do - desc 'A value that is specified as the value property will behave as if ensure => absent was specified' - defaultto('') - end - -end diff --git a/manifests/agent.pp b/manifests/agent.pp deleted file mode 100644 index fd55414..0000000 --- a/manifests/agent.pp +++ /dev/null @@ -1,276 +0,0 @@ -# == Class: monasca::agent -# -# Sets up the monasca agent. -# -# === Parameters -# -# [*url*] -# url of the monasca api server to POST metrics to -# -# [*username*] -# monasca agent name -# -# [*password*] -# monasca agent password -# -# [*keystone_url*] -# keystone endpoint for authentication -# -# [*enabled*] -# flag to enable/disable the monasca agent -# -# [*project_name*] -# name of keystone project to POST metrics for -# -# [*project_domain_id*] -# domain id of the keystone project to POST metrics for -# -# [*project_domain_name*] -# domain name of the keystone project to POST metrics for -# -# [*project_id*] -# id of keystone project to POST metrics for -# -# [*ca_file*] -# certificate file to use in keystone authentication -# -# [*max_buffer_size*] -# buffer size for metrics POSTing -# -# [*backlog_send_rate*] -# how many metrics to POST from the backlog at a time -# -# [*amplifier*] -# multiplier for testing, allows POSTing the same metric multiple times -# -# [*hostname*] -# hostname for this monasca agent -# -# [*dimensions*] -# list of dimensions for this monasca agent -# -# [*recent_point_threshold*] -# number of seconds to consider a metric 'recent' -# -# [*check_freq*] -# how frequently (in seconds) to run the agent -# -# [*listen_port*] -# port for the monasca agent to listen on -# -# [*non_local_traffic*] -# flag for whether or not to support non-local traffic -# (see monasca documentation for more details) -# -# [*statsd_port*] -# port for the statsd server -# -# [*statsd_interval*] -# frequency to poll statsd -# -# [*statsd_forward_host*] -# host for statsd server -# -# [*statsd_forward_port*] -# port for statsd server -# -# [*log_level*] -# logging level -- INFO, DEBUG, ALL...
-# -# [*collector_log_file*] -# logfile for monasca collector -# -# [*forwarder_log_file*] -# logfile for monasca forwarder -# -# [*monstatsd_log_file*] -# logfile for monasca statsd collector -# -# [*log_to_syslog*] -# flag for whether or not to log to syslog -# -# [*syslog_host*] -# host of the syslog server -# -# [*syslog_port*] -# port of the syslog server -# -# [*virtual_env*] -# path of python virtual environment symlink -# -# [*virtual_env_dir*] -# directory for python virtual environments -# -# [*virtual_env_reqs*] -# requirements file for the agent venv -# -# [*virtual_envs*] -# a hash of virtual envs to build -# -# [*agent_user*] -# name of the monasca agent user -# -# [*install_python_deps*] -# flag for whether or not to install python dependencies -# -# [*python_dep_ensure*] -# flag for whether or not to ensure/update python dependencies -# -# [*pip_install_args*] -# arguments to pass to the pip install command -# -class monasca::agent( - $url, - $username, - $password, - $keystone_url, - $enabled = true, - $project_name = 'null', - $project_domain_id = 'null', - $project_domain_name = 'null', - $project_id = 'null', - $ca_file = undef, - $max_buffer_size = '1000', - $backlog_send_rate = '1000', - $amplifier = '0', - $hostname = undef, - $dimensions = {}, - $recent_point_threshold = '30', - $check_freq = '15', - $listen_port = '17123', - $non_local_traffic = false, - $statsd_port = '8125', - $statsd_interval = '10', - $statsd_forward_host = undef, - $statsd_forward_port = '8125', - $log_level = 'INFO', - $collector_log_file = '/var/log/monasca/agent/collector.log', - $forwarder_log_file = '/var/log/monasca/agent/forwarder.log', - $monstatsd_log_file = '/var/log/monasca/agent/monstatsd.log', - $log_to_syslog = false, - $syslog_host = undef , - $syslog_port = undef, - $virtual_env = '/var/lib/monasca-agent', - $virtual_env_dir = '/var/lib/monasca-agent-venvs', - $virtual_env_reqs = 'puppet:///modules/monasca/agent_requirements.txt', - $virtual_envs = {'default'=> {'venv_active'=> true}}, - $agent_user = 'monasca-agent', - $install_python_deps = true, - $python_dep_ensure = 'present', - $pip_install_args = '', -) { - include monasca - include monasca::params - - $agent_dir = "${::monasca::monasca_dir}/agent" - $additional_checksd = "${agent_dir}/checks.d" - $conf_dir = "${agent_dir}/conf.d" - - if $install_python_deps { - package { ['python-virtualenv', 'python-dev']: - ensure => $python_dep_ensure, - } - } - - file { $virtual_env_dir: - ensure => 'directory', - owner => 'root', - group => 'root', - mode => '0755', - } - $defaults = { - symlink => $virtual_env, - basedir => $virtual_env_dir, - venv_extra_args => $pip_install_args, - venv_requirements => $virtual_env_reqs, - } - create_resources('::monasca::virtualenv::agent_instance', $virtual_envs, - $defaults) - - user { $agent_user: - ensure => present, - groups => $::monasca::group, - require => Group[$::monasca::group] - } - - file{ "${::monasca::log_dir}/agent": - ensure => 'directory', - owner => $agent_user, - group => $::monasca::group, - mode => '0755', - require => File[$::monasca::log_dir], - before => Service['monasca-agent'], - } - - file { $agent_dir: - ensure => 'directory', - owner => 'root', - group => $::monasca::group, - mode => '0755', - require => File[$::monasca::monasca_dir], - } - - file { "${agent_dir}/agent.yaml": - owner => 'root', - group => 'root', - mode => '0755', - content => template('monasca/agent.yaml.erb'), - require => File[$agent_dir], - before => Service['monasca-agent'], - } ~> 
Service['monasca-agent'] - - file { $additional_checksd: - ensure => 'directory', - owner => 'root', - group => 'root', - mode => '0755', - require => File[$agent_dir], - before => Service['monasca-agent'], - # ensure removal of all checks unmanaged by puppet - purge => true, - force => true, - recurse => true, - } - - file { $conf_dir: - ensure => 'directory', - owner => 'root', - group => $::monasca::group, - mode => '0755', - require => File[$agent_dir], - before => Service['monasca-agent'], - # ensure removal of all checks unmanaged by puppet - purge => true, - force => true, - recurse => true, - } - - file { '/etc/init.d/monasca-agent': - owner => 'root', - group => 'root', - mode => '0755', - content => template('monasca/monasca-agent.init.erb'), - before => Service['monasca-agent'], - } - - $log_dir = "${::monasca::log_dir}/agent" - file { "${agent_dir}/supervisor.conf": - owner => 'root', - group => 'root', - mode => '0644', - content => template('monasca/supervisor.conf.erb'), - notify => Service['monasca-agent'], - } - - if $enabled { - $ensure = 'running' - } else { - $ensure = 'stopped' - } - - service { 'monasca-agent': - ensure => $ensure, - enable => $enabled, - name => $::monasca::params::agent_service, - } -} diff --git a/manifests/alarmdefs.pp b/manifests/alarmdefs.pp deleted file mode 100644 index 8e3b90e..0000000 --- a/manifests/alarmdefs.pp +++ /dev/null @@ -1,140 +0,0 @@ -# == Class: monasca::alarmdefs -# -# Class for bootstrapping monasca alarm definitions -# -# === Parameters: -# -# [*alarm_definition_config_source*] -# location of alarm definitions template to bootstrap in mysql database -# -# [*notification_config_source*] -# location of notification methods template to bootstrap in mysql database -# -# [*notification_assignments_source*] -# location of notification assignments template to bootstrap in mysql database -# -# [*admin_username*] -# name of the monasca admin user -# -# [*admin_password*] -# password of the monasca admin user -# -# [*api_server_url*] -# monasca api server endpoint -# -# [*auth_url*] -# keystone endpoint -# -# [*project_name*] -# keystone project name to bootstrap alarm definitions for -# -# [*virtual_env*] -# location of python virtual environment to install to -# -## DEPRECATED PARAMS -# -# [*install_python_deps*] -# flag for whether or not to install python dependencies -# -# [*python_dep_ensure*] -# flag for whether or not to ensure/update python dependencies -# -class monasca::alarmdefs( - $alarm_definition_config_source = 'monasca/alarm_definition_config.json.erb', - $notification_config_source = 'monasca/notification_config.json.erb', - $notification_assignments_source = 'monasca/notification_assignments.json.erb', - $admin_username = 'monasca-admin', - $admin_password = undef, - $api_server_url = undef, - $auth_url = undef, - $project_name = undef, - $virtual_env = '/var/www/monasca-alarmdefs', - ## DEPRECATED PARAMS - $install_python_deps = undef, - $python_dep_ensure = undef, -) -{ - include monasca::params - - if $install_python_deps { - warning('monasca::alarmdefs::install_python_deps is deprecated and has no effect') - } - if $python_dep_ensure { - warning('monasca::alarmdefs::python_dep_ensure is deprecated and has no effect') - } - - $alarm_definition_config = '/tmp/alarm_definition_config.json' - $notification_config = '/tmp/notification_config.json' - $notification_assignments = '/tmp/notification_assignments.json' - $script_name = 'bootstrap-alarm-definitions.py' - $script = "${virtual_env}/bin/${script_name}" - 
$cleanup_script = "${virtual_env}/bin/vm_alarm_cleanup.py" - $sql_host = $::monasca::params::sql_host - $sql_user = $::monasca::params::sql_user - $sql_password = $::monasca::params::sql_password - $sql_port = $::monasca::params::sql_port - - python::virtualenv { $virtual_env : - owner => 'root', - group => 'root', - before => [Exec[$script], File[$script]], - require => [Package['virtualenv'],Package['python-dev']], - } - - file { $script: - ensure => file, - content => template("monasca/${script_name}.erb"), - mode => '0755', - owner => 'root', - group => 'root', - } - - file { $alarm_definition_config: - ensure => file, - content => template($alarm_definition_config_source), - mode => '0755', - owner => 'root', - group => 'root', - } - - file { $notification_config: - ensure => file, - content => template($notification_config_source), - mode => '0755', - owner => 'root', - group => 'root', - } - - file { $notification_assignments: - ensure => file, - content => template($notification_assignments_source), - mode => '0755', - owner => 'root', - group => 'root', - } - - exec { $script: - subscribe => [File[$script], File[$alarm_definition_config], File[$notification_config], File[$notification_assignments]], - path => '/bin:/sbin:/usr/bin:/usr/sbin:/tmp', - cwd => "${virtual_env}/bin", - user => 'root', - group => 'root', - environment => ["OS_AUTH_URL=${auth_url}", - "OS_USERNAME=${admin_username}", - "OS_PASSWORD=${admin_password}", - "OS_PROJECT_NAME=${project_name}", - "MONASCA_API_URL=${api_server_url}"], - refreshonly => true, - require => Service['monasca-api'], - } - - file { $cleanup_script: - ensure => file, - content => template('monasca/vm_alarm_cleanup.py.erb'), - mode => '0755', - owner => 'root', - group => 'root', - require => Python::Virtualenv[$virtual_env], - } - -} diff --git a/manifests/api.pp b/manifests/api.pp deleted file mode 100644 index 648f8ae..0000000 --- a/manifests/api.pp +++ /dev/null @@ -1,233 +0,0 @@ -# == Class: monasca::api -# -# Class to setup monasca api -# -# === Parameters: -# -# [*api_db_thread_min*] -# (Optional) Minimum number of threads for db connection pool. -# Defaults to 4. -# -# [*api_db_thread_max*] -# (Optional) Maximum number of threads for db connection pool. -# Defaults to 32. -# -# [*api_db_wait*] -# (Optional) Amount of time to wait for db connection. Can specify -# any string supported by io.dropwizard Duration class, for example: -# -# '1ns' is 1 nanosecond -# '1s' is 1 seconds -# '1m' is 1 minute -# '1h' is 1 hour -# '1d' is 1 day -# -# Defaults to '5s' (5 seconds). -# -# [*api_user*] -# (Optional) Name of the monasca api user. -# Defaults to 'monasca_api'. -# -# [*blobmirror*] -# (Optional) URL of server to install debians from. -# Defaults to undef. -# -# [*check_conn_while_idle*] -# (Optional) Flag for whether db connection should stay alive while idle. -# Defaults to true. -# -# [*database_type*] -# (Optional) Type of database backend, influxdb or vertica. -# Defaults to influxdb. -# -# [*database_host*] -# (Optional) Host of database backend. -# Defaults to localhost. -# -# [*db_admin_password*] -# (Optional) Database admin password. -# Defaults to undef. -# -# [*gzip_setting*] -# (Optional) Flag for whether to use gzip for monasca api and persister. -# Defaults to true. -# -# [*kafka_brokers*] -# (Optional) List of kafka brokers and ports. -# Defaults to undef. -# -# [*keystone_endpoint*] -# (Optional) URL of keystone server. -# Defaults to undef. -# -# [*keystone_admin_token*] -# (Optional) Token for keystone admin. 
-# Defaults to undef. -# -# [*max_query_limit*] -# (Optional) Maximum number of records to be returned from db. -# Defaults to 10000. -# -# [*mon_api_build_ver*] -# (Optional) Build version of the monasca api debian package. -# Defaults to undef. -# -# [*mon_api_deb*] -# (Optional) Name of the monasca api debian package. -# Defaults to undef. -# -# [*region_name*] -# (Optional) Openstack region name for this install. -# Defaults to NA. -# -# [*roles_agent*] -# (Optional) List with the names of roles allowed to write metrics. -# Defaults to ['monasca-agent']. -# -# [*role_delegate*] -# (Optional) Name of the role allowed to write cross tenant metrics. -# Defaults to 'monitoring-delegate'. -# -# [*role_admin*] -# (Optional) Name of the role with extended permissions. Includes ability to -# publish metrics older than two weeks. -# Defaults to 'monasca-admin'. -# -# [*roles_default*] -# (Optional) List with the names of roles allowed to read and write metrics. -# Defaults to ['admin','monasca-user', '_member_']. -# -# [*roles_read_only*] -# (Optional) List with the names of roles allowed only to read metrics. -# Defaults to []. -# -# [*vertica_db_hint*] -# (Optional) Database hint to pass to vertica. -# Defaults to "". Setting this to "/*+KV*/" tells vertica to satisfy the -# query locally without talking to other nodes in the cluster -- which reduces -# network chatter when projections are replicated on each node. -# -# [*valid_notif_periods*] -# (Optional) List of valid notification periods in seconds. -# Defaults to [60]. -# -# [*zookeeper_servers*] -# (Optional) Comma separated list of zookeeper servers and ports. -# Defaults to undef. -# Example: "zookeeper_host_1:2181,zookeeper_host_2:2181" -# -class monasca::api ( - $api_db_thread_min = 4, - $api_db_thread_max = 32, - $api_db_wait = '5s', - $api_user = 'monasca_api', - $blobmirror = undef, - $check_conn_while_idle = true, - $database_type = 'influxdb', - $database_host = 'localhost', - $db_admin_password = undef, - $gzip_setting = true, - $kafka_brokers = undef, - $keystone_endpoint = undef, - $keystone_admin_token = undef, - $max_query_limit = 10000, - $mon_api_build_ver = undef, - $mon_api_deb = undef, - $region_name = 'NA', - $role_delegate = 'monitoring-delegate', - $role_admin = 'monasca-admin', - $roles_agent = ['monasca-agent'], - $roles_default = ['admin','monasca-user','_member_'], - $roles_read_only = [], - $valid_notif_periods = [60], - $vertica_db_hint = '', - $zookeeper_servers = undef, -) { - include monasca - include monasca::params - - $api_fetch_url = "http://${blobmirror}/repos/monasca/monasca_api" - $latest_api_deb = "/tmp/${mon_api_deb}" - $api_cfg_file = '/etc/monasca/api-config.yml' - $stack_script_src = 'puppet:///modules/monasca/monasca_stack.sh' - $stack_script = '/usr/bin/monasca_stack.sh' - $startup_script = '/etc/init/monasca-api.conf' - $startup_script_src = 'puppet:///modules/monasca/monasca-api.conf' - - wget::fetch { "${api_fetch_url}/${mon_api_build_ver}/${mon_api_deb}": - destination => $latest_api_deb, - timeout => 300, - before => [Package['install-api'],File[$latest_api_deb]], - } ~> Service['monasca-api'] - - user { $api_user: - ensure => present, - groups => $::monasca::group, - require => Group[$::monasca::group], - } - - file { $latest_api_deb: - ensure => present, - } - - package { 'monasca-api': - ensure => latest, - provider => dpkg, - source => $latest_api_deb, - alias => 'install-api', - tag => ['openstack', 'monasca-package'], - } - - #Variables for the template - $admin_password = 
$::monasca::params::admin_password - $admin_project_name = $::monasca::params::admin_project_name - $admin_name = $::monasca::params::admin_name - $auth_method = $::monasca::params::auth_method - $sql_host = $::monasca::params::sql_host - $sql_user = $::monasca::params::sql_user - $sql_password = $::monasca::params::sql_password - $sql_port = $::monasca::params::sql_port - $monasca_api_port = $::monasca::params::port - $api_db_user = $::monasca::params::api_db_user - $api_db_password = $::monasca::params::api_db_password - - file { $api_cfg_file: - ensure => file, - content => template('monasca/api-config.yml.erb'), - mode => '0644', - owner => $api_user, - group => $::monasca::group, - require => [User[$api_user], Group[$::monasca::group], File[$::monasca::log_dir]], - } ~> Service['monasca-api'] - - service { 'monasca-api': - ensure => running, - require => [File[$api_cfg_file], - File[$latest_api_deb], - File[$startup_script], - Package['install-api']], - tag => 'monasca-service', - } - - # Remove any old debs (puppet won't delete current resources) - tidy { '/tmp': - matches => 'monasca*.deb', - recurse => true, - } - - file { $stack_script: - ensure => file, - source => $stack_script_src, - mode => '0755', - owner => 'root', - group => 'root', - } - - file { $startup_script: - ensure => file, - source => $startup_script_src, - mode => '0755', - owner => 'root', - group => 'root', - } ~> Service['monasca-api'] -} diff --git a/manifests/checks/apache.pp b/manifests/checks/apache.pp deleted file mode 100644 index 3590d84..0000000 --- a/manifests/checks/apache.pp +++ /dev/null @@ -1,40 +0,0 @@ -# == Class: monasca::checks::apache -# -# Sets up the monasca apache check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the apache check are: -# name (the instance key): The name of the instance. -# apache_status_url (required) -# dimensions -# e.g. -# instances: -# server: -# apache_status_url: 'http://your.server.name/server-status' -# -class monasca::checks::apache( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/apache.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/apache.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'apache_header': - target => "${conf_dir}/apache.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::apache', $instances) - } -} diff --git a/manifests/checks/cpu.pp b/manifests/checks/cpu.pp deleted file mode 100644 index 7df2f70..0000000 --- a/manifests/checks/cpu.pp +++ /dev/null @@ -1,40 +0,0 @@ -# == Class: monasca::checks::cpu -# -# Sets up the monasca cpu check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the cpu check are: -# name (the instance key): The name of the instance. -# send_rollup_stats (default = False) -# dimensions -# e.g. 
-# instances: -# cpu_stats: -# dimensions: -# -class monasca::checks::cpu( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/cpu.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/cpu.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'cpu_header': - target => "${conf_dir}/cpu.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::cpu', $instances) - } -} diff --git a/manifests/checks/disk.pp b/manifests/checks/disk.pp deleted file mode 100644 index e4257f5..0000000 --- a/manifests/checks/disk.pp +++ /dev/null @@ -1,44 +0,0 @@ -# == Class: monasca::checks::disk -# -# Sets up the monasca disk check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the disk check are: -# name (the instance key): The name of the instance. -# use_mount (default = True) -# send_io_stats (default = True) -# send_rollup_stats (default = False) -# device_blacklist_re -# ignore_filesystem_types -# dimensions -# e.g. -# instances: -# disk_stats: -# dimensions: -# -class monasca::checks::disk( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/disk.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/disk.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'disk_header': - target => "${conf_dir}/disk.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::disk', $instances) - } -} diff --git a/manifests/checks/host_alive.pp b/manifests/checks/host_alive.pp deleted file mode 100644 index 71020ce..0000000 --- a/manifests/checks/host_alive.pp +++ /dev/null @@ -1,55 +0,0 @@ -# == Class: monasca::checks::host_alive -# -# Sets up the monasca host_alive check. -# -# === Parameters -# [*ssh_port*] -# -# [*ssh_timeout*] -# ssh_timeout is a floating-point number of seconds -# [*ping_timeout*] -# ping_timeout is an integer number of seconds -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the host_alive check are: -# name (the instance key): The name of the instance. -# host_name (required) -# alive_test (required) -# e.g. 
-# instances: -# host: -# host_name: 'somehost.somedomain.net' -# alive_test: 'ssh' -# gateway: -# host_name: 'gateway.somedomain.net' -# alive_test: 'ping' -# other: -# host_name: '192.168.0.221' -# alive_test: 'ssh' -# -class monasca::checks::host_alive( - $ssh_port = '22', - $ssh_timeout = '0.5', - $ping_timeout = '1', - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/host_alive.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/host_alive.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'host_alive_header': - target => "${conf_dir}/host_alive.yaml", - order => '0', - content => template('monasca/checks/host_alive.yaml.erb'), - } - create_resources('monasca::checks::instances::host_alive', $instances) - } -} diff --git a/manifests/checks/http_check.pp b/manifests/checks/http_check.pp deleted file mode 100644 index caeec99..0000000 --- a/manifests/checks/http_check.pp +++ /dev/null @@ -1,53 +0,0 @@ -# == Class: monasca::checks::http_check -# -# Sets up the monasca http_check check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the http_check check are: -# name (the instance key): The name of the instance. -# url (required) -# timeout (default = 10) -# username -# password -# match_pattern -# use_keystone (default = False) -# collect_response_time (default = False) -# headers -# disable_ssl_validation (default = True) -# dimensions -# e.g. -# instances: -# nova-api: -# url: 'http://192.168.0.254:8774/v2.0' -# dimensions: '{service: compute_api}' -# match_pattern: '.*version=2.*' -# timeout: '10' -# use_keystone: 'True' -# collect_response_time: 'True' -# -class monasca::checks::http_check( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/http_check.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/http_check.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'http_check_header': - target => "${conf_dir}/http_check.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::http_check', $instances) - } -} diff --git a/manifests/checks/instances/apache.pp b/manifests/checks/instances/apache.pp deleted file mode 100644 index 47a98a4..0000000 --- a/manifests/checks/instances/apache.pp +++ /dev/null @@ -1,23 +0,0 @@ -# == Defined Type: monasca::checks::instances::apache -# -# configure monasca plugin yaml file for apache -# -# === Parameters: -# -# [*apache_status_url*] -# url to get apache status from -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::apache ( - $apache_status_url, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_apache_instance": - target => "${conf_dir}/apache.yaml", - content => template('monasca/checks/apache.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/cpu.pp b/manifests/checks/instances/cpu.pp deleted file mode 100644 index 840a317..0000000 --- a/manifests/checks/instances/cpu.pp +++ /dev/null @@ -1,23 +0,0 @@ -# == Defined Type: monasca::checks::instances::cpu -# -# configure monasca plugin yaml file for cpu interfaces -# -# === Parameters: -# 
-# [*send_rollup_stats*] -# flag for whether or not to send rollup statistics -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::cpu ( - $send_rollup_stats = undef, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_cpu_instance": - target => "${conf_dir}/cpu.yaml", - content => template('monasca/checks/cpu.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/disk.pp b/manifests/checks/instances/disk.pp deleted file mode 100644 index 90f379d..0000000 --- a/manifests/checks/instances/disk.pp +++ /dev/null @@ -1,39 +0,0 @@ -# == Defined Type: monasca::checks::instances::disk -# -# configure monasca plugin yaml file for disk interfaces -# -# === Parameters: -# -# [*use_mount*] -# flag for mount setting for the check -# -# [*send_io_stats*] -# flag for whether or not to send io statistics -# -# [*send_rollup_stats*] -# flag for whether or not to send rollup statistics -# -# [*device_blacklist_re*] -# regular expression for devices to ignore -# -# [*ignore_filesystem_types*] -# types of file systems to ignore -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::disk ( - $use_mount = undef, - $send_io_stats = undef, - $send_rollup_stats = undef, - $device_blacklist_re = undef, - $ignore_filesystem_types = undef, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_disk_instance": - target => "${conf_dir}/disk.yaml", - content => template('monasca/checks/disk.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/host_alive.pp b/manifests/checks/instances/host_alive.pp deleted file mode 100644 index 0569e12..0000000 --- a/manifests/checks/instances/host_alive.pp +++ /dev/null @@ -1,23 +0,0 @@ -# == Defined Type: monasca::checks::instances::host_alive -# -# configure monasca plugin yaml file for host_alive -# -# === Parameters: -# -# [*host_name*] -# host name to check for aliveness -# -# [*alive_test*] -# executable to run to test for aliveness -# -define monasca::checks::instances::host_alive ( - $host_name, - $alive_test, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_host_alive_instance": - target => "${conf_dir}/host_alive.yaml", - content => template('monasca/checks/host_alive.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/http_check.pp b/manifests/checks/instances/http_check.pp deleted file mode 100644 index 385da59..0000000 --- a/manifests/checks/instances/http_check.pp +++ /dev/null @@ -1,55 +0,0 @@ -# == Defined Type: monasca::checks::instances::http_check -# -# configure monasca plugin yaml file for http_check -# -# === Parameters: -# -# [*url*] -# url to get http status for -# -# [*timeout*] -# timeout in seconds for how long to wait for an http response -# -# [*username*] -# username for keystone authentication -# -# [*password*] -# password for keystone authentication -# -# [*match_pattern*] -# expected patter in http response -# -# [*use_keystone*] -# flag for whether to pass keystone token to url -# -# [*collect_response_time*] -# flag to collect the http response time metric -# -# [*headers*] -# any headers that should be passed to url -# -# [*disable_ssl_validation*] -# flag to disable ssl validation -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::http_check ( - $url, - $timeout = undef, - $username = undef, - $password = undef, - $match_pattern 
= undef, - $use_keystone = undef, - $collect_response_time = undef, - $headers = undef, - $disable_ssl_validation = undef, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_http_check_instance": - target => "${conf_dir}/http_check.yaml", - content => template('monasca/checks/http_check.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/load.pp b/manifests/checks/instances/load.pp deleted file mode 100644 index 3324e2c..0000000 --- a/manifests/checks/instances/load.pp +++ /dev/null @@ -1,19 +0,0 @@ -# == Defined Type: monasca::checks::instances::load -# -# configure monasca plugin yaml file for load interfaces -# -# === Parameters: -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::load ( - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_load_instance": - target => "${conf_dir}/load.yaml", - content => template('monasca/checks/load.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/memory.pp b/manifests/checks/instances/memory.pp deleted file mode 100644 index 4c06b58..0000000 --- a/manifests/checks/instances/memory.pp +++ /dev/null @@ -1,19 +0,0 @@ -# == Defined Type: monasca::checks::instances::memory -# -# configure monasca plugin yaml file for memory interfaces -# -# === Parameters: -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::memory ( - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_memory_instance": - target => "${conf_dir}/memory.yaml", - content => template('monasca/checks/memory.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/mysql.pp b/manifests/checks/instances/mysql.pp deleted file mode 100644 index 83a8875..0000000 --- a/manifests/checks/instances/mysql.pp +++ /dev/null @@ -1,47 +0,0 @@ -# == Defined Type: monasca::checks::instances::mysql -# -# configure monasca plugin yaml file for mysql -# -# === Parameters: -# -# [*server*] -# mysql server to gather stats from -# -# [*user*] -# mysql user -# -# [*port*] -# mysql port -# -# [*pass*] -# mysql password -# -# [*sock*] -# mysql socket -# -# [*defaults_file*] -# file containing any default mysql settings -# -# [*dimensions*] -# any additional dimensions for the check -# -# [*options*] -# any additional options for the check -# -define monasca::checks::instances::mysql ( - $server = undef, - $user = undef, - $port = undef, - $pass = undef, - $sock = undef, - $defaults_file = undef, - $dimensions = undef, - $options = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_mysql_instance": - target => "${conf_dir}/mysql.yaml", - content => template('monasca/checks/mysql.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/nagios_wrapper.pp b/manifests/checks/instances/nagios_wrapper.pp deleted file mode 100644 index f9f8da6..0000000 --- a/manifests/checks/instances/nagios_wrapper.pp +++ /dev/null @@ -1,42 +0,0 @@ -# == Defined Type: monasca::checks::instances::nagios_wrapper -# -# configure monasca plugin yaml file for nagios_wrapper -# -# === Parameters: -# -# nrpe is not used by the check, only by puppet to determine which host -# uses this fragment -# -# [*check_command*] -# command to execute for the nagios check -# -# [*check_name*] -# name of the nagios check -# -# [*host_name*] -# host name being checked -# -# [*check_interval*] -# how frequently (in seconds) 
the check should be run -# -# [*dimensions*] -# any additional dimensions for the check -# -# [*nrpe*] -# flag indicating if this is an nrpe check -# -define monasca::checks::instances::nagios_wrapper ( - $check_command, - $check_name = undef, - $host_name = undef, - $check_interval = undef, - $dimensions = undef, - $nrpe = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_nagios_wrapper_instance": - target => "${conf_dir}/nagios_wrapper.yaml", - content => template('monasca/checks/nagios_wrapper.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/network.pp b/manifests/checks/instances/network.pp deleted file mode 100644 index cced735..0000000 --- a/manifests/checks/instances/network.pp +++ /dev/null @@ -1,35 +0,0 @@ -# == Defined Type: monasca::checks::instances::network -# -# configure monasca plugin yaml file for network interfaces -# -# === Parameters: -# -# [*collect_connection_state*] -# flag to indicate if connection state should be collected -# -# [*excluded_interfaces*] -# explicit list of interfaces to be ignored -# -# [*excluded_interface_re*] -# regular expression for interfaces to be ignored -# -# [*use_bits*] -# submits metrics in bits rather than bytes -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::network ( - $collect_connection_state = undef, - $excluded_interfaces = undef, - $excluded_interface_re = undef, - $use_bits = undef, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_network_instance": - target => "${conf_dir}/network.yaml", - content => template('monasca/checks/network.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/process.pp b/manifests/checks/instances/process.pp deleted file mode 100644 index f3ce919..0000000 --- a/manifests/checks/instances/process.pp +++ /dev/null @@ -1,32 +0,0 @@ -# == Defined Type: monasca::checks::instances::process -# -# configure monasca plugin yaml file for process usage -# -# === Parameters: -# -# [*search_string*] -# process search string to include in the check -# -# [*exact_match*] -# flag if the search_string needs to be an exact match -# -# [*cpu_check_interval*] -# how frequently (in seconds) the check should run -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::process ( - $search_string, - $exact_match = undef, - $cpu_check_interval = undef, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_process_instance": - target => "${conf_dir}/process.yaml", - content => template('monasca/checks/process.erb'), - order => '1', - } -} - diff --git a/manifests/checks/instances/rabbitmq.pp b/manifests/checks/instances/rabbitmq.pp deleted file mode 100644 index 16b6f20..0000000 --- a/manifests/checks/instances/rabbitmq.pp +++ /dev/null @@ -1,84 +0,0 @@ -# == Defined Type: monasca::checks::instances::rabbitmq -# -# configure monasca plugin yaml file for rabbitmq -# -# === Parameters: -# -# [*rabbitmq_api_url*] -# (Required) url of rabbit server -# -# [*rabbitmq_user*] -# (Optional) username for rabbit server -# Defaults to undef. -# -# [*rabbitmq_pass*] -# (Optional) password for rabbit server -# Defaults to undef. -# -# [*queues*] -# (Optional) an explicit list of rabbit queues to check -# Defaults to undef. -# -# [*nodes*] -# (Optional) an explicit list of rabbit nodes to check -# Defaults to undef. 
-# -# [*exchanges*] -# (Optional) an explicit list of rabbit exchanges to check -# Defaults to undef. -# -# [*queues_regexes*] -# (Optional) a list of regex for rabbit queues to check -# Defaults to undef. -# -# [*nodes_regexes*] -# (Optional) a list of regex for rabbit nodes to check -# Defaults to undef. -# -# [*exchanges_regexes*] -# (Optional) a list of regex for rabbit exchanges to check -# Defaults to undef. -# -# [*max_detailed_queues*] -# (Optional) maximum number of detailed queues to check -# Defaults to undef. -# -# [*max_detailed_exchanges*] -# (Optional) maximum number of detailed exchanges to check -# Defaults to undef. -# -# [*max_detailed_nodes*] -# (Optional) maximum number of detailed nodes to check -# Defaults to undef. -# -# [*whitelist*] -# (Optional) A dictionary of the node, queue and exchange metrics to collect -# Defaults to undef. -# -# [*dimensions*] -# (Optional) any additional dimensions for the check -# Defaults to undef. -# -define monasca::checks::instances::rabbitmq ( - $rabbitmq_api_url, - $rabbitmq_user = undef, - $rabbitmq_pass = undef, - $queues = undef, - $nodes = undef, - $exchanges = undef, - $queues_regexes = undef, - $nodes_regexes = undef, - $exchanges_regexes = undef, - $max_detailed_queues = undef, - $max_detailed_exchanges = undef, - $max_detailed_nodes = undef, - $whitelist = undef, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_rabbitmq_instance": - target => "${conf_dir}/rabbitmq.yaml", - content => template('monasca/checks/rabbitmq.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/solidfire.pp b/manifests/checks/instances/solidfire.pp deleted file mode 100644 index e21676e..0000000 --- a/manifests/checks/instances/solidfire.pp +++ /dev/null @@ -1,27 +0,0 @@ -# == Defined Type: monasca::checks::instances::solidfire -# -# Configure monasca plugin yaml file for solidfire. -# -# === Parameters: -# -# [*admin_name*] -# (Required) Name of the cluster administrator. -# -# [*admin_password*] -# (Required) Password of the cluster administrator. -# -# [*cluster_mvip*] -# (Required) Management VIP of the cluster. 
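For context, these instance defined types are rarely declared by hand: each check class calls create_resources on an instances hash (usually supplied through Hiera), and one entry of that hash expands into a declaration like the sketch below. The values here are copied from the rabbitmq check class's own example further down; everything else about the sketch is illustrative.

    monasca::checks::instances::rabbitmq { 'rabbit':
      rabbitmq_api_url => 'http://localhost:15672/api',
      rabbitmq_user    => 'guest',
      rabbitmq_pass    => 'guest',
      queues           => '[conductor]',
    }

Each such declaration appends one concat fragment (order '1') to the check's YAML file in the agent conf dir, beneath the header fragment written by the corresponding check class.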
-# -define monasca::checks::instances::solidfire ( - $admin_name, - $admin_password, - $cluster_mvip, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_solidfire_instance": - target => "${conf_dir}/solidfire.yaml", - content => template('monasca/checks/solidfire.erb'), - order => '1', - } -} diff --git a/manifests/checks/instances/zk.pp b/manifests/checks/instances/zk.pp deleted file mode 100644 index ec458e2..0000000 --- a/manifests/checks/instances/zk.pp +++ /dev/null @@ -1,31 +0,0 @@ -# == Defined Type: monasca::checks::instances::zk -# -# configure monasca plugin yaml file for zookeeper -# -# === Parameters: -# -# [*host*] -# zookeeper host -# -# [*port*] -# zookeeper port -# -# [*timeout*] -# timeout in seconds to wait for zookeeper to respond -# -# [*dimensions*] -# any additional dimensions for the check -# -define monasca::checks::instances::zk ( - $host = undef, - $port = undef, - $timeout = undef, - $dimensions = undef, -) { - $conf_dir = $::monasca::agent::conf_dir - concat::fragment { "${title}_zk_instance": - target => "${conf_dir}/zk.yaml", - content => template('monasca/checks/zk.erb'), - order => '1', - } -} diff --git a/manifests/checks/libvirt.pp b/manifests/checks/libvirt.pp deleted file mode 100644 index 6c7ee17..0000000 --- a/manifests/checks/libvirt.pp +++ /dev/null @@ -1,140 +0,0 @@ -# == Class: monasca::checks::libvirt -# -# Sets up the monasca libvirt check. -# Requires lxml, libvirt-python and python-novaclient -# -# === Parameters -# -# [*admin_password*] -# (Required) Password for the monasca admin. -# -# [*admin_tenant_name*] -# (Required) Name of the monasca admin tenant/project. -# -# [*admin_user*] -# (Required) Name of the monasca admin. -# -# [*alive_only*] -# (Optional) Will suppress all per-VM metrics aside from host_alive_status -# and vm.host_alive_status, including all I/O, network, memory, ping, and -# CPU metrics. Aggregate metrics, however, would still be enabled if alive_only -# is true. -# Defaults to false. -# -# [*cache_dir*] -# (Optional) Cache directory to persist data. -# Defaults to '/dev/shm'. -# -# [*customer_metadata*] -# (Optional) A list of instance metadata to be submitted as dimensions -# with customer data. -# Defaults to not set in the config file. -# -# [*disk_collection_period*] -# (Optional) Have disk metrics be outputted less often to reduce -# metric load on the system. If this is less than the agent collection -# period, it will be ignored. -# Defaults to 0. -# -# [*host_aggregate_re*] -# (Optional) Regular expression of host aggregate names to match, which -# will add a 'host_aggregate' dimension to libvirt metrics for the operations -# project. -# Defaults to undef -- causing the flag to not be set in the config file. -# -# [*identity_uri*] -# (Required) URI of the keystone instance. -# -# [*metadata*] -# (Optional) A list of instance metadata to be submitted as dimensions -# with service data. -# Defaults to not set in the config file. -# -# [*network_use_bits*] -# (Optional) Submit network metrics in bits rather than bytes. -# Defaults to true. -# -# [*nova_refresh*] -# (Optional) Interval to force data refresh. Set to 0 to refresh every time -# the collector runs, or to None to disable regular refreshes entirely (though -# the instance cache will still be refreshed if a new instance is detected). -# Defaults to 14400 (4 hours). 
-# -# [*ping_check*] -# (Optional) The entire command line (sans the IP address, which is automatically -# appended) used to perform a ping check against instances, with a keyword NAMESPACE -# automatically replaced with the appropriate network namespace for the VM being -# monitored. Set to false to disable ping checks. -# Defaults to false. -# -# [*region_name*] -# (Required) Openstack keystone region for this install. -# -# [*vm_cpu_check_enable*] -# (Optional) Enables collecting of VM CPU metrics. -# Defaults to true. -# -# [*vm_disks_check_enable*] -# (Optional) Enables collecting of VM disk metrics. -# Defaults to true. -# -# [*vm_extended_disks_check_enable*] -# (Optional) nable collecting of extended disk metrics. -# Defaults to false. -# -# [*vm_network_check_enable*] -# (Optional) Enables collecting of VM network metrics. -# Defaults to true. -# -# [*vm_ping_check_enable*] -# (Optional) Enables host alive ping check. -# Defaults to false. -# -# [*vm_probation*] -# (Optional) Period of time (in seconds) in which to suspend metrics -# from a newly-created VM. This is to prevent quickly-obsolete metrics -# in an environment with a high amount of instance churn. -# Defaults to 300 seconds. -# -class monasca::checks::libvirt( - $admin_password = undef, - $admin_tenant_name = undef, - $admin_user = undef, - $alive_only = false, - $cache_dir = '/dev/shm', - $customer_metadata = [], - $disk_collection_period = 0, - $host_aggregate_re = undef, - $identity_uri = undef, - $metadata = [], - $network_use_bits = true, - $nova_refresh = '14400', - $ping_check = false, - $region_name = undef, - $vm_cpu_check_enable = true, - $vm_disks_check_enable = true, - $vm_extended_disks_check_enable = false, - $vm_network_check_enable = true, - $vm_ping_check_enable = false, - $vm_probation = '300', -){ - $conf_dir = $::monasca::agent::conf_dir - - File["${conf_dir}/libvirt.yaml"] ~> Service['monasca-agent'] - - file { "${conf_dir}/libvirt.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - content => template('monasca/checks/libvirt.yaml.erb'), - require => File[$conf_dir], - } - - # libxslt1-dev, libxml2-dev and zlib1g-dev are needed for lxml install - ensure_packages('libxslt1-dev') - ensure_packages('libxml2-dev') - ensure_packages('zlib1g-dev') - # libvirt-dev and pkg-config are needed libvirt-python - ensure_packages('libvirt-dev') - ensure_packages('pkg-config') -} diff --git a/manifests/checks/load.pp b/manifests/checks/load.pp deleted file mode 100644 index c92e9e2..0000000 --- a/manifests/checks/load.pp +++ /dev/null @@ -1,39 +0,0 @@ -# == Class: monasca::checks::load -# -# Sets up the monasca load check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the load check are: -# name (the instance key): The name of the instance. -# dimensions -# e.g. 
-# instances: -# load_stats: -# dimensions: -# -class monasca::checks::load( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/load.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/load.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'load_header': - target => "${conf_dir}/load.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::load', $instances) - } -} diff --git a/manifests/checks/memory.pp b/manifests/checks/memory.pp deleted file mode 100644 index 82a9670..0000000 --- a/manifests/checks/memory.pp +++ /dev/null @@ -1,39 +0,0 @@ -# == Class: monasca::checks::memory -# -# Sets up the monasca memory check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the memory check are: -# name (the instance key): The name of the instance. -# dimensions -# e.g. -# instances: -# memory_stats: -# dimensions: -# -class monasca::checks::memory( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/memory.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/memory.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'memory_header': - target => "${conf_dir}/memory.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::memory', $instances) - } -} diff --git a/manifests/checks/mysql.pp b/manifests/checks/mysql.pp deleted file mode 100644 index 5a0744c..0000000 --- a/manifests/checks/mysql.pp +++ /dev/null @@ -1,49 +0,0 @@ -# == Class: monasca::checks::mysql -# -# Sets up the monasca mysql check. -# Requires MySQL-python -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the mysql check are: -# name (the instance key): The name of the instance. -# server -# user -# port -# pass -# sock -# defaults_file -# dimensions -# options -# e.g. -# instances: -# local: -# defaults_file: '/root/.my.cnf' -# server: 'localhost' -# user: 'root' -# -class monasca::checks::mysql( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/mysql.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/mysql.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'mysql_header': - target => "${conf_dir}/mysql.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::mysql', $instances) - } -} diff --git a/manifests/checks/nagios_wrapper.pp b/manifests/checks/nagios_wrapper.pp deleted file mode 100644 index 72bb482..0000000 --- a/manifests/checks/nagios_wrapper.pp +++ /dev/null @@ -1,64 +0,0 @@ -# == Class: monasca::checks::nagios_wrapper -# -# Sets up the monasca nagios_wrapper check. -# -# === Parameters -# [*check_path*] -# Directories where Nagios checks (scripts, programs) may live -# [*temp_file_path*] -# Where to store last-run timestamps for each check -# [*instances*] -# A hash of instances for the check. 
-# Each instance should be a hash of the check's parameters. -# Parameters for the nagios_wrapper check are: -# service_name (the instance key): The name of the instance. -# check_command (required) -# host_name -# check_interval -# dimensions -# e.g. -# instances: -# load: -# check_command: 'check_load -r -w 2,1.5,1 -c 10,5,4' -# disk: -# check_command: 'check_disk -w 15\% -c 5\% -A -i /srv/node' -# check_interval: '300' -# [*host_name*] -# Use with the collector to determine which checks run on which host -# [*central_mon*] -# Set to true when using the collector if a single host will be running -# all non-nrpe checks -# -class monasca::checks::nagios_wrapper( - $check_path = '/usr/lib/nagios/plugins:/usr/local/bin/nagios', - $temp_file_path = '/dev/shm/', - $instances = undef, - $host_name = undef, - $central_mon = false, -){ - $conf_dir = $::monasca::agent::conf_dir - - if ($central_mon) { - Monasca::Checks::Instances::Nagios_wrapper <<| nrpe == false |>> - } - else { - Monasca::Checks::Instances::Nagios_wrapper <<| host_name == $host_name and nrpe != false |>> - } - - Concat["${conf_dir}/nagios_wrapper.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/nagios_wrapper.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'nagios_wrapper_header': - target => "${conf_dir}/nagios_wrapper.yaml", - order => '0', - content => template('monasca/checks/nagios_wrapper.yaml.erb'), - } - if($instances){ - create_resources('monasca::checks::instances::nagios_wrapper', $instances) - } -} diff --git a/manifests/checks/network.pp b/manifests/checks/network.pp deleted file mode 100644 index 298364e..0000000 --- a/manifests/checks/network.pp +++ /dev/null @@ -1,44 +0,0 @@ -# == Class: monasca::checks::network -# -# Sets up the monasca network check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the network check are: -# name (the instance key): The name of the instance. -# collect_connection_state (default = False) -# excluded_interfaces -# excluded_interface_re: A regular expression for excluded interfaces -# use_bits -# dimensions -# e.g. -# instances: -# network_stats: -# collect_connection_state: 'False' -# excluded_interfaces: '[lo, lo0]' -# -class monasca::checks::network( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/network.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/network.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'network_header': - target => "${conf_dir}/network.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::network', $instances) - } -} diff --git a/manifests/checks/ovs.pp b/manifests/checks/ovs.pp deleted file mode 100644 index c018e55..0000000 --- a/manifests/checks/ovs.pp +++ /dev/null @@ -1,103 +0,0 @@ -# == Class: monasca::checks::ovs -# -# Sets up the monasca open vswitch check. -# -# === Parameters -# -# [*admin_user*] -# (Required) Name of the monasca admin. -# -# [*admin_password*] -# (Required) Password for the monasca admin. -# -# [*admin_tenant_name*] -# (Required) Name of the monasca admin tenant/project. -# -# [*identity_uri*] -# (Required) URI of the keystone instance. 
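The nagios_wrapper check is the one plugin in this module built around exported resources: when central_mon is true the class realises every exported instance whose nrpe parameter is false, otherwise it collects only instances whose host_name matches its own and whose nrpe is not false. A sketch of the matching export from a monitored node, assuming PuppetDB/exported resources are available (the title and facts are illustrative; the check_command value is borrowed from the class's example):

    @@monasca::checks::instances::nagios_wrapper { "${::fqdn}_load":
      check_command => 'check_load -r -w 2,1.5,1 -c 10,5,4',
      host_name     => $::fqdn,
      nrpe          => false,
    }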
-# -# [*included_interface_re*] -# (Optional) Regular expression of interfaces to publish metrics for. -# Defaults to 'qg.*'. -# -# [*region_name*] -# (Required) Openstack keystone region for this install. -# -# [*cache_dir*] -# (Optional) Cache directory to persist data. -# Defaults to /dev/shm. -# -# [*metadata*] -# (Optional) A list of router metadata to be submitted as dimensions -# with service data. For example, 'tenant_name' in the list will -# add the tenant name dimension to router metrics posted to the -# infrastructure project. -# Defaults to an empty list in the config file. -# -# [*neutron_refresh*] -# (Optional) Interval to force data refresh from neutron. -# Defaults to 14400 seconds (4 hours).. -# -# [*check_router_ha*] -# (Optional) Flag to indicate if additional neutron calls should be -# made to determine if an HA router is active or standby. -# Defaults to true. -# -# [*network_use_bits*] -# (Optional) Flag to indicate bits should be reported instead of bytes. -# Defaults to true. -# -# [*ovs_cmd*] -# (Optional) Command to run to get ovs data. -# Defaults to 'sudo /usr/bin/ovs-vsctl'. -# -# [*publish_router_capacity*] -# (Optional) Flag indicating if router capacity metrics should be -# published. -# Defaults to true. -# -# [*use_absolute_metrics*] -# (Optional) Flag indicating if absolute metrics should be published -# for interfaces. -# Defaults to true. -# -# [*use_health_metrics*] -# (Optional) Flag indicating if health metrics should be published -# for interfaces. -# Defaults to true. -# -# [*use_rate_metrics*] -# (Optional) Flag indicating if rate metrics should be published -# for interfaces. -# Defaults to true. -# -class monasca::checks::ovs( - $admin_user = undef, - $admin_password = undef, - $admin_tenant_name = undef, - $cache_dir = '/dev/shm', - $check_router_ha = true, - $identity_uri = undef, - $included_interface_re = 'qg.*', - $metadata = [], - $network_use_bits = true, - $neutron_refresh = '14400', - $ovs_cmd = 'sudo /usr/bin/ovs-vsctl', - $publish_router_capacity = true, - $region_name = undef, - $use_absolute_metrics = true, - $use_health_metrics = true, - $use_rate_metrics = true, -){ - $conf_dir = $::monasca::agent::conf_dir - - File["${conf_dir}/ovs.yaml"] ~> Service['monasca-agent'] - - file { "${conf_dir}/ovs.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - content => template('monasca/checks/ovs.yaml.erb'), - require => File[$conf_dir], - } -} diff --git a/manifests/checks/process.pp b/manifests/checks/process.pp deleted file mode 100644 index 220d413..0000000 --- a/manifests/checks/process.pp +++ /dev/null @@ -1,46 +0,0 @@ -# == Class: monasca::checks::process -# -# Sets up the monasca process check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the process check are: -# name (the instance key): The name of the instance. -# search_string (required): An array of process names to search for. -# exact_match (default = True): Whether the search_string should exactly -# match the service name. (Boolean) -# cpu_check_interval (default = 0.1): -# dimensions: Additional dimensions for the instance. -# e.g. 
-# instances: -# nova-api: -# search_string: '[nova-api]' -# dimensions: '{component: nova-api, service: compute}' -# rabbitmq-server: -# search_string: '[rabbitmq-server]' -# -class monasca::checks::process( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/process.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/process.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'process_header': - target => "${conf_dir}/process.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::process', $instances) - } -} diff --git a/manifests/checks/rabbitmq.pp b/manifests/checks/rabbitmq.pp deleted file mode 100644 index 06d71b8..0000000 --- a/manifests/checks/rabbitmq.pp +++ /dev/null @@ -1,53 +0,0 @@ -# == Class: monasca::checks::rabbitmq -# -# Sets up the monasca rabbitmq check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the rabbitmq check are: -# name (the instance key): The name of the instance. -# rabbitmq_user (default = guest) -# rabbitmq_pass (default = guest) -# rabbitmq_api_url (required) -# queues -# nodes -# exchanges -# max_detailed_queues -# max_detailed_exchanges -# max_detailed_nodes -# dimensions -# e.g. -# instances: -# rabbit: -# rabbitmq_user: 'guest' -# rabbitmq_pass: 'guest' -# rabbitmq_api_url: 'http://localhost:15672/api' -# exchanges: '[nova, cinder, ceilometer, glance, keystone, neutron, heat]' -# nodes: '[rabbit@devstack]' -# queues: '[conductor]' -# -class monasca::checks::rabbitmq( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/rabbitmq.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/rabbitmq.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'rabbitmq_header': - target => "${conf_dir}/rabbitmq.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::rabbitmq', $instances) - } -} diff --git a/manifests/checks/solidfire.pp b/manifests/checks/solidfire.pp deleted file mode 100644 index 7b1900a..0000000 --- a/manifests/checks/solidfire.pp +++ /dev/null @@ -1,46 +0,0 @@ -# == Class: monasca::checks::solidfire -# -# Sets up the monasca solidfire agent plugin. -# -# === Parameters -# -# [*instances*] -# (Required) A hash of instances for the solidfire plugin. Each instance -# should be a hash of the check's parameters. Parameters for the solidfire -# check are (all required): -# -# cluster_name (the instance key): The name of the cluster. -# admin_name: Name of the cluster administrator. -# admin_password: Password of the cluster administrator. -# cluster_mvip: Management VIP of the cluster. 
-# -# Example: -# -# instances: -# rack_d_cluster: -# admin_name: monasca_admin -# admin_password: secret_password -# cluster_mvip: 192.168.1.1 -# -class monasca::checks::solidfire( - $instances, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/solidfire.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/solidfire.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'solidfire_header': - target => "${conf_dir}/solidfire.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::solidfire', $instances) - } -} diff --git a/manifests/checks/vertica.pp b/manifests/checks/vertica.pp deleted file mode 100644 index 424b726..0000000 --- a/manifests/checks/vertica.pp +++ /dev/null @@ -1,42 +0,0 @@ -# == Class: monasca::checks::vertica -# -# Sets up the monasca agent vertica plugin/check. -# -# === Parameters -# -# [*node_name*] -# (Required) Vertica node name for this node (example: 'v_mon_node0001'). -# -# [*password*] -# (Required) Password for the vertica user. -# -# [*user*] -# (Required) Name of the vertica user. -# -# [*service*] -# (Optional) Name of service dimension for vertica metrics. -# Defaults to 'vertica'. -# -# [*timeout*] -# (Optional) Timeout in seconds for how long to wait for a query. -# Defaults to 3 seconds. -# -class monasca::checks::vertica( - $node_name, - $password, - $user, - $service = 'vertica', - $timeout = 3, -){ - $conf_dir = $::monasca::agent::conf_dir - - File["${conf_dir}/vertica.yaml"] ~> Service['monasca-agent'] - - file { "${conf_dir}/vertica.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - content => template('monasca/checks/vertica.yaml.erb'), - require => File[$conf_dir], - } -} diff --git a/manifests/checks/zk.pp b/manifests/checks/zk.pp deleted file mode 100644 index 77f4ba0..0000000 --- a/manifests/checks/zk.pp +++ /dev/null @@ -1,44 +0,0 @@ -# == Class: monasca::checks::zk -# -# Sets up the monasca zookeeper check. -# -# === Parameters -# -# [*instances*] -# A hash of instances for the check. -# Each instance should be a hash of the check's parameters. -# Parameters for the zk check are: -# name (the instance key): The name of the instance. -# host (default = localhost) -# port (default = 2181) -# timeout (default = 3.0) -# dimensions -# e.g. -# instances: -# local: -# host: 'localhost' -# port: '2181' -# timeout: '3' -# -class monasca::checks::zk( - $instances = undef, -){ - $conf_dir = $::monasca::agent::conf_dir - - if($instances){ - Concat["${conf_dir}/zk.yaml"] ~> Service['monasca-agent'] - concat { "${conf_dir}/zk.yaml": - owner => 'root', - group => $::monasca::group, - mode => '0640', - warn => true, - require => File[$conf_dir], - } - concat::fragment { 'zk_header': - target => "${conf_dir}/zk.yaml", - order => '0', - content => "---\ninit_config: null\ninstances:\n", - } - create_resources('monasca::checks::instances::zk', $instances) - } -} diff --git a/manifests/config.pp b/manifests/config.pp deleted file mode 100644 index 2e77b75..0000000 --- a/manifests/config.pp +++ /dev/null @@ -1,40 +0,0 @@ -# == Class: monasca::config -# -# This class is used to manage arbitrary monasca configurations. -# -# example xxx_config -# (optional) Allow configuration of arbitrary monasca configurations. -# The value is an hash of xxx_config resources. 
Example: -# { 'DEFAULT/foo' => { value => 'fooValue'}, -# 'DEFAULT/bar' => { value => 'barValue'} -# } -# -# In yaml format, Example: -# xxx_config: -# DEFAULT/foo: -# value: fooValue -# DEFAULT/bar: -# value: barValue -# -# === Parameters -# -# [*monasca_config*] -# (optional) Allow configuration of monasca.conf configurations. -# -# [*monasca_ini*] -# (optional) Allow configuration of monasca.ini configurations. -# -# NOTE: The configuration MUST NOT be already handled by this module -# or Puppet catalog compilation will fail with duplicate resources. -# -class monasca::config ( - $monasca_config = {}, - $monasca_ini = {}, -) { - - validate_legacy(Hash, 'validate_hash', $monasca_config) - validate_legacy(Hash, 'validate_hash', $monasca_ini) - - create_resources('monasca_config', $monasca_config) - create_resources('monasca_ini', $monasca_ini) -} diff --git a/manifests/db/mysql.pp b/manifests/db/mysql.pp deleted file mode 100644 index 50df713..0000000 --- a/manifests/db/mysql.pp +++ /dev/null @@ -1,59 +0,0 @@ -# -# Class to configure monasca's mysql database, which is used -# for configuration of thresholds, alarms, etc. -# -class monasca::db::mysql { - include monasca::params - - $sql_host = $::monasca::params::sql_host - $sql_user = $::monasca::params::sql_user - $sql_password = $::monasca::params::sql_password - $sql_port = $::monasca::params::sql_port - $monsql = '/tmp/mon.sql' - $mysql_user_class = 'mysql_user' - $monasca_remote = 'monasca@%' - $notification_local = 'notification@localhost' - $notification_remote = 'notification@%' - $thresh_local = 'thresh@localhost' - $thresh_remote = 'thresh@%' - - $prereqs = [ - Mysql_user[$monasca_remote], - Mysql_user[$notification_local], - Mysql_user[$notification_remote], - Mysql_user[$thresh_local], - Mysql_user[$thresh_remote], - File[$monsql]] - - file { $monsql: - ensure => file, - content => template('monasca/mon.sql.erb'), - mode => '0644', - owner => 'root', - group => 'root', - } - - mysql::db { 'mon': - user => 'monasca', - password => $sql_password, - host => 'localhost', - sql => $monsql, - require => $prereqs, - } - - $user_resource = { - ensure => present, - password_hash => mysql::password($sql_password), - provider => 'mysql', - require => Class['mysql::server'], - } - - # - # We get the monasca local user for free above in the db declaration. 
- # - ensure_resource($mysql_user_class, $monasca_remote, $user_resource) - ensure_resource($mysql_user_class, $notification_local, $user_resource) - ensure_resource($mysql_user_class, $notification_remote, $user_resource) - ensure_resource($mysql_user_class, $thresh_local, $user_resource) - ensure_resource($mysql_user_class, $thresh_remote, $user_resource) -} diff --git a/manifests/influxdb/bootstrap.pp b/manifests/influxdb/bootstrap.pp deleted file mode 100644 index 6c2c259..0000000 --- a/manifests/influxdb/bootstrap.pp +++ /dev/null @@ -1,67 +0,0 @@ -# == Class: monasca::influxdb::bootstrap -# -# Class for bootstrapping influxdb for monasca -# -# === Parameters: -# -# [*influxdb_password*] -# password for the influxdb admin -# -# [*influxdb_dbuser_ro_password*] -# password for the influxdb read-only user -# -# [*influxdb_def_ret_pol_name*] -# default retention policy name -# -# [*influxdb_def_ret_pol_duration*] -# default influxdb retention policy duration -# -# [*influxdb_tmp_ret_pol_name*] -# temporary retention policy -# -# [*influxdb_tmp_ret_pol_duration*] -# temporary influxdb retention policy duration -# -# [*influxdb_retention_replication*] -# influxdb retention policy replication factor -# -class monasca::influxdb::bootstrap( - $influxdb_password = undef, - $influxdb_dbuser_ro_password = undef, - $influxdb_def_ret_pol_name = 'raw', - $influxdb_def_ret_pol_duration = '390d', - $influxdb_tmp_ret_pol_name = 'tmp', - $influxdb_tmp_ret_pol_duration = '1h', - $influxdb_retention_replication = 1, -) -{ - include monasca::params - - $influxdb_dbuser_password = $::monasca::params::api_db_password - $script = 'bootstrap-influxdb.sh' - $influxdb_host = 'localhost' - $influxdb_port = 8086 - $influxdb_user = 'root' - - file { "/tmp/${script}": - ensure => file, - content => template("monasca/${script}.erb"), - mode => '0755', - owner => 'root', - group => 'root', - } - - exec { "/tmp/${script}": - require => [ Package['influxdb'], Service['influxdb'] ], - subscribe => File["/tmp/${script}"], - path => '/bin:/sbin:/usr/bin:/usr/sbin:/tmp', - cwd => '/tmp', - user => 'root', - group => 'root', - logoutput => true, - refreshonly => true, - environment => ["INFLUX_ADMIN_PASSWORD=${influxdb_password}", - "DB_USER_PASSWORD=${influxdb_dbuser_password}", - "DB_READ_ONLY_USER_PASSWORD=${influxdb_dbuser_ro_password}"], - } -} diff --git a/manifests/init.pp b/manifests/init.pp deleted file mode 100644 index 0706c72..0000000 --- a/manifests/init.pp +++ /dev/null @@ -1,40 +0,0 @@ -# == Class: monasca -# -# This class sets up configuration common -# across all monasca services. 
-# -# === Parameters: -# -# [*log_dir*] -# -# [*monasca_dir*] -# -# [*group*] -# -class monasca( - $log_dir = '/var/log/monasca', - $monasca_dir = '/etc/monasca', - $group = 'monasca', -){ - - group { $group: - ensure => present, - } - - file { $log_dir: - ensure => directory, - owner => 'root', - group => $group, - mode => '0775', - require => Group[$group], - } - - file { $monasca_dir: - ensure => 'directory', - owner => 'root', - group => 'root', - mode => '0755', - require => Group[$group], - } - -} diff --git a/manifests/kafka/config.pp b/manifests/kafka/config.pp deleted file mode 100644 index 508c614..0000000 --- a/manifests/kafka/config.pp +++ /dev/null @@ -1,36 +0,0 @@ -# == Class: monasca::kakfa::config -# -# Class for creating kafka topics needed by monasca -# -# === Parameters: -# -# [*kafka_zookeeper_connections*] -# list of zookeeper servers and ports -# -# [*kafka_replication_factor*] -# replication factor for kafka -# -# [*topic_config*] -# topic specific topic configuration, sample hiera: -# -# monasca::kafka::config::topic_config: -# metrics: -# partitions: 4 -# events: -# partitions: 4 -# alarm-notifications: -# partitions: 8 -# alarm-state-transitions: -# partitions: 8 -# retry-notifications: -# partitions: 2 -# healthcheck: -# partitions: 4 -# -class monasca::kafka::config ( - $kafka_zookeeper_connections = undef, - $kafka_replication_factor = undef, - $topic_config = {}, -) { - create_resources('monasca::kafka::topics', $topic_config) -} diff --git a/manifests/kafka/topics.pp b/manifests/kafka/topics.pp deleted file mode 100644 index 2f396d6..0000000 --- a/manifests/kafka/topics.pp +++ /dev/null @@ -1,46 +0,0 @@ -# == Defined Type: monasca::kafka::topics -# -# Defined type to create kafka topics for monasca -# -# === Parameters: -# -# [*partitions*] -# number of kafka partitions for this topic -# -# [*kafka_zookeeper_connections*] -# list of zookeeper connections for kafka topic -# -# [*kafka_replication_factor*] -# replication factor for kakfa topic -# -# [*install_dir*] -# directory of kafka install -# -define monasca::kafka::topics ( - $partitions = 2, - $kafka_zookeeper_connections = $monasca::kafka::config::kafka_zookeeper_connections, - $kafka_replication_factor = $monasca::kafka::config::kafka_replication_factor, - $install_dir = '/opt/kafka', -) { - - exec { "Ensure ${name} is created": - # lint:ignore:140chars - command => "kafka-topics.sh --create --zookeeper ${kafka_zookeeper_connections} --replication-factor ${kafka_replication_factor} --partitions ${partitions} --topic ${name}", - # lint:endignore - path => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${install_dir}/bin", - cwd => $install_dir, - user => 'root', - group => 'root', - onlyif => "kafka-topics.sh --topic ${name} --list --zookeeper ${kafka_zookeeper_connections} | grep -q ${name}; test $? -ne 0" - } - -> exec { "Ensure ${name} is has ${partitions} partitions": - command => "kafka-topics.sh --alter --zookeeper ${kafka_zookeeper_connections} --partitions ${partitions} --topic ${name}", - path => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${install_dir}/bin", - cwd => $install_dir, - user => 'root', - group => 'root', - # lint:ignore:140chars - onlyif => "kafka-topics.sh --describe --zookeeper ${kafka_zookeeper_connections} --topic ${name} | grep 'PartitionCount:${partitions}'; test $? 
-ne 0" - # lint:endignore - } -} diff --git a/manifests/keystone/auth.pp b/manifests/keystone/auth.pp deleted file mode 100644 index 43a5c70..0000000 --- a/manifests/keystone/auth.pp +++ /dev/null @@ -1,259 +0,0 @@ -# == Class: monasca::keystone::auth -# -# Configures Monasca user, service and endpoint in Keystone. -# -# === Parameters -# [*auth_name*] -# Username for Monasca service. Optional. Defaults to 'monasca'. -# -# [*admin_email*] -# Email for Monasca admin user. Optional. Defaults to 'monasca@localhost'. -# -# [*agent_email*] -# Email for Monasca agent user. Optional. Defaults to 'monasca@localhost'. -# -# [*user_email*] -# Email for Monasca default. Optional. Defaults to 'monasca@localhost'. -# -# [*configure_endpoint*] -# Should Monasca endpoint be configured? Optional. Defaults to 'true'. -# -# [*configure_user*] -# Should Monasca service user be configured? Optional. Defaults to 'true'. -# -# [*configure_user_role*] -# Should roles be configured on Monasca service user? Optional. Defaults to 'true'. -# -# [*service_name*] -# Name of the service. Optional. Defaults to 'monasca'. -# -# [*service_type*] -# Type of service. Optional. Defaults to 'monitoring'. -# -# [*service_description*] -# Description for monasca/monitoring service in the keystone service catalog. -# Optional. Defaults to 'Openstack Monitoring Service'. -# -# [*public_address*] -# Public address for endpoint. Optional. Defaults to '127.0.0.1'. -# -# [*admin_address*] -# Admin address for endpoint. Optional. Defaults to '127.0.0.1'. -# -# [*internal_address*] -# Internal address for endpoint. Optional. Defaults to '127.0.0.1'. -# -# [*tenant*] -# Tenant for Monasca user. Optional. Defaults to 'services'. -# -# [*public_protocol*] -# Protocol for public endpoint. Optional. Defaults to 'http'. -# -# [*admin_protocol*] -# Protocol for admin endpoint. Optional. Defaults to 'http'. -# -# [*internal_protocol*] -# Protocol for public endpoint. Optional. Defaults to 'http'. -# -# [*public_url*] -# The endpoint's public url. -# Optional. Defaults to $public_protocol://$public_address:$port -# This url should *not* contain any API version and should have -# no trailing '/' -# Setting this variable overrides other $public_* parameters. -# -# [*admin_url*] -# The endpoint's admin url. -# Optional. Defaults to $admin_protocol://$admin_address:$port -# This url should *not* contain any API version and should have -# no trailing '/' -# Setting this variable overrides other $admin_* parameters. -# -# [*internal_url*] -# The endpoint's internal url. -# Optional. Defaults to $internal_protocol://$internal_address:$port -# This url should *not* contain any API version and should have -# no trailing '/' -# Setting this variable overrides other $internal_* parameters. 
-# -# [*role_agent*] -# name for the monasca agent role -# -# [*role_delegate*] -# name for the monasca delegate role -# -# [*role_admin*] -# name for the monasca admin role -# -# [*role_user*] -# name for the monasca user role -# -# [*user_roles_agent*] -# list of roles to assign to the monasca agent user -# -# [*user_roles_admin*] -# list of roles to assign to the monasca admin user -# -# [*user_roles_user*] -# list of roles to assign to the monasca user user -# -class monasca::keystone::auth ( - $auth_name = 'monasca', - $admin_email = 'monasca@localhost', - $agent_email = 'monasca@localhost', - $user_email = 'monasca@localhost', - $configure_user = true, - $configure_user_role = true, - $service_name = 'monasca', - $service_type = 'monitoring', - $service_description = 'Openstack Monitoring Service', - $public_address = '127.0.0.1', - $admin_address = '127.0.0.1', - $internal_address = '127.0.0.1', - $tenant = 'services', - $public_protocol = 'http', - $admin_protocol = 'http', - $internal_protocol = 'http', - $configure_endpoint = true, - $public_url = undef, - $admin_url = undef, - $internal_url = undef, - $role_agent = 'monasca-agent', - $role_delegate = 'monitoring-delegate', - $role_admin = 'monasca-admin', - $role_user = 'monasca-user', - $user_roles_agent = undef, - $user_roles_admin = undef, - $user_roles_user = undef, -) { - include monasca::params - - $admin_name = $::monasca::params::admin_name - $agent_name = $::monasca::params::agent_name - $user_name = $::monasca::params::user_name - $admin_password = $::monasca::params::admin_password - $agent_password = $::monasca::params::agent_password - $user_password = $::monasca::params::user_password - $port = $::monasca::params::port - $api_version = $::monasca::params::api_version - $region = $::monasca::params::region - - if $public_url { - $public_url_real = $public_url - } else { - $public_url_real = "${public_protocol}://${public_address}:${port}/${api_version}" - } - - if $admin_url { - $admin_url_real = $admin_url - } else { - $admin_url_real = "${admin_protocol}://${admin_address}:${port}/${api_version}" - } - - if $internal_url { - $internal_url_real = $internal_url - } else { - $internal_url_real = "${internal_protocol}://${internal_address}:${port}/${api_version}" - } - - if $configure_user { - Keystone_user_role[$agent_name] - ~> Service <| name == 'monasca-agent' |> - Keystone_user_role[$user_name] - ~> Service <| name == 'monasca-agent' |> - - keystone_user { $agent_name: - ensure => present, - password => $agent_password, - email => $agent_email, - } - - keystone_user { $user_name: - ensure => present, - password => $user_password, - email => $user_email, - } - } - - if $configure_user_role { - Keystone_user_role["${admin_name}@${tenant}"] - ~> Service <| name == 'monasca-api' |> - Keystone_user_role["${agent_name}@${tenant}"] - ~> Service <| name == 'monasca-api' |> - Keystone_user_role["${user_name}@${tenant}"] - ~> Service <| name == 'monasca-api' |> - Keystone_user_role["${agent_name}@${tenant}"] - ~> Service <| name == 'monasca-agent' |> - Keystone_user_role["${user_name}@${tenant}"] - ~> Service <| name == 'monasca-agent' |> - - if !defined(Keystone_role[$role_agent]) { - keystone_role { $role_agent: - ensure => present, - } - } - if !defined(Keystone_role[$role_delegate]) { - keystone_role { $role_delegate: - ensure => present, - } - } - if !defined(Keystone_role[$role_admin]) { - keystone_role { $role_admin: - ensure => present, - } - } - if !defined(Keystone_role[$role_user]) { - keystone_role { 
$role_user: - ensure => present, - } - } - - if $user_roles_agent { - $real_user_roles_agent = $user_roles_agent - } else { - $real_user_roles_agent = [$role_agent, $role_delegate] - } - if $user_roles_admin { - $real_user_roles_admin = $user_roles_admin - } else { - $real_user_roles_admin = ['admin'] - } - if $user_roles_user { - $real_user_roles_user = $user_roles_user - } else { - $real_user_roles_user = [$role_user] - } - - keystone_user_role { "${agent_name}@${tenant}": - ensure => present, - roles => $real_user_roles_agent, - } - keystone_user_role { "${user_name}@${tenant}": - ensure => present, - roles => $real_user_roles_user, - } - } - - keystone::resource::service_identity { 'monasca': - configure_user => $configure_user, - configure_user_role => $configure_user_role, - configure_endpoint => $configure_endpoint, - service_type => $service_type, - service_description => $service_description, - service_name => $service_name, - region => $region, - roles => $real_user_roles_admin, - auth_name => $admin_name, - password => $admin_password, - email => $admin_email, - tenant => $tenant, - public_url => $public_url_real, - admin_url => $admin_url_real, - internal_url => $internal_url_real, - } - - if $configure_endpoint { - Keystone_endpoint["${region}/${service_name}::${service_type}"] - ~> Service <| name == 'monasca-api' |> - } -} diff --git a/manifests/notification.pp b/manifests/notification.pp deleted file mode 100644 index d4c9b36..0000000 --- a/manifests/notification.pp +++ /dev/null @@ -1,171 +0,0 @@ -# == Class: monasca::notification -# -# Class for configuring monasca notifications -# -# === Parameters: -# -# [*from_email_address*] -# (Optional) Email address to send notifications from. -# Defaults to empty string. -# -# [*hipchat_ca_certs*] -# (Optional) CA cert file for hipchat notifications -# Defaults to "/etc/ssl/certs/ca-certificates.crt" -# -# [*hipchat_insecure*] -# (Optional) Flag to indicate if hipchat notification calls should -# be insecure. -# Defaults to False -# -# [*install_python_deps*] -# (Optional) Flag for whether or not to install python dependencies. -# Defaults to true. -# -# [*kafka_brokers*] -# (Optional) List of kafka broker servers and ports. -# Defaults to 'localhost:9092'. -# -# [*notification_user*] -# (Optional) Name of the monasca notification user. -# Defaults to 'monasca-notification'. -# -# [*pagerduty_url*] -# (Optional) URL of pager duty if used as a notification method. -# Defaults to 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'. -# -# [*periodic_kafka_topics*] -# (Optional) List of periodic notification kafka topics -# Defaults to '60: 60-seconds-notifications' -# -# [*periodic_zookeeper_paths*] -# (Optional) List of periodic notification zookeeper paths -# Defaults to '60: /notification/60_seconds' -# -# [*python_dep_ensure*] -# (Optional) Flag for whether or not to ensure/update python dependencies. -# Defaults to 'present'. -# -# [*slack_ca_certs*] -# (Optional) CA cert file for slack notifications. -# Defaults to "/etc/ssl/certs/ca-certificates.crt". -# -# [*slack_insecure*] -# (Optional) Flag to indicate if slack notification calls should -# be insecure. -# Defaults to False. -# -# [*smtp_password*] -# (Optional) Password for the smtp server. -# Defaults to empty string. -# -# [*smtp_port*] -# (Optional) Port on the smtp server to send mail to. -# Defaults to 25. -# -# [*smtp_server*] -# (Optional) Host of the smtp server. -# Defaults to 'localhost'. 
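The notification parameters documented above are typically supplied through Hiera, in the same style the module's other classes use for their samples. A small sketch covering just the SMTP-related settings; the host name, password and address are placeholders:

    monasca::notification::smtp_server: 'smtp.example.com'
    monasca::notification::smtp_port: 25
    monasca::notification::smtp_password: 'smtp_secret'
    monasca::notification::from_email_address: 'monasca-alarms@example.com'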
-# -# [*smtp_user*] -# (Optional) Name to use when authenticating agains the smtp server. -# Defaults to empty string. -# -# [*virtual_env*] -# directory of python virtual environment -# -# [*webhook_url*] -# (Optional) URL for webhook notifications. -# Defaults to empty string. -# -# [*zookeeper_servers*] -# (Optional) List of zookeeper servers and ports. -# Defaults to 'localhost:2181'. -# -class monasca::notification( - $from_email_address = '', - $hipchat_ca_certs = '/etc/ssl/certs/ca-certificates.crt', - $hipchat_insecure = false, - $install_python_deps = true, - $kafka_brokers = 'localhost:9092', - $notification_user = 'monasca-notification', - $pagerduty_url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json', - $periodic_kafka_topics = ['60: 60-seconds-notifications'], - $periodic_zookeeper_paths = ['60: /notification/60_seconds'], - $python_dep_ensure = 'present', - $slack_ca_certs = '/etc/ssl/certs/ca-certificates.crt', - $slack_insecure = false, - $smtp_password = '', - $smtp_port = 25, - $smtp_server = 'localhost', - $smtp_user = '', - $virtual_env = '/var/www/monasca-notification', - $webhook_url = '', - $zookeeper_servers = 'localhost:2181', -) -{ - include monasca::params - - # variables for the template - $sql_host = $::monasca::params::sql_host - $sql_user = $::monasca::params::sql_user - $sql_password = $::monasca::params::sql_password - $sql_port = $::monasca::params::sql_port - - $cfg_file = '/etc/monasca/notification.yaml' - $startup_script = '/etc/init/monasca-notification.conf' - - if $install_python_deps { - # Name virtualenv instead of python-virtualenv for compat with puppet-python - package { 'virtualenv': - ensure => $python_dep_ensure, - name => 'python-virtualenv', - before => Python::Virtualenv[$virtual_env], - } - - package { 'python-dev': - ensure => $python_dep_ensure, - before => Python::Virtualenv[$virtual_env], - } - } - - python::virtualenv { $virtual_env : - owner => 'root', - group => 'root', - require => [Package['virtualenv'],Package['python-dev']], - } - - python::pip { 'monasca-notification' : - virtualenv => $virtual_env, - owner => 'root', - require => Python::Virtualenv[$virtual_env], - } - - user { $notification_user: - ensure => present, - groups => $::monasca::group, - require => Group[$::monasca::group], - } - - file { $cfg_file: - ensure => file, - content => template('monasca/notification.yaml.erb'), - mode => '0644', - owner => $notification_user, - group => $::monasca::group, - require => [User[$notification_user], Group[$::monasca::group], File[$::monasca::log_dir]], - } ~> Service['monasca-notification'] - - service { 'monasca-notification': - ensure => running, - require => [File[$cfg_file], File[$startup_script]], - tag => 'monasca-service', - } - - file { $startup_script: - ensure => file, - content => template('monasca/notification.conf.erb'), - mode => '0755', - owner => 'root', - group => 'root', - } ~> Service['monasca-notification'] -} diff --git a/manifests/params.pp b/manifests/params.pp deleted file mode 100644 index 6defb05..0000000 --- a/manifests/params.pp +++ /dev/null @@ -1,117 +0,0 @@ -# == Class: monasca::params -# -# This class is used to specify configuration parameters that are common -# across all monasca services. 
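The monasca::notification class above renders /etc/monasca/notification.yaml and manages the monasca-notification service. A minimal declaration might look like the sketch below; the broker, zookeeper, and SMTP hosts are invented, and the class additionally assumes the monasca base class and monasca::params (which supply the group, log directory, and MySQL settings used by the template) are in the catalog.

# Sketch only: host names are placeholders.
class { 'monasca::notification':
  kafka_brokers      => 'kafka01:9092,kafka02:9092',
  zookeeper_servers  => 'zk01:2181,zk02:2181',
  smtp_server        => 'mail.example.com',
  from_email_address => 'monasca-alarms@example.com',
}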
-# -# === Parameters: -# -# [*api_db_user*] -# name of the monasca api user for the database -# -# [*api_db_password*] -# password for the monasca api database user -# -# [*port*] -# port to run monasca api server on -# -# [*api_version*] -# version of the monasca api to configure -# -# [*region*] -# default openstack region for this monasca api instance -# -# [*admin_name*] -# name of the monasca admin user -# -# [*agent_name*] -# name of the monasca agent user -# -# [*user_name*] -# name of the default monasca user -# -# [*auth_method*] -# keystone auth method, token or password -# -# [*admin_password*] -# password for the monasca admin user -# -# [*admin_project_name*] -# project name for the monasca admin user -# -# [*agent_password*] -# password for the monasca agent user -# -# [*user_password*] -# password for the monasca default user -# -# [*sql_host*] -# host of the mysql instance -# -# [*sql_user*] -# name of the mysql user -# -# [*sql_password*] -# password for the mysql user -# -# [*sql_port*] -# port for the mysql instance -# -# [*persister_config_defaults*] -# defaults for monasca persister settings -# -# [*pers_db_user*] -# name of the monasca persister user for the database -# -# [*pers_db_password*] -# password for the monasca persister database user -# -class monasca::params( - $api_db_user = 'mon_api', - $api_db_password = undef, - $port = '8070', - $api_version = 'v2.0', - $region = 'RegionOne', - $admin_name = 'monasca-admin', - $agent_name = 'monasca-agent', - $user_name = 'monasca-user', - $auth_method = 'token', - $admin_password = undef, - $admin_project_name = undef, - $agent_password = undef, - $user_password = undef, - $sql_host = undef, - $sql_user = undef, - $sql_password = undef, - $sql_port = 3306, - $pers_db_user = 'mon_persister', - $pers_db_password = undef, - $persister_config_defaults = { - 'admin_port' => 8091, - 'application_port' => 8090, - 'consumer_group_id' => 1, - 'database_url' => 'http://localhost:8086', - 'database_type' => 'influxdb', - } -) { - - include openstacklib::defaults - - validate_legacy(String, 'validate_string', $admin_password) - validate_legacy(String, 'validate_string', $admin_project_name) - validate_legacy(String, 'validate_string', $user_password) - validate_legacy(String, 'validate_string', $agent_password) - validate_legacy(String, 'validate_string', $sql_password) - validate_legacy(String, 'validate_string', $sql_host) - validate_legacy(String, 'validate_string', $api_db_password) - validate_legacy(String, 'validate_string', $pers_db_password) - - if $::osfamily == 'Debian' { - $agent_package = 'monasca-agent' - $agent_service = 'monasca-agent' - } elsif($::osfamily == 'RedHat') { - $agent_package = false - $agent_service = '' - } else { - fail("unsupported osfamily ${::osfamily}, currently Debian and Redhat are the only supported platforms") - } -} diff --git a/manifests/persister.pp b/manifests/persister.pp deleted file mode 100644 index e103b58..0000000 --- a/manifests/persister.pp +++ /dev/null @@ -1,84 +0,0 @@ -# == Class: monasca::persister -# -# Class to setup monasca persister -# -# === Parameters: -# -# [*blobmirror*] -# location of server to pull debian package from -# -# [*consumer_id*] -# id of the kafka consumer for this persister -# -# [*batch_size*] -# batch size of metrics/alarm to persist at the same time -# -# [*num_threads*] -# number of persister threads -# -# [*batch_seconds*] -# frequency for this perisiter to write to db -# -# [*config*] -# persister specific configuration -- allows running multiple 
persisters. -# -# [*db_admin_password*] -# admin password for database -# -# [*mon_pers_build_ver*] -# version of the persister to install -# -# [*mon_pers_deb*] -# name of the debian package for the persister -# -# [*pers_user*] -# name of the monasca perisister user -# -# [*zookeeper_servers*] -# list of zookeeper servers -# -class monasca::persister ( - $blobmirror = undef, - $consumer_id = 1, - $batch_size = 10000, - $num_threads = 1, - $batch_seconds = 30, - $config = $monasca::params::persister_config_defaults, - $db_admin_password = undef, - $mon_pers_build_ver = undef, - $mon_pers_deb = undef, - $pers_user = 'persister', - $zookeeper_servers = undef, -) { - include monasca - include monasca::params - - $pers_fetch_url = "http://${blobmirror}/repos/monasca/monasca_persister" - $latest_pers_deb = "/tmp/${mon_pers_deb}" - - wget::fetch { "${pers_fetch_url}/${mon_pers_build_ver}/${mon_pers_deb}": - destination => $latest_pers_deb, - timeout => 300, - before => [Package['install-persister'], File[$latest_pers_deb]], - } - - file { $latest_pers_deb: - ensure => present, - } - - package { 'monasca-persister': - ensure => latest, - provider => dpkg, - source => $latest_pers_deb, - alias => 'install-persister', - tag => ['openstack', 'monasca-package'], - } - - user { $pers_user: - ensure => present, - groups => $::monasca::group, - require => Group[$::monasca::group], - } - - create_resources('monasca::persister::config', $config) -} diff --git a/manifests/persister/config.pp b/manifests/persister/config.pp deleted file mode 100644 index a20a203..0000000 --- a/manifests/persister/config.pp +++ /dev/null @@ -1,89 +0,0 @@ -# == Defined Type: monasca::persister::config -# -# Defined type to setup monasca persister -# -# === Parameters: -# -# [*batch_seconds*] -# frequency for this perisiter to write to db -# -# [*batch_size*] -# batch size of metrics/alarm to persist at the same time -# -# [*check_conn_while_idle*] -# flag for whether db connection should stay alive while idle -# -# [*config*] -# persister specific configuration -- allows running multiple persisters. 
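The monasca::persister class above fetches and installs the persister package and then turns its config hash into monasca::persister::config resources, so one node can run several persisters. A hedged sketch, with a made-up mirror host, package version, and InfluxDB URL:

# Sketch only: mirror, version, and database URL are placeholders.
class { 'monasca::persister':
  blobmirror         => 'blobmirror.example.com',
  mon_pers_build_ver => '1.2.0',
  mon_pers_deb       => 'monasca-persister_1.2.0_all.deb',
  zookeeper_servers  => 'zk01:2181,zk02:2181',
  db_admin_password  => 'influx-admin-secret',
  config             => {
    'monasca-persister' => {
      'config' => { 'database_url' => 'http://influxdb.example.com:8086' },
    },
  },
}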
-# -# [*consumer_id*] -# id of the kafka consumer for this persister -# -# [*database_type*] -# influxdb or vertica -# -# [*db_admin_password*] -# admin password for database -# -# [*gzip_setting*] -# true for gzipping http data -# -# [*num_threads*] -# number of persister threads to run -# -# [*pers_user*] -# name of the monasca perisister default user -# -# [*replication_factor*] -# replication factor for this persister -# -# [*retention_policy*] -# retention policy for this persister -# -# [*zookeeper_servers*] -# list of zookeeper servers -# -define monasca::persister::config ( - $batch_seconds = $monasca::persister::batch_seconds, - $batch_size = $monasca::persister::batch_size, - $check_conn_while_idle = true, - $config = {}, - $consumer_id = $monasca::persister::consumer_id, - $database_type = $monasca::persister::database_type, - $db_admin_password = $monasca::persister::db_admin_password, - $gzip_setting = true, - $num_threads = $monasca::persister::num_threads, - $pers_user = $monasca::persister::pers_user, - $replication_factor = 1, - $retention_policy = 'raw', - $zookeeper_servers = $monasca::persister::zookeeper_servers, -) { - include monasca::params - $persister_config = deep_merge($monasca::params::persister_config_defaults, $config) - - $persister_service_name = $name - $pers_cfg_file = "/etc/monasca/${persister_service_name}.yml" - $pers_db_user = $::monasca::params::pers_db_user - $pers_db_password = $::monasca::params::pers_db_password - - file { $pers_cfg_file: - ensure => file, - content => template('monasca/persister-config.yml.erb'), - mode => '0644', - owner => $pers_user, - group => $::monasca::group, - require => [User[$pers_user], Group[$::monasca::group], File[$::monasca::log_dir]], - } ~> Service[$persister_service_name] - - service { $persister_service_name: - ensure => running, - require => [File[$pers_cfg_file], Package['install-persister'], - Monasca::Persister::Startup_script[$persister_service_name]], - tag => 'monasca-service', - } - - monasca::persister::startup_script { $persister_service_name: - require => Package['install-persister'], - } - -} diff --git a/manifests/persister/startup_script.pp b/manifests/persister/startup_script.pp deleted file mode 100644 index 7966abd..0000000 --- a/manifests/persister/startup_script.pp +++ /dev/null @@ -1,17 +0,0 @@ -# -# Defined type for creating a persister startup script. 
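The monasca::persister::config defined type above is normally created through the config hash of monasca::persister, but it can also be declared directly when an extra persister instance with its own settings is wanted. A sketch, assuming monasca::persister is already in the catalog (it supplies the package and the parameter defaults); the title and overrides are illustrative only.

# Sketch only: title, consumer id, and database URL are placeholders.
monasca::persister::config { 'monasca-persister-alarms':
  consumer_id => 2,
  num_threads => 2,
  config      => { 'database_url' => 'http://influxdb.example.com:8086' },
}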
-# -define monasca::persister::startup_script ( -){ - $persister_service_name = $name - $script = "/etc/init/${persister_service_name}.conf" - - file { $script: - ensure => file, - content => template('monasca/persister-startup-script.erb'), - mode => '0755', - owner => 'root', - group => 'root', - } ~> Service[$persister_service_name] - -} diff --git a/manifests/storm/config.pp b/manifests/storm/config.pp deleted file mode 100644 index 4a69b56..0000000 --- a/manifests/storm/config.pp +++ /dev/null @@ -1,110 +0,0 @@ -# -# Class for configuring misc storm packages for use by monasca api server -# -# [*storm_version*] -# version of apache-storm to use -# -# [*mirror*] -# location of apache-storm mirror -# -# [*install_dir*] -# location to install storm -# -# [*storm_user*] -# name of the storm user -# -# [*storm_group*] -# name of the storm group -# -# [*log_dir*] -# directory for storm logs -# -class monasca::storm::config ( - $storm_version = 'apache-storm-0.9.3', - $mirror = 'http://apache.arvixe.com/storm', - $install_dir = '/opt/storm', - $storm_user = 'storm', - $storm_group = 'storm', - $log_dir = '/var/log/storm', -) { - $cache_dir = '/var/cache/storm' - $storm_local = '/storm-local' - - user { $storm_user: - ensure => present, - } - - group { $storm_group: - ensure => present, - } - - File { - mode => '0644', - owner => $storm_user, - group => $storm_group, - } - - file { ['/usr/lib/storm', $storm_local, $install_dir]: - ensure => directory, - } - - $tarfile = "${storm_version}.tar.gz" - - # - # The redownload and cache_dir flags will only do the wget if it's changed - # - wget::fetch { "${mirror}/${storm_version}/${tarfile}": - destination => "/${cache_dir}/${tarfile}", - timeout => 120, - before => Exec['untar-storm-package'], - cache_dir => $cache_dir, - redownload => false, - verbose => true, - } - - # - # Only untar if the directory hasn't yet been untarred yet. - # - exec { "tar -xvzf /${cache_dir}/${tarfile}": - path => '/bin:/sbin:/usr/bin:/usr/sbin', - cwd => $install_dir, - alias => 'untar-storm-package', - user => $storm_user, - group => $storm_group, - before => File[$log_dir], - creates => "${install_dir}/${storm_version}", - } - - file { "${install_dir}/current": - ensure => link, - target => "${install_dir}/${storm_version}", - } - - file { $log_dir: - ensure => directory, - } - - monasca::storm::startup_script { - '/etc/init.d/storm-ui': - require => File[$install_dir], - storm_service => 'ui', - storm_install_dir => "${install_dir}/current", - storm_user => $storm_user, - } - - monasca::storm::startup_script { - '/etc/init.d/storm-supervisor': - require => [ File[$install_dir], File[$storm_local] ], - storm_service => 'supervisor', - storm_install_dir => "${install_dir}/current", - storm_user => $storm_user, - } - - monasca::storm::startup_script { - '/etc/init.d/storm-nimbus': - require => [ File[$install_dir], File[$storm_local] ], - storm_service => 'nimbus', - storm_install_dir => "${install_dir}/current", - storm_user => $storm_user, - } -} diff --git a/manifests/storm/startup_script.pp b/manifests/storm/startup_script.pp deleted file mode 100644 index 6795b54..0000000 --- a/manifests/storm/startup_script.pp +++ /dev/null @@ -1,29 +0,0 @@ -# == Defined Type: monasca::storm::startup_script -# -# Defined type for creating a storm startup script. 
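The monasca::storm::config class above downloads an Apache Storm tarball, unpacks it under install_dir, and drops init scripts for the nimbus, supervisor, and ui services. A sketch with placeholder mirror and version values; the mirror hard-coded as the class default may not be reachable, so a local or archive mirror is usually passed in:

# Sketch only: mirror URL and version are placeholders.
class { 'monasca::storm::config':
  storm_version => 'apache-storm-0.9.3',
  mirror        => 'https://archive.apache.org/dist/storm',
  install_dir   => '/opt/storm',
  log_dir       => '/var/log/storm',
}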
-# -# === Parameters: -# -# [*storm_service*] -# executable for the storm service -# -# [*storm_install_dir*] -# directory for the storm installation -# -# [*storm_user*] -# name of the storm user -# -define monasca::storm::startup_script ( - $storm_service = undef, - $storm_install_dir = undef, - $storm_user = undef -){ - $script = $name - file { $script: - ensure => file, - content => template('monasca/storm-startup-script.erb'), - mode => '0755', - owner => 'root', - group => 'root', - } -} diff --git a/manifests/thresh.pp b/manifests/thresh.pp deleted file mode 100644 index 73cf747..0000000 --- a/manifests/thresh.pp +++ /dev/null @@ -1,91 +0,0 @@ -# -# Class to install monasca thresholding engine -# -# [*blobmirror*] -# location of the server to pull debians from -# -# [*kafka_brokers*] -# list of kafka brokers -# -# [*mon_thresh_build_ver*] -# version of the thresh debian package -# -# [*mon_thresh_deb*] -# name of the thresh debian package -# -# [*zookeeper_servers*] -# list of zookeeper servers -# -class monasca::thresh ( - $blobmirror = undef, - $kafka_brokers = undef, - $mon_thresh_build_ver = undef, - $mon_thresh_deb = undef, - $zookeeper_servers = undef, -) { - include monasca - include monasca::params - - # variables for the template - $sql_host = $::monasca::params::sql_host - $sql_user = $::monasca::params::sql_user - $sql_password = $::monasca::params::sql_password - $sql_port = $::monasca::params::sql_port - - $thresh_fetch_url = "http://${blobmirror}/repos/monasca/monasca_thresh" - $latest_thresh_deb = "/tmp/${mon_thresh_deb}" - $thresh_cfg_file = '/etc/monasca/thresh-config.yml' - $startup_script = '/etc/init.d/monasca-thresh' - $startup_script_src = 'puppet:///modules/monasca/monasca-thresh' - - wget::fetch { "${thresh_fetch_url}/${mon_thresh_build_ver}/${mon_thresh_deb}": - destination => $latest_thresh_deb, - timeout => 300, - before => [Package['install-thresh'], File[$latest_thresh_deb]], - } ~> Service['monasca-thresh'] - - file { $latest_thresh_deb: - ensure => present, - } - - file { $thresh_cfg_file: - ensure => file, - content => template('monasca/thresh-config.yml.erb'), - mode => '0644', - owner => 'root', - group => $::monasca::group, - require => [Group[$::monasca::group], File[$::monasca::log_dir]], - } - - package { 'monasca-thresh': - ensure => latest, - provider => dpkg, - source => $latest_thresh_deb, - alias => 'install-thresh', - tag => ['openstack', 'monasca-package'], - } - - service { 'monasca-thresh': - ensure => running, - require => [File[$thresh_cfg_file], - File[$latest_thresh_deb], - File[$startup_script], - User['thresh']], - tag => 'monasca-service', - } - - user { 'thresh': - ensure => present, - groups => $::monasca::group, - require => Group[$::monasca::group], - } - - file { $startup_script: - ensure => file, - source => $startup_script_src, - mode => '0755', - owner => 'root', - group => 'root', - require => Package['install-thresh'], - } -} diff --git a/manifests/vertica/config.pp b/manifests/vertica/config.pp deleted file mode 100644 index 1c25f9a..0000000 --- a/manifests/vertica/config.pp +++ /dev/null @@ -1,243 +0,0 @@ -# -# Class for vertica specific files -# -# === Parameters -# -# [*api_pool*] -# name of the resource pool for monasca api process -# -# [*api_pool_mem_size*] -# memory size for api resource pool -# -# [*api_pool_max_mem_size*] -# max memory size for api resource pool -# -# [*api_pool_planned_con*] -# planned concurrency for api resource pool -# -# [*api_pool_max_con*] -# max concurrency for api resource pool -# -# 
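The monasca::thresh class above installs the thresholding engine from a Debian package on a blob mirror and renders /etc/monasca/thresh-config.yml from the kafka, zookeeper, and monasca::params MySQL settings. A sketch with invented hosts, version, and package name:

# Sketch only: hosts, version, and package name are placeholders.
class { 'monasca::thresh':
  blobmirror           => 'blobmirror.example.com',
  kafka_brokers        => 'kafka01:9092,kafka02:9092',
  zookeeper_servers    => 'zk01:2181,zk02:2181',
  mon_thresh_build_ver => '1.1.0',
  mon_thresh_deb       => 'monasca-thresh_1.1.0_all.deb',
}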
[*api_pool_runtime_priority*] -# runtime priority for api resource pool (LOW, MEDIUM..) -# -# [*api_pool_runtime_priority_thresh*] -# runtime priority threshold for api resource pool (# of seconds) -# -# [*api_pool_priority*] -# priority threshold api resource pool -# -# [*api_pool_exec_parallel*] -# execution parallelism for api resource pool -# -# [*db_admin_password*] -# database admin password -# -# [*db_group*] -# name of the database group -# -# [*db_user*] -# name of the database user -# -# [*metrics_schema*] -# location of the metrics schema/projections file -# -# [*monitor_password*] -# database monitor user password -# -# [*monitor_user*] -# database monitor user name -# -# [*pers_pool*] -# name of the resource pool for monasca persister process -# -# [*pers_pool_mem_size*] -# memory size for persister resource pool -# -# [*pers_pool_max_mem_size*] -# max memory size for persister resource pool -# -# [*pers_pool_planned_con*] -# planned concurrency for persister resource pool -# -# [*pers_pool_max_con*] -# max concurrency for persister resource pool -# -# [*pers_pool_runtime_priority*] -# runtime priority for persister resource pool (LOW, MEDIUM..) -# -# [*pers_pool_runtime_priority_thresh*] -# runtime priority threshold for persister resource pool (# of seconds) -# -# [*pers_pool_priority*] -# priority threshold persister resource pool -# -# [*pers_pool_exec_parallel*] -# execution parallelism for persister resource pool -# -# [*virtual_env*] -# location of python virtual environment to install to for any -# python utilities -# -class monasca::vertica::config ( - $api_pool = 'api_pool', - $api_pool_mem_size = '5G', - $api_pool_max_mem_size = '15G', - $api_pool_planned_con = '2', - $api_pool_max_con = '4', - $api_pool_runtime_priority = 'MEDIUM', - $api_pool_runtime_priority_thresh = '2', - $api_pool_priority = '50', - $api_pool_exec_parallel = '2', - $db_user = 'dbadmin', - $db_group = 'verticadba', - $db_admin_password = unset, - $metrics_schema = 'puppet:///modules/monasca/vertica/mon_metrics_schema.sql', - $monitor_password = unset, - $monitor_user = 'monitor', - $pers_pool = 'persister_pool', - $pers_pool_mem_size = '5G', - $pers_pool_max_mem_size = '15G', - $pers_pool_planned_con = '2', - $pers_pool_max_con = '4', - $pers_pool_runtime_priority = 'MEDIUM', - $pers_pool_runtime_priority_thresh = '2', - $pers_pool_priority = '60', - $pers_pool_exec_parallel = '1', - $virtual_env = '/var/lib/vertica', -) { - - include monasca::params - - $api_db_user = $::monasca::params::api_db_user - $api_db_password = $::monasca::params::api_db_password - $pers_db_user = $::monasca::params::pers_db_user - $pers_db_password = $::monasca::params::pers_db_password - - $files = 'puppet:///modules/monasca/vertica/' - $templates = 'monasca/vertica' - $install_dir = '/var/vertica' - $alarms_schema = 'mon_alarms_schema.sql' - $grants_schema = 'mon_grants.sql' - $config_schema = 'mon_schema.sql' - $users_schema = 'mon_users.sql' - $cluster_script = 'create_mon_db_cluster.sh' - $single_node_script = 'create_mon_db.sh' - $prune_script_name = 'prune_vertica.py' - $prune_script = "${virtual_env}/bin/${prune_script_name}" - $partition_drop_script_name = 'drop_vertica_partitions.py' - $partition_drop_script = "${virtual_env}/bin/${partition_drop_script_name}" - - file { $install_dir: - ensure => directory, - owner => $db_user, - group => $db_group, - mode => '0755', - } - - file { "${install_dir}/${alarms_schema}": - ensure => file, - source => "${files}/${alarms_schema}", - mode => '0644', - owner => 
$db_user, - group => $db_group, - require => File[$install_dir], - } - - file { '/usr/sbin/vsql': - ensure => file, - content => template("${templates}/vsql.erb"), - mode => '0755', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - python::virtualenv { $virtual_env : - owner => 'root', - group => 'root', - before => [File[$prune_script], File[$partition_drop_script]], - require => [Package['virtualenv'],Package['python-dev']], - } - - file { $prune_script: - ensure => file, - content => template("${templates}/${prune_script_name}.erb"), - mode => '0755', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { $partition_drop_script: - ensure => file, - content => template("${templates}/${partition_drop_script_name}.erb"), - mode => '0755', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { '/usr/sbin/update_vertica_stats.sh': - ensure => file, - source => "${files}/update_vertica_stats.sh", - mode => '0755', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { "${install_dir}/${grants_schema}": - ensure => file, - source => "${files}/${grants_schema}", - mode => '0644', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { "${install_dir}/mon_metrics_schema.sql": - ensure => file, - source => $metrics_schema, - mode => '0644', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { "${install_dir}/${config_schema}": - ensure => file, - content => template("${templates}/${config_schema}.erb"), - mode => '0644', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { "${install_dir}/${users_schema}": - ensure => file, - content => template("${templates}/${users_schema}.erb"), - mode => '0644', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { "${install_dir}/${cluster_script}": - ensure => file, - content => template("${templates}/${cluster_script}.erb"), - mode => '0755', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } - - file { "${install_dir}/${single_node_script}": - ensure => file, - content => template("${templates}/${single_node_script}.erb"), - mode => '0755', - owner => $db_user, - group => $db_group, - require => File[$install_dir], - } -} diff --git a/manifests/virtualenv/agent_instance.pp b/manifests/virtualenv/agent_instance.pp deleted file mode 100644 index c048c39..0000000 --- a/manifests/virtualenv/agent_instance.pp +++ /dev/null @@ -1,65 +0,0 @@ -# == Define: virtualenv::agent_instance -# -# Sets up a virtualenv instance and handles agent specific setup in the venv. -# See the instance class for details on using virtualenv instances -# -# === Parameters -# -# [*ensure*] (required) Whether or not the package should be removed or -# installed. Should be 'present', or 'absent'. For package installs, other -# values such as a version number or 'latest' are also acceptable. -# -# [*venv_active*] (optional) Whether or not the virtualenv should be made -# active by managing symlinks into it and restarting services if the links are -# changed. Only one virtualenv can be active at a time. Defaults to false. -# -# [*basedir*] (required) Base directory for storing virtualenvs. 
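The monasca::vertica::config class above lays down the Vertica schema files, resource-pool definitions, and maintenance scripts used when Vertica is the metrics database. A hedged sketch; the passwords and pool sizes are placeholders, and the class also expects the virtualenv and python-dev packages plus the api/persister database credentials from monasca::params to be managed elsewhere in the catalog:

# Sketch only: passwords and pool sizes are placeholders.
class { 'monasca::vertica::config':
  db_admin_password  => 'vertica-admin-secret',
  monitor_password   => 'monitor-secret',
  api_pool_mem_size  => '8G',
  pers_pool_mem_size => '8G',
}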
-# -# [*symlink*] (required if venv_active is true) The path to link to the venv_dir -# -# [*venv_prefix*] Prefix to give to virtualenv directories -# This can be specified to provide more meaningful names, or to have multiple -# virtualenvs installed at the same time. Defaults to $name -# -# [*venv_requirements*] (required) Python requirements.txt to pass to pip when -# populating the virtualenv. Required if the instance is ensured to be present. -# -# [*venv_extra_args*] (optional) Extra arguments that will be passed to `pip -# install` when creating the virtualenv. - -define monasca::virtualenv::agent_instance( - $basedir, - $venv_prefix = $name, - $ensure = 'present', - $symlink = undef, - $venv_requirements = undef, - $venv_active = false, - $venv_extra_args = undef, -) { - - validate_legacy(String, 'validate_string', $ensure) - - $valid_values = [ - '^present$', - '^absent$', - ] - - validate_legacy(Enum['present', 'absent'], 'validate_re', $ensure, - [$valid_values, "Unknown value '${ensure}' for ensure, must be present or absent"]) - - File[$basedir] -> anchor { 'monasca::virtualenv::instance': } - Package<| name == 'python-virtualenv' |> -> Anchor['monasca::virtualenv::instance'] - Package<| name == 'python-dev' |> -> Anchor['monasca::virtualenv::instance'] - - monasca::virtualenv::instance { $name: - ensure => $ensure, - basedir => $basedir, - venv_prefix => $venv_prefix, - symlink => $symlink, - venv_requirements => $venv_requirements, - venv_active => $venv_active, - venv_extra_args => $venv_extra_args, - require => Anchor['monasca::virtualenv::instance'], - } - -} diff --git a/manifests/virtualenv/instance.pp b/manifests/virtualenv/instance.pp deleted file mode 100644 index 767c774..0000000 --- a/manifests/virtualenv/instance.pp +++ /dev/null @@ -1,109 +0,0 @@ -# == Define: virtualenv::instance -# -# This class will manage the installation of the monasca agent into a Python -# virtualenv. It will also manage the config files needed by that software, -# with different policies for packages and virtualenvs. By default the config -# files will be copied from the template files internal to the module. This -# behavior can be overridden by providing a $config_files hash. -# -# Virtualenv installations are built by installing packages from a given -# requirements.txt file. For production use you will normally want to override -# the requirements.txt and provide one that contains pinned module versions, -# and possibly include information about a local pypi mirror in the -# requirements.txt. -# -# This module explicitly supports provisioning multiple virtualenv based -# installations in order to make upgrades and rollbacks easier. To take -# advantage of this, you can define additional instances of -# monasca::virtualenv::instance type with the active flag set to false -# and with different $venv_prefix options. The monasca::agent class will allow -# configuring multiple virtualenvs via hiera. -# -# If using virtualenv based installations it's *strongly* recommended that -# virtualenvs be treated as immutable once created. Behavior with changing -# requirements.txt or code may not be what you expect, since the existing -# virtualenv will be updated, not rebuilt when requirements.txt or the git -# revision changes. -# -# === Parameters -# -# [*ensure*] (required) Whether or not the package should be removed or -# installed. Should be 'present', or 'absent'. For package installs, other -# values such as a version number or 'latest' are also acceptable. 
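The monasca::virtualenv::agent_instance defined type above builds an agent virtualenv from a requirements file and, when venv_active is true, repoints a symlink at it so upgrades and rollbacks are just a link change. A sketch; the title, requirements source, and paths are invented, and File['/opt/monasca-agent'] is assumed to be declared elsewhere because the define orders itself after its basedir.

# Sketch only: paths, title, and requirements source are placeholders.
monasca::virtualenv::agent_instance { 'agent-20240101':
  basedir           => '/opt/monasca-agent',
  venv_requirements => 'puppet:///modules/profile/monasca/agent-requirements.txt',
  venv_active       => true,
  symlink           => '/opt/monasca-agent/current',
}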
-# -# [*venv_active*] (optional) Whether or not the virtualenv should be made -# active by managing symlinks into it and restarting services if the links are -# changed. Only one virtualenv can be active at a time. Defaults to false. -# -# [*basedir*] (required) Base directory for storing virtualenvs. -# -# [*symlink*] (required if venv_active is true) The path to link to the venv_dir -# -# [*venv_prefix*] Prefix to give to virtualenv directories -# This can be specified to provide more meaningful names, or to have multiple -# virtualenvs installed at the same time. Defaults to $name -# -# [*venv_requirements*] (required) Python requirements.txt to pass to pip when -# populating the virtualenv. Required if the instance is ensured to be present. -# -# [*venv_extra_args*] (optional) Extra arguments that will be passed to `pip -# install` when creating the virtualenv. - -define monasca::virtualenv::instance( - $basedir, - $venv_prefix = $name, - $ensure = 'present', - $symlink = undef, - $venv_requirements = undef, - $venv_active = false, - $venv_extra_args = undef, -) { - - validate_legacy(String, 'validate_string', $ensure) - - $valid_values = [ - '^present$', - '^absent$', - ] - - validate_legacy(Enum['present', 'absent'], 'validate_re', $ensure, - [$valid_values, "Unknown value '${ensure}' for ensure, must be present or absent"]) - - $req_dest = "${basedir}/${venv_prefix}-requirements.txt" - $venv_dir = "${basedir}/${venv_prefix}-venv" - $venv_name = "${venv_prefix}-${name}" - - if $ensure == 'present' { - validate_legacy(String, 'validate_string', $venv_requirements) - - file { $req_dest: - ensure => 'file', - owner => 'root', - group => 'root', - mode => '0644', - source => $venv_requirements, - before => Python::Virtualenv[$venv_name], - } - } else { - file { $req_dest: - ensure => 'absent', - } - } - - python::virtualenv { $venv_name: - ensure => $ensure, - venv_dir => $venv_dir, - requirements => $req_dest, - extra_pip_args => $venv_extra_args, - owner => 'root', - group => 'root', - } - - if $venv_active { - file { $symlink: - ensure => 'link', - force => true, - target => $venv_dir, - } - } -} diff --git a/metadata.json b/metadata.json deleted file mode 100644 index a573a09..0000000 --- a/metadata.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "name": "openstack-monasca", - "version": "8.2.0", - "author": "Time Warner Cable and OpenStack Contributors", - "summary": "Puppet module for OpenStack Monasca", - "license": "Apache-2.0", - "source": "https://opendev.org/openstack/puppet-monasca.git", - "project_page": "https://launchpad.net/puppet-monasca", - "issues_url": "https://bugs.launchpad.net/puppet-monasca", - "requirements": [ - { - "name": "puppet", - "version_requirement": ">= 6.0.0 < 7.0.0" - } - ], - "operatingsystem_support": [ - { - "operatingsystem": "Debian", - "operatingsystemrelease": [ - "9" - ] - }, - { - "operatingsystem": "Ubuntu", - "operatingsystemrelease": [ - "20.04" - ] - } - ], - "description": "Installs and configures OpenStack Monasca (Monitoring as a Service).", - "dependencies": [ - { - "name": "deric/storm", - "version_requirement": ">=0.0.1 <1.0.0" - }, - { - "name": "deric/zookeeper", - "version_requirement": ">=0.0.1 <1.0.0" - }, - { - "name": "jdowning/influxdb", - "version_requirement": ">=0.3.0 <1.0.0" - }, - { - "name": "maestrodev/wget", - "version_requirement": ">=0.0.1 <2.0.0" - }, - { - "name": "puppetlabs/mysql", - "version_requirement": ">=6.0.0 <12.0.0" - }, - { - "name": "openstack/keystone", - "version_requirement": ">=19.2.0 <20.0.0" - }, - { - 
"name": "openstack/openstacklib", - "version_requirement": ">=19.2.0 <20.0.0" - }, - { - "name": "opentable/kafka", - "version_requirement": ">=1.0.0 <2.0.0" - }, - { - "name": "puppetlabs/stdlib", - "version_requirement": ">=5.0.0 <8.0.0" - }, - { - "name": "puppet/python", - "version_requirement": ">=2.1.1 <7.0.0" - } - ] -} diff --git a/releasenotes/notes/deprecate-alarmdefs-b82d96e8ac36cdee.yaml b/releasenotes/notes/deprecate-alarmdefs-b82d96e8ac36cdee.yaml deleted file mode 100644 index e371d35..0000000 --- a/releasenotes/notes/deprecate-alarmdefs-b82d96e8ac36cdee.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - The parameters install_python_deps and python_dep_ensure in monasca::alarmdefs - is deprecated, has no effect and will be removed in a future release. diff --git a/releasenotes/notes/keystone-auth-name-dc84be6eb135881e.yaml b/releasenotes/notes/keystone-auth-name-dc84be6eb135881e.yaml deleted file mode 100644 index 835c883..0000000 --- a/releasenotes/notes/keystone-auth-name-dc84be6eb135881e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The keystone auth class has been updated to provide a default service_name - to allow a user to specify a custom auth_name that may not contain the - name of the service. diff --git a/releasenotes/notes/puppet4-mysql-func-5a0aec333e429d3f.yaml b/releasenotes/notes/puppet4-mysql-func-5a0aec333e429d3f.yaml deleted file mode 100644 index 9db533e..0000000 --- a/releasenotes/notes/puppet4-mysql-func-5a0aec333e429d3f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - | - This module now requires a puppetlabs-mysql version >= 6.0.0 diff --git a/releasenotes/notes/release-note-ubuntu-py3-2f3de3fab77d1991.yaml b/releasenotes/notes/release-note-ubuntu-py3-2f3de3fab77d1991.yaml deleted file mode 100644 index 8ec2452..0000000 --- a/releasenotes/notes/release-note-ubuntu-py3-2f3de3fab77d1991.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - In this release Ubuntu has moved all projects that supported it to python3 - which means that there will be a lot of changes. The Puppet OpenStack project - does not test the upgrade path from python2 to python3 packages so there might - be manual steps required when moving to the python3 packages. -upgrade: - - | - Ubuntu packages are now using python3, the upgrade path is not tested by - Puppet OpenStack. Manual steps may be required when upgrading. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index d6987ce..0000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,257 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = u'2018, Puppet OpenStack Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '' -# The full version, including alpha/beta/rc tags. -release = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. 
-#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'puppet-monascaReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'puppet-monascaReleaseNotes.tex', u'puppet-monasca Release Notes Documentation', - u'2018, Puppet OpenStack Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'puppet-monascareleasenotes', u'puppet-monasca Release Notes Documentation', - [u'2018, Puppet OpenStack Developers'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'puppet-monascaReleaseNotes', u'puppet-monasca Release Notes Documentation', - u'2018, Puppet OpenStack Developers', 'puppet-monascaReleaseNotes', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] - - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/puppet-monasca' -openstackdocs_bug_project = 'puppet-monasca' -openstackdocs_bug_tag = '' -openstackdocs_auto_name = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 2bc4ac0..0000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -======================================= -Welcome to senlin Release Notes! -======================================= - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - unreleased - wallaby - victoria - ussuri - train - stein - rocky - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst deleted file mode 100644 index 40dd517..0000000 --- a/releasenotes/source/rocky.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Rocky Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/rocky diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst deleted file mode 100644 index efaceb6..0000000 --- a/releasenotes/source/stein.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Stein Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 5839003..0000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 2334dd5..0000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - - .. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e..0000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst deleted file mode 100644 index 4efc7b6..0000000 --- a/releasenotes/source/victoria.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -Victoria Series Release Notes -============================= - -.. 
release-notes:: - :branch: stable/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst deleted file mode 100644 index d77b565..0000000 --- a/releasenotes/source/wallaby.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -Wallaby Series Release Notes -============================ - -.. release-notes:: - :branch: stable/wallaby diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 15f2abd..0000000 --- a/setup.cfg +++ /dev/null @@ -1,13 +0,0 @@ -[metadata] -name = puppet-monasca -summary = Puppet module for OpenStack Monasca -description_file = - README.md -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/puppet-openstack-guide/latest/ -classifier = - Intended Audience :: Developers - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux diff --git a/spec/acceptance/monasca_alarmdefs_spec.rb b/spec/acceptance/monasca_alarmdefs_spec.rb deleted file mode 100644 index ef37bf7..0000000 --- a/spec/acceptance/monasca_alarmdefs_spec.rb +++ /dev/null @@ -1,26 +0,0 @@ -require 'spec_helper_acceptance' - -describe 'alarmdefs class' do - - describe 'bootstrapping alarm definitions' do - it 'we expect a failure for now' do - tmpdir = default.tmpdir('alarmdefs') - pp = <<-EOS - class { 'monasca::alarmdefs': - admin_password => 'foo', - api_server_url => 'http://127.0.0.1:8070', - auth_url => 'http://127.0.0.1:5000', - project_name => 'project_foo', - } - EOS - - # - # Since the bootstrap script will try to talk - # to a real keystone and monasca api server. - # - # TODO: More comprehensive stack setup - # - apply_manifest(pp, :catch_failures => false) - end - end -end diff --git a/spec/acceptance/monasca_config_spec.rb b/spec/acceptance/monasca_config_spec.rb deleted file mode 100644 index c1851d9..0000000 --- a/spec/acceptance/monasca_config_spec.rb +++ /dev/null @@ -1,90 +0,0 @@ -require 'spec_helper_acceptance' - -describe 'basic monasca_config resource' do - - context 'default parameters' do - - it 'should work with no errors' do - pp= <<-EOS - Exec { logoutput => 'on_failure' } - - File <||> -> Monasca_config <||> - File <||> -> Agent_config <||> - - file { '/etc/monasca' : - ensure => directory, - } - file { '/etc/monasca/monasca.conf' : - ensure => file, - } - file { '/etc/monasca/agent/agent.conf' : - ensure => file, - } - - monasca_config { 'DEFAULT/thisshouldexist' : - value => 'foo', - } - - monasca_config { 'DEFAULT/thisshouldnotexist' : - value => '', - } - - monasca_config { 'DEFAULT/thisshouldexist2' : - value => '', - ensure_absent_val => 'toto', - } - - monasca_config { 'DEFAULT/thisshouldnotexist2' : - value => 'toto', - ensure_absent_val => 'toto', - } - - agent_config { 'DEFAULT/thisshouldexist' : - value => 'foo', - } - - agent_config { 'DEFAULT/thisshouldnotexist' : - value => '', - } - - agent_config { 'DEFAULT/thisshouldexist2' : - value => '', - ensure_absent_val => 'toto', - } - - agent_config { 'DEFAULT/thisshouldnotexist2' : - value => 'toto', - ensure_absent_val => 'toto', - } - EOS - - - # Run it twice and test for idempotency - apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) - end - - describe file('/etc/monasca/monasca.conf') do - it { is_expected.to exist } - it { is_expected.to contain('thisshouldexist=foo') } - it { is_expected.to contain('thisshouldexist2=') } - - describe '#content' do - subject { super().content } - it { 
is_expected.to_not match /thisshouldnotexist/ } - end - end - - describe file('/etc/monasca/agent/agent.conf') do - it { is_expected.to exist } - it { is_expected.to contain('thisshouldexist=foo') } - it { is_expected.to contain('thisshouldexist2=') } - - describe '#content' do - subject { super().content } - it { is_expected.to_not match /thisshouldnotexist/ } - end - end - - end -end diff --git a/spec/classes/monasca_agent_spec.rb b/spec/classes/monasca_agent_spec.rb deleted file mode 100644 index a226bb9..0000000 --- a/spec/classes/monasca_agent_spec.rb +++ /dev/null @@ -1,72 +0,0 @@ -require 'spec_helper' - -describe 'monasca::agent' do - - let :params do - { :url => 'http://localhost:8070/v2.0', - :username => 'monasca-agent', - :password => 'password', - :keystone_url => 'http://localhost:5000/v3/', - :install_python_deps => false } - end - - shared_examples 'monasca-agent' do - - context 'with default parameters' do - - it 'sets up monasca-agent files' do - is_expected.to contain_file('/etc/init.d/monasca-agent').with( - :owner => 'root', - :group => 'root', - :mode => '0755', - ) - end - - it 'installs monasca-agent service' do - is_expected.to contain_service('monasca-agent').with( - :ensure => 'running', - ) - end - - it 'configures various stuff' do - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*url: http:\/\/localhost:8070\/v2.0$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*username: monasca-agent$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*password: password$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*keystone_url: http:\/\/localhost:5000\/v3\/$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_name: null$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_domain_id: null$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_domain_name: null$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_id: null$/) - end - end - - context 'with overridden parameters' do - before do - params.merge!({ - :project_name => 'test_project', - :project_domain_id => 'domain_id', - :project_domain_name => 'test_domain', - :project_id => 'project_id', - }) - end - - it 'configures various stuff' do - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_name: test_project$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_domain_id: domain_id$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_domain_name: test_domain$/) - is_expected.to contain_file('/etc/monasca/agent/agent.yaml').with_content(/^\s*project_id: project_id$/) - end - end - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts) - end - it_behaves_like 'monasca-agent' - end - end -end diff --git a/spec/classes/monasca_alarmdefs_spec.rb b/spec/classes/monasca_alarmdefs_spec.rb deleted file mode 100644 index 65984bb..0000000 --- a/spec/classes/monasca_alarmdefs_spec.rb +++ /dev/null @@ -1,24 +0,0 @@ -require 'spec_helper' - -describe 'monasca::alarmdefs' do - let :pre_condition do - "include monasca - include monasca::api" - end - - shared_examples 'monasca::alarmdefs' do - it { should 
contain_python__virtualenv('/var/www/monasca-alarmdefs') } - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts()) - end - - it_behaves_like 'monasca::alarmdefs' - end - end -end diff --git a/spec/classes/monasca_api_spec.rb b/spec/classes/monasca_api_spec.rb deleted file mode 100644 index b0a3bd7..0000000 --- a/spec/classes/monasca_api_spec.rb +++ /dev/null @@ -1,66 +0,0 @@ -require 'spec_helper' - -describe 'monasca::api' do - - let :params do - {} - end - - shared_examples 'monasca-api' do - - context 'with default parameters' do - - it { is_expected.to contain_class('monasca') } - it { is_expected.to contain_class('monasca::params') } - - it 'installs monasca-api package and service' do - is_expected.to contain_service('monasca-api').with( - :name => 'monasca-api', - :ensure => 'running', - :tag => 'monasca-service', - ) - is_expected.to contain_package('monasca-api').with( - :name => 'monasca-api', - :ensure => 'latest', - :tag => ['openstack', 'monasca-package'], - ) - end - - it 'configures various stuff' do - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*region: NA$/) - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*maxQueryLimit: 10000$/) - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*delegateAuthorizedRole: monitoring-delegate$/) - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*adminRole: monasca-admin$/) - end - end - - context 'with overridden parameters' do - before do - params.merge!({ - :region_name => 'region1', - :max_query_limit => 100, - :role_delegate => 'monitoring-delegate2', - :role_admin => 'monasca-admin2', - }) - end - - it 'configures various stuff' do - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*region: region1$/) - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*maxQueryLimit: 100$/) - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*delegateAuthorizedRole: monitoring-delegate2$/) - is_expected.to contain_file('/etc/monasca/api-config.yml').with_content(/^\s*adminRole: monasca-admin2$/) - end - end - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts) - end - it_behaves_like 'monasca-api' - end - end -end diff --git a/spec/classes/monasca_checks_libvirt_spec.rb b/spec/classes/monasca_checks_libvirt_spec.rb deleted file mode 100644 index abf907e..0000000 --- a/spec/classes/monasca_checks_libvirt_spec.rb +++ /dev/null @@ -1,63 +0,0 @@ -require 'spec_helper' - -describe 'monasca::checks::libvirt' do - shared_examples 'monasca::checks::libvirt' do - let :libvirt_file do - '/etc/monasca/agent/conf.d/libvirt.yaml' - end - - let :pre_condition do - "class { 'monasca::agent': - url => 'http://127.0.0.1', - username => 'user', - password => 'password', - keystone_url => 'http://127.0.0.1:5000', - install_python_deps => false, - }" - end - - let :params do - { - :admin_password => 'password', - :admin_tenant_name => 'tenant_name', - :admin_user => 'user', - :identity_uri => 'uri', - :host_aggregate_re => 'M4', - } - end - - it 'builds the libvirt config file properly' do - should contain_file(libvirt_file).with_content(/^\s*admin_password: password$/) - should 
contain_file(libvirt_file).with_content(/^\s*admin_tenant_name: tenant_name$/) - should contain_file(libvirt_file).with_content(/^\s*admin_user: user$/) - should contain_file(libvirt_file).with_content(/^\s*identity_uri: uri$/) - should contain_file(libvirt_file).with_content(/^\s*cache_dir: \/dev\/shm$/) - should contain_file(libvirt_file).with_content(/^\s*nova_refresh: 14400$/) - should contain_file(libvirt_file).with_content(/^\s*network_use_bits: true$/) - should contain_file(libvirt_file).with_content(/^\s*metadata: \[\]$/) - should contain_file(libvirt_file).with_content(/^\s*customer_metadata: \[\]$/) - should contain_file(libvirt_file).with_content(/^\s*vm_probation: 300$/) - should contain_file(libvirt_file).with_content(/^\s*ping_check: false$/) - should contain_file(libvirt_file).with_content(/^\s*alive_only: false$/) - should contain_file(libvirt_file).with_content(/^\s*disk_collection_period: 0$/) - should contain_file(libvirt_file).with_content(/^\s*vm_cpu_check_enable: true$/) - should contain_file(libvirt_file).with_content(/^\s*vm_disks_check_enable: true$/) - should contain_file(libvirt_file).with_content(/^\s*vm_network_check_enable: true$/) - should contain_file(libvirt_file).with_content(/^\s*vm_ping_check_enable: false$/) - should contain_file(libvirt_file).with_content(/^\s*vm_extended_disks_check_enable: false$/) - should contain_file(libvirt_file).with_content(/^\s*host_aggregate_re: M4$/) - end - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts()) - end - - it_behaves_like 'monasca::checks::libvirt' - end - end -end diff --git a/spec/classes/monasca_checks_ovs_spec.rb b/spec/classes/monasca_checks_ovs_spec.rb deleted file mode 100644 index 80050f1..0000000 --- a/spec/classes/monasca_checks_ovs_spec.rb +++ /dev/null @@ -1,58 +0,0 @@ -require 'spec_helper' - -describe 'monasca::checks::ovs' do - shared_examples 'monasca::checks::ovs' do - let :ovs_file do - '/etc/monasca/agent/conf.d/ovs.yaml' - end - - let :pre_condition do - "class { 'monasca::agent': - url => 'http://127.0.0.1', - username => 'user', - password => 'password', - keystone_url => 'http://127.0.0.1:5000', - install_python_deps => false, - }" - end - - let :params do - { - :admin_password => 'password', - :admin_tenant_name => 'tenant_name', - :admin_user => 'user', - :identity_uri => 'uri', - :metadata => ['tenant_name'], - } - end - - it 'builds the ovs config file properly' do - should contain_file(ovs_file).with_content(/^\s*admin_password: password$/) - should contain_file(ovs_file).with_content(/^\s*admin_tenant_name: tenant_name$/) - should contain_file(ovs_file).with_content(/^\s*admin_user: user$/) - should contain_file(ovs_file).with_content(/^\s*cache_dir: \/dev\/shm$/) - should contain_file(ovs_file).with_content(/^\s*identity_uri: uri$/) - should contain_file(ovs_file).with_content(/^\s*network_use_bits: true$/) - should contain_file(ovs_file).with_content(/^\s*metadata: \["tenant_name"\]$/) - should contain_file(ovs_file).with_content(/^\s*neutron_refresh: 14400$/) - should contain_file(ovs_file).with_content(/^\s*ovs_cmd: 'sudo \/usr\/bin\/ovs-vsctl'$/) - should contain_file(ovs_file).with_content(/^\s*included_interface_re: qg\.\*$/) - should contain_file(ovs_file).with_content(/^\s*use_absolute_metrics: true$/) - should contain_file(ovs_file).with_content(/^\s*use_rate_metrics: true$/) - should contain_file(ovs_file).with_content(/^\s*use_health_metrics: 
true$/) - should contain_file(ovs_file).with_content(/^\s*publish_router_capacity: true$/) - end - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts()) - end - - it_behaves_like 'monasca::checks::ovs' - end - end -end diff --git a/spec/classes/monasca_checks_rabbitmq_spec.rb b/spec/classes/monasca_checks_rabbitmq_spec.rb deleted file mode 100644 index a35b997..0000000 --- a/spec/classes/monasca_checks_rabbitmq_spec.rb +++ /dev/null @@ -1,69 +0,0 @@ -require 'spec_helper' - -describe 'monasca::checks::rabbitmq' do - shared_examples 'monasca::checks::rabbitmq' do - let :rabbitmq_fragment do - 'test_instance_rabbitmq_instance' - end - - let :pre_condition do - "class { 'monasca::agent': - url => 'http://127.0.0.1', - username => 'user', - password => 'password', - keystone_url => 'http://127.0.0.1:5000', - install_python_deps => false, - }" - end - - let :params do - { - :instances => { - 'test_instance' => { - 'rabbitmq_api_url' => 'url', - 'rabbitmq_user' => 'user', - 'rabbitmq_pass' => 'password', - 'queues' => ['test_queue'], - 'nodes' => ['test_node'], - 'exchanges' => ['test_exchange'], - 'queues_regexes' => ['test_queue_regex'], - 'nodes_regexes' => ['test_node_regex'], - 'exchanges_regexes' => ['test_exchange_regex'], - 'max_detailed_queues' => 1000, - 'max_detailed_exchanges' => 100, - 'max_detailed_nodes' => 10, - 'whitelist' => {}, - } - } - } - end - - it 'builds the rabbitmq config file properly' do - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*rabbitmq_api_url: url$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*rabbitmq_user: user$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*rabbitmq_pass: password$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*queues: \["test_queue"\]$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*nodes: \["test_node"\]$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*exchanges: \["test_exchange"\]$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*queues_regexes: \["test_queue_regex"\]$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*nodes_regexes: \["test_node_regex"\]$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*exchanges_regexes: \["test_exchange_regex"\]$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*max_detailed_queues: 1000$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*max_detailed_exchanges: 100$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*max_detailed_nodes: 10$/) - should contain_concat_fragment(rabbitmq_fragment).with_content(/^\s*whitelist: {}$/) - end - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts()) - end - - it_behaves_like 'monasca::checks::rabbitmq' - end - end -end diff --git a/spec/classes/monasca_keystone_auth_spec.rb b/spec/classes/monasca_keystone_auth_spec.rb deleted file mode 100644 index 9a9bc12..0000000 --- a/spec/classes/monasca_keystone_auth_spec.rb +++ /dev/null @@ -1,44 +0,0 @@ -require 'spec_helper' - -describe 'monasca::keystone::auth' do - - let :params do - {} - end - - shared_examples 'monasca-keystone-auth' do - - context 'with default parameters' do - - it { 
is_expected.to contain_class('monasca::params') } - - it 'configures users' do - is_expected.to contain_keystone_user('monasca-agent') - is_expected.to contain_keystone_user('monasca-user') - - is_expected.to contain_keystone_role('monasca-agent') - is_expected.to contain_keystone_role('monitoring-delegate') - is_expected.to contain_keystone_role('monasca-admin') - is_expected.to contain_keystone_role('monasca-user') - - is_expected.to contain_keystone_user_role('monasca-agent@services').with( - :roles => ['monasca-agent', 'monitoring-delegate'], - ) - is_expected.to contain_keystone_user_role('monasca-user@services').with( - :roles => ['monasca-user'], - ) - end - end - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts) - end - it_behaves_like 'monasca-keystone-auth' - end - end -end diff --git a/spec/classes/monasca_notification_spec.rb b/spec/classes/monasca_notification_spec.rb deleted file mode 100644 index 3e424e4..0000000 --- a/spec/classes/monasca_notification_spec.rb +++ /dev/null @@ -1,83 +0,0 @@ -require 'spec_helper' - -describe 'monasca::notification' do - shared_examples 'monasca::notification' do - let :cfg_file do - '/etc/monasca/notification.yaml' - end - - let :start_script do - '/etc/init/monasca-notification.conf' - end - - let :pre_condition do - 'include monasca' - end - - let :params do - { - :install_python_deps => false, - } - end - - it 'starts the notification service' do - should contain_service('monasca-notification') - end - - it 'builds the notification config file properly' do - should contain_file(cfg_file).with_content(/^\s*kafka:$/) - should contain_file(cfg_file).with_content(/^\s*url: localhost:9092$/) - should contain_file(cfg_file).with_content(/^\s*group: monasca-notification$/) - should contain_file(cfg_file).with_content(/^\s*alarm_topic: alarm-state-transitions$/) - should contain_file(cfg_file).with_content(/^\s*notification_topic: alarm-notifications$/) - should contain_file(cfg_file).with_content(/^\s*notification_retry_topic: retry-notifications$/) - should contain_file(cfg_file).with_content(/^\s*max_offset_lag: 600$/) - should contain_file(cfg_file).with_content(/^\s*periodic:$/) - should contain_file(cfg_file).with_content(/^\s*60: 60-seconds-notifications$/) - should contain_file(cfg_file).with_content(/^\s*mysql:$/) - should contain_file(cfg_file).with_content(/^\s*host: $/) - should contain_file(cfg_file).with_content(/^\s*port: 3306$/) - should contain_file(cfg_file).with_content(/^\s*user: $/) - should contain_file(cfg_file).with_content(/^\s*passwd: $/) - should contain_file(cfg_file).with_content(/^\s*db: mon$/) - should contain_file(cfg_file).with_content(/^\s*notification_types:$/) - should contain_file(cfg_file).with_content(/^\s*plugins:$/) - should contain_file(cfg_file).with_content(/^\s*- monasca_notification.plugins.hipchat_notifier:HipChatNotifier$/) - should contain_file(cfg_file).with_content(/^\s*- monasca_notification.plugins.slack_notifier:SlackNotifier$/) - should contain_file(cfg_file).with_content(/^\s*email:$/) - should contain_file(cfg_file).with_content(/^\s*server: localhost$/) - should contain_file(cfg_file).with_content(/^\s*port: 25$/) - should contain_file(cfg_file).with_content(/^\s*notifications_size: 256$/) - should contain_file(cfg_file).with_content(/^\s*sent_notifications_size: 50$/) - should contain_file(cfg_file).with_content(/^\s*notification_path: \/notification\/alarms$/) - 
should contain_file(cfg_file).with_content(/^\s*notification_retry_path: \/notification\/retry$/) - should contain_file(cfg_file).with_content(/^\s*60: \/notification\/60_seconds$/) - should contain_file(cfg_file).with_content(/^\s*periodic_path:$/) - should contain_file(cfg_file).with_content(/^\s*logging:$/) - should contain_file(cfg_file).with_content(/^\s*version: 1$/) - should contain_file(cfg_file).with_content(/^\s*disable_existing_loggers: False$/) - should contain_file(cfg_file).with_content(/^\s*formatters:$/) - should contain_file(cfg_file).with_content(/^\s*filename: \/var\/log\/monasca\/notification.log$/) - should contain_file(cfg_file).with_content(/^\s*ca_certs: \/etc\/ssl\/certs\/ca-certificates.crt$/) - end - - it 'builds the startup script properly' do - should contain_file(start_script).with_content(/^\s*kill timeout 240$/) - should contain_file(start_script).with_content(/^\s*setgid monasca$/) - should contain_file(start_script).with_content(/^\s*setuid monasca-notification$/) - should contain_file(start_script).with_content(/^\s*exec \/var\/www\/monasca-notification\/bin\/monasca-notification > \/dev\/null$/) - end - end - - on_supported_os({ - :supported_os => OSDefaults.get_supported_os - }).each do |os,facts| - context "on #{os}" do - let (:facts) do - facts.merge!(OSDefaults.get_facts()) - end - - it_behaves_like 'monasca::notification' - end - end -end diff --git a/spec/shared_examples.rb b/spec/shared_examples.rb deleted file mode 100644 index fec0eac..0000000 --- a/spec/shared_examples.rb +++ /dev/null @@ -1,5 +0,0 @@ -shared_examples_for "a Puppet::Error" do |description| - it "with message matching #{description.inspect}" do - expect { is_expected.to have_class_count(1) }.to raise_error(Puppet::Error, description) - end -end diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb deleted file mode 100644 index 036c7e4..0000000 --- a/spec/spec_helper.rb +++ /dev/null @@ -1,17 +0,0 @@ -# Load libraries from openstacklib here to simulate how they live together in a real puppet run (for provider unit tests) -$LOAD_PATH.push(File.join(File.dirname(__FILE__), 'fixtures', 'modules', 'openstacklib', 'lib')) -require 'puppetlabs_spec_helper/module_spec_helper' -require 'shared_examples' -require 'puppet-openstack_spec_helper/facts' - -fixture_path = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures')) - -RSpec.configure do |c| - c.alias_it_should_behave_like_to :it_configures, 'configures' - c.alias_it_should_behave_like_to :it_raises, 'raises' - - c.module_path = File.join(fixture_path, 'modules') - c.manifest_dir = File.join(fixture_path, 'manifests') -end - -at_exit { RSpec::Puppet::Coverage.report! 
} diff --git a/spec/spec_helper_acceptance.rb b/spec/spec_helper_acceptance.rb deleted file mode 100644 index d51dfdb..0000000 --- a/spec/spec_helper_acceptance.rb +++ /dev/null @@ -1 +0,0 @@ -require 'puppet-openstack_spec_helper/litmus_spec_helper' diff --git a/spec/unit/provider/agent_config/ini_setting_spec.rb b/spec/unit/provider/agent_config/ini_setting_spec.rb deleted file mode 100644 index 3de8334..0000000 --- a/spec/unit/provider/agent_config/ini_setting_spec.rb +++ /dev/null @@ -1,63 +0,0 @@ -$LOAD_PATH.push( - File.join( - File.dirname(__FILE__), - '..', - '..', - '..', - 'fixtures', - 'modules', - 'inifile', - 'lib') -) -$LOAD_PATH.push( - File.join( - File.dirname(__FILE__), - '..', - '..', - '..', - 'fixtures', - 'modules', - 'openstacklib', - 'lib') -) -require 'spec_helper' -provider_class = Puppet::Type.type(:agent_config).provider(:ini_setting) -describe provider_class do - - it 'should default to the default setting when no other one is specified' do - resource = Puppet::Type::Agent_config.new( - {:name => 'DEFAULT/foo', :value => 'bar'} - ) - provider = provider_class.new(resource) - expect(provider.section).to eq('DEFAULT') - expect(provider.setting).to eq('foo') - end - - it 'should allow setting to be set explicitly' do - resource = Puppet::Type::Agent_config.new( - {:name => 'dude/whoa', :value => 'bar'} - ) - provider = provider_class.new(resource) - expect(provider.section).to eq('dude') - expect(provider.setting).to eq('whoa') - end - - it 'should ensure absent when is specified as a value' do - resource = Puppet::Type::Agent_config.new( - {:name => 'dude/foo', :value => ''} - ) - provider = provider_class.new(resource) - provider.exists? - expect(resource[:ensure]).to eq :absent - end - - it 'should ensure absent when value matches ensure_absent_val' do - resource = Puppet::Type::Agent_config.new( - {:name => 'dude/foo', :value => 'foo', :ensure_absent_val => 'foo' } - ) - provider = provider_class.new(resource) - provider.exists? - expect(resource[:ensure]).to eq :absent - end - -end diff --git a/spec/unit/provider/monasca_config/ini_setting_spec.rb b/spec/unit/provider/monasca_config/ini_setting_spec.rb deleted file mode 100644 index a67d5f9..0000000 --- a/spec/unit/provider/monasca_config/ini_setting_spec.rb +++ /dev/null @@ -1,63 +0,0 @@ -$LOAD_PATH.push( - File.join( - File.dirname(__FILE__), - '..', - '..', - '..', - 'fixtures', - 'modules', - 'inifile', - 'lib') -) -$LOAD_PATH.push( - File.join( - File.dirname(__FILE__), - '..', - '..', - '..', - 'fixtures', - 'modules', - 'openstacklib', - 'lib') -) -require 'spec_helper' -provider_class = Puppet::Type.type(:monasca_config).provider(:ini_setting) -describe provider_class do - - it 'should default to the default setting when no other one is specified' do - resource = Puppet::Type::Monasca_config.new( - {:name => 'DEFAULT/foo', :value => 'bar'} - ) - provider = provider_class.new(resource) - expect(provider.section).to eq('DEFAULT') - expect(provider.setting).to eq('foo') - end - - it 'should allow setting to be set explicitly' do - resource = Puppet::Type::Monasca_config.new( - {:name => 'dude/whoa', :value => 'bar'} - ) - provider = provider_class.new(resource) - expect(provider.section).to eq('dude') - expect(provider.setting).to eq('whoa') - end - - it 'should ensure absent when is specified as a value' do - resource = Puppet::Type::Monasca_config.new( - {:name => 'dude/foo', :value => ''} - ) - provider = provider_class.new(resource) - provider.exists? 
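The first two examples above pin down the composite-name convention these ini_setting providers share: everything before the first slash in the resource title is treated as the INI section, and the remainder as the setting. A minimal Ruby sketch of that split follows — a standalone illustration of the convention the spec asserts, not the provider's actual code; split_title is a hypothetical helper name.

# Sketch only: derive section/setting from a composite title such as
# 'DEFAULT/foo' or 'dude/whoa', as the examples above expect.
def split_title(title)
  section, setting = title.split('/', 2)
  { :section => section, :setting => setting }
end

split_title('DEFAULT/foo')  # => {:section=>"DEFAULT", :setting=>"foo"}
split_title('dude/whoa')    # => {:section=>"dude", :setting=>"whoa"}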
- expect(resource[:ensure]).to eq :absent - end - - it 'should ensure absent when value matches ensure_absent_val' do - resource = Puppet::Type::Monasca_config.new( - {:name => 'dude/foo', :value => 'foo', :ensure_absent_val => 'foo' } - ) - provider = provider_class.new(resource) - provider.exists? - expect(resource[:ensure]).to eq :absent - end - -end diff --git a/spec/unit/provider/monasca_ini/ini_setting_spec.rb b/spec/unit/provider/monasca_ini/ini_setting_spec.rb deleted file mode 100644 index 66c53b5..0000000 --- a/spec/unit/provider/monasca_ini/ini_setting_spec.rb +++ /dev/null @@ -1,63 +0,0 @@ -$LOAD_PATH.push( - File.join( - File.dirname(__FILE__), - '..', - '..', - '..', - 'fixtures', - 'modules', - 'inifile', - 'lib') -) -$LOAD_PATH.push( - File.join( - File.dirname(__FILE__), - '..', - '..', - '..', - 'fixtures', - 'modules', - 'openstacklib', - 'lib') -) -require 'spec_helper' -provider_class = Puppet::Type.type(:monasca_ini).provider(:ini_setting) -describe provider_class do - - it 'should default to the default setting when no other one is specified' do - resource = Puppet::Type::Monasca_ini.new( - {:name => 'DEFAULT/foo', :value => 'bar'} - ) - provider = provider_class.new(resource) - expect(provider.section).to eq('DEFAULT') - expect(provider.setting).to eq('foo') - end - - it 'should allow setting to be set explicitly' do - resource = Puppet::Type::Monasca_ini.new( - {:name => 'dude/whoa', :value => 'bar'} - ) - provider = provider_class.new(resource) - expect(provider.section).to eq('dude') - expect(provider.setting).to eq('whoa') - end - - it 'should ensure absent when is specified as a value' do - resource = Puppet::Type::Monasca_ini.new( - {:name => 'dude/foo', :value => ''} - ) - provider = provider_class.new(resource) - provider.exists? - expect(resource[:ensure]).to eq :absent - end - - it 'should ensure absent when value matches ensure_absent_val' do - resource = Puppet::Type::Monasca_ini.new( - {:name => 'dude/foo', :value => 'foo', :ensure_absent_val => 'foo' } - ) - provider = provider_class.new(resource) - provider.exists? 
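These provider specs also assert a shared absent-value rule: once exists? has run, a resource whose value is empty, or equal to ensure_absent_val, ends up with ensure => absent. A standalone sketch of that rule, written against plain Ruby values rather than the real provider; absent? is a hypothetical helper, and its arguments stand in for the resource's value and ensure_absent_val parameters.

# Sketch of the absent-value rule the provider examples above exercise.
def absent?(value, ensure_absent_val = nil)
  value.to_s.empty? || (!ensure_absent_val.nil? && value == ensure_absent_val)
end

absent?('')            # => true  (empty value)
absent?('foo', 'foo')  # => true  (value matches ensure_absent_val)
absent?('bar', 'foo')  # => false (kept present)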
- expect(resource[:ensure]).to eq :absent - end - -end diff --git a/spec/unit/type/agent_config.rb b/spec/unit/type/agent_config.rb deleted file mode 100644 index 2df1427..0000000 --- a/spec/unit/type/agent_config.rb +++ /dev/null @@ -1,52 +0,0 @@ -require 'puppet' -require 'puppet/type/agent_config' -describe 'Puppet::Type.type(:agent_config)' do - before :each do - @agent_config = Puppet::Type.type(:agent_config).new(:name => 'DEFAULT/foo', :value => 'bar') - end - - it 'should require a name' do - expect { - Puppet::Type.type(:agent_config).new({}) - }.to raise_error(Puppet::Error, 'Title or name must be provided') - end - - it 'should not expect a name with whitespace' do - expect { - Puppet::Type.type(:agent_config).new(:name => 'f oo') - }.to raise_error(Puppet::Error, /Parameter name failed/) - end - - it 'should fail when there is no section' do - expect { - Puppet::Type.type(:agent_config).new(:name => 'foo') - }.to raise_error(Puppet::Error, /Parameter name failed/) - end - - it 'should not require a value when ensure is absent' do - Puppet::Type.type(:agent_config).new(:name => 'DEFAULT/foo', :ensure => :absent) - end - - it 'should accept a valid value' do - @agent_config[:value] = 'bar' - expect(@agent_config[:value]).to eq('bar') - end - - it 'should not accept a value with whitespace' do - @agent_config[:value] = 'b ar' - expect(@agent_config[:value]).to eq('b ar') - end - - it 'should accept valid ensure values' do - @agent_config[:ensure] = :present - expect(@agent_config[:ensure]).to eq(:present) - @agent_config[:ensure] = :absent - expect(@agent_config[:ensure]).to eq(:absent) - end - - it 'should not accept invalid ensure values' do - expect { - @agent_config[:ensure] = :latest - }.to raise_error(Puppet::Error, /Invalid value/) - end -end diff --git a/spec/unit/type/monasca_config_spec.rb b/spec/unit/type/monasca_config_spec.rb deleted file mode 100644 index cc82fcd..0000000 --- a/spec/unit/type/monasca_config_spec.rb +++ /dev/null @@ -1,52 +0,0 @@ -require 'puppet' -require 'puppet/type/monasca_config' -describe 'Puppet::Type.type(:monasca_config)' do - before :each do - @monasca_config = Puppet::Type.type(:monasca_config).new(:name => 'DEFAULT/foo', :value => 'bar') - end - - it 'should require a name' do - expect { - Puppet::Type.type(:monasca_config).new({}) - }.to raise_error(Puppet::Error, 'Title or name must be provided') - end - - it 'should not expect a name with whitespace' do - expect { - Puppet::Type.type(:monasca_config).new(:name => 'f oo') - }.to raise_error(Puppet::Error, /Parameter name failed/) - end - - it 'should fail when there is no section' do - expect { - Puppet::Type.type(:monasca_config).new(:name => 'foo') - }.to raise_error(Puppet::Error, /Parameter name failed/) - end - - it 'should not require a value when ensure is absent' do - Puppet::Type.type(:monasca_config).new(:name => 'DEFAULT/foo', :ensure => :absent) - end - - it 'should accept a valid value' do - @monasca_config[:value] = 'bar' - expect(@monasca_config[:value]).to eq('bar') - end - - it 'should not accept a value with whitespace' do - @monasca_config[:value] = 'b ar' - expect(@monasca_config[:value]).to eq('b ar') - end - - it 'should accept valid ensure values' do - @monasca_config[:ensure] = :present - expect(@monasca_config[:ensure]).to eq(:present) - @monasca_config[:ensure] = :absent - expect(@monasca_config[:ensure]).to eq(:absent) - end - - it 'should not accept invalid ensure values' do - expect { - @monasca_config[:ensure] = :latest - }.to raise_error(Puppet::Error, 
/Invalid value/) - end -end diff --git a/spec/unit/type/monasca_ini.rb b/spec/unit/type/monasca_ini.rb deleted file mode 100644 index 8ddb813..0000000 --- a/spec/unit/type/monasca_ini.rb +++ /dev/null @@ -1,52 +0,0 @@ -require 'puppet' -require 'puppet/type/monasca_ini' -describe 'Puppet::Type.type(:monasca_ini)' do - before :each do - @monasca_ini = Puppet::Type.type(:monasca_ini).new(:name => 'DEFAULT/foo', :value => 'bar') - end - - it 'should require a name' do - expect { - Puppet::Type.type(:monasca_ini).new({}) - }.to raise_error(Puppet::Error, 'Title or name must be provided') - end - - it 'should not expect a name with whitespace' do - expect { - Puppet::Type.type(:monasca_ini).new(:name => 'f oo') - }.to raise_error(Puppet::Error, /Parameter name failed/) - end - - it 'should fail when there is no section' do - expect { - Puppet::Type.type(:monasca_ini).new(:name => 'foo') - }.to raise_error(Puppet::Error, /Parameter name failed/) - end - - it 'should not require a value when ensure is absent' do - Puppet::Type.type(:monasca_ini).new(:name => 'DEFAULT/foo', :ensure => :absent) - end - - it 'should accept a valid value' do - @monasca_ini[:value] = 'bar' - expect(@monasca_ini[:value]).to eq('bar') - end - - it 'should not accept a value with whitespace' do - @monasca_ini[:value] = 'b ar' - expect(@monasca_ini[:value]).to eq('b ar') - end - - it 'should accept valid ensure values' do - @monasca_ini[:ensure] = :present - expect(@monasca_ini[:ensure]).to eq(:present) - @monasca_ini[:ensure] = :absent - expect(@monasca_ini[:ensure]).to eq(:absent) - end - - it 'should not accept invalid ensure values' do - expect { - @monasca_ini[:ensure] = :latest - }.to raise_error(Puppet::Error, /Invalid value/) - end -end diff --git a/templates/agent.yaml.erb b/templates/agent.yaml.erb deleted file mode 100644 index fdbb46c..0000000 --- a/templates/agent.yaml.erb +++ /dev/null @@ -1,51 +0,0 @@ -Api: - url: <%= @url %> - username: <%= @username %> - password: <%= @password %> - keystone_url: <%= @keystone_url %> - project_name: <%= @project_name %> - project_domain_id: <%= @project_domain_id %> - project_domain_name: <%= @project_domain_name %> - project_id: <%= @project_id %> -<%- if @ca_file -%> - insecure: false - ca_file: <%= @ca_file %> -<%- else -%> - insecure: true -<%- end -%> - max_buffer_size: <%= @max_buffer_size %> - backlog_send_rate: <%= @backlog_send_rate %> - amplifier: <%= @amplifier %> - -Main: -<%- if @hostname -%> - hostname: <%= @hostname %> -<%- end -%> - dimensions: -<%- @dimensions.each_pair do |key, value| -%> - <%= key %>: <%= value %> -<%- end -%> - recent_point_threshold: <%= @recent_point_threshold %> - check_freq: <%= @check_freq %> - listen_port: <%= @listen_port %> - additional_checksd: <%= @additional_checksd %> - non_local_traffic: <%= @non_local_traffic %> - -Statsd: - monasca_statsd_port: <%= @statsd_port %> - monasca_statsd_interval : <%= @statsd_interval %> -<%- if @statsd_forward_host -%> - monasca_statsd_forward_host: <%= @statsd_forward_host %> - monasca_statsd_statsd_forward_port: <%= @statsd_forward_port %> -<%- end -%> - -Logging: - log_level: <%= @log_level %> - collector_log_file: <%= @collector_log_file %> - forwarder_log_file: <%= @forwarder_log_file %> - statsd_log_file: <%= @monstatsd_log_file %> - log_to_syslog: <%= @log_to_syslog %> -<%- if @syslog_host and @syslog_port -%> - syslog_host: <%= @syslog_host %> - syslog_port: <%= @syslog_port %> -<%- end -%> diff --git a/templates/alarm_definition_config.json.erb 
b/templates/alarm_definition_config.json.erb deleted file mode 100644 index 849f102..0000000 --- a/templates/alarm_definition_config.json.erb +++ /dev/null @@ -1,8 +0,0 @@ -[ - { - "name":"http_status", - "expression":"avg(http_status{}) > 0", - "severity":"LOW", - "match_by":"service,url,tenant_id,region,hostname,id" - } -] diff --git a/templates/api-config.yml.erb b/templates/api-config.yml.erb deleted file mode 100644 index f1a8834..0000000 --- a/templates/api-config.yml.erb +++ /dev/null @@ -1,126 +0,0 @@ -region: <%= @region_name %> - -accessedViaHttps: false - -metricsTopic: metrics - -eventsTopic: events - -maxQueryLimit: <%= @max_query_limit %> - -<% if @valid_notif_periods and not @valid_notif_periods.empty? -%> -validNotificationPeriods: -<% @valid_notif_periods.each do |period| -%> - - <%= period %> -<%- end -%> -<% end -%> - -kafka: - brokerUris: - - <%= @kafka_brokers %> - zookeeperUris: - - <%= @zookeeper_servers %> - healthCheckTopic: healthcheck - -mysql: - driverClass: com.mysql.jdbc.Driver - url: jdbc:mysql://<%= @sql_host %>:<%= @sql_port %>/mon?connectTimeout=5000&autoReconnect=true - user: <%= @sql_user %> - password: <%= @sql_password %> - maxWaitForConnection: 1s - validationQuery: "/* MyService Health Check */ SELECT 1" - minSize: 8 - maxSize: 32 - checkConnectionWhileIdle: <%= @check_conn_while_idle %> - checkConnectionOnBorrow: true - -databaseConfiguration: - databaseType: <%= @database_type %> - -influxDB: - name: mon - version: V9 - maxHttpConnections: 100 - gzip: <%= @gzip_setting %> - replicationFactor: 1 - url: http://<%= @database_host %>:8086 - user: <%= @api_db_user %> - password: <%= @api_db_password %> - -vertica: - driverClass: com.vertica.jdbc.Driver - url: jdbc:vertica://<%= @database_host %>:5433/mon - user: <%= @api_db_user %> - password: <%= @api_db_password %> - maxWaitForConnection: <%= @api_db_wait %> - validationQuery: "/* MyService Health Check */ SELECT 1" - minSize: <%= @api_db_thread_min %> - maxSize: <%= @api_db_thread_max %> - checkConnectionWhileIdle: <%= @check_conn_while_idle %> - dbHint: <%= @vertica_db_hint %> - -middleware: - enabled: true - serverVIP: <%= @keystone_endpoint %> - serverPort: 5000 - connTimeout: 5000 - connSSLClientAuth: true - keystore: None - keystorePassword: None - connPoolMaxActive: 3 - connPoolMaxIdle: 3 - connPoolEvictPeriod: 600000 - connPoolMinIdleTime: 600000 - connRetryTimes: 2 - connRetryInterval: 50 - defaultAuthorizedRoles: <%= @roles_default %> -<%- if @roles_read_only and not @roles_read_only.empty? 
-%> - readOnlyAuthorizedRoles: <%= @roles_read_only %> -<%- end -%> - agentAuthorizedRoles: <%= @roles_agent %> - delegateAuthorizedRole: <%= @role_delegate %> - adminRole: <%= @role_admin %> - adminAuthMethod: <%= @auth_method %> - adminUser: <%= @admin_name %> - adminPassword: <%= @admin_password %> - adminProjectName: <%= @admin_project_name %> - adminToken: <%= @keystone_admin_token %> - timeToCacheToken: 600 - maxTokenCacheSize: 1048576 - -server: - applicationConnectors: - - type: http - maxRequestHeaderSize: 16KiB - port: <%= @monasca_api_port %> - -logging: - - level: INFO - - loggers: - - com.example.app: DEBUG - - appenders: - - type: console - threshold: ALL - timeZone: UTC - target: stdout - logFormat: - - - type: file - currentLogFilename: /var/log/monasca/monasca-api.log - threshold: ALL - archive: true - archivedLogFilenamePattern: /var/log/monasca/monasca-api-%d.log.gz - archivedFileCount: 5 - timeZone: UTC - logFormat: - - - type: syslog - host: localhost - port: 514 - facility: local0 - threshold: ALL - logFormat: diff --git a/templates/bootstrap-alarm-definitions.py.erb b/templates/bootstrap-alarm-definitions.py.erb deleted file mode 100644 index 2afcfc1..0000000 --- a/templates/bootstrap-alarm-definitions.py.erb +++ /dev/null @@ -1,141 +0,0 @@ -#!<%= @virtual_env %>/bin/python - -import json -from keystoneclient.v3 import client as keystone_client -from monascaclient import client as monasca_client -import monascaclient.exc as exc -import os - - -def get_monasca_client(): - kwargs = { - 'username': os.environ['OS_USERNAME'], - 'password': os.environ['OS_PASSWORD'], - 'auth_url': os.environ['OS_AUTH_URL'], - 'project_name': os.environ['OS_PROJECT_NAME'], - } - - _ksclient = keystone_client.Client(**kwargs) - - kwargs = { - 'token': _ksclient.auth_token, - } - - return monasca_client.Client( - '2_0', - os.environ['MONASCA_API_URL'], - **kwargs) - - -def get_current_def_names(client): - names = [] - current_definitions = client.alarm_definitions.list() - for definition in current_definitions: - names.append(definition['name']) - return names - - -def get_current_notif_names(client): - names = [] - current_notifications = client.notifications.list() - for notification in current_notifications: - names.append(notification['name']) - return names - -def build_alarm_def(current_def, new_id, method, actions): - new_def = {} - new_def['alarm_id'] = current_def['id'] - new_def['name'] = current_def['name'] - new_def['description'] = current_def['description'] - new_def['match_by'] = current_def['match_by'] - new_def['severity'] = current_def['severity'] - new_def['expression'] = current_def['expression'] - new_def['actions_enabled'] = current_def['actions_enabled'] - new_def['alarm_actions'] = current_def['alarm_actions'] - new_def['ok_actions'] = current_def['ok_actions'] - new_def['undetermined_actions'] = current_def['undetermined_actions'] - - if 'ALARM' in actions and new_id not in new_def['alarm_actions']: - print("Adding ALARM action for method '%s' to '%s'" % (method, new_def['name'])) - new_def['alarm_actions'].append(new_id) - if 'OK' in actions and new_id not in new_def['ok_actions']: - print("Adding OK action for method '%s' to '%s'" % (method, new_def['name'])) - new_def['ok_actions'].append(new_id) - if 'UNDETERMINED' in actions and new_id not in new_def['undetermined_actions']: - print("Adding UNDETERMINED action for method '%s' to '%s'" % (method, new_def['name'])) - new_def['undetermined_actions'].append(new_id) - - return new_def - - -def 
assign_notification_method(client, assignment): - - def_name = assignment['alarm_definition_name'] - methods = assignment['notification_methods'] - actions = assignment['actions'] - - current_definitions = client.alarm_definitions.list() - current_notifications = client.notifications.list() - for definition in current_definitions: - if def_name == definition['name']: - for method in methods: - for notification in current_notifications: - if method == notification['name']: - new_id = notification['id'] - if new_id in definition['alarm_actions']: - print("Skipping assignment of notification method '%s' to '%s', already there." % (method, def_name)) - continue - new_def = build_alarm_def(definition, - notification['id'], - method, - actions) - client.alarm_definitions.update(**new_def) - print("Assigned notification method '%s' to '%s'" % (method, def_name)) - - -def main(): - - try: - client = get_monasca_client() - def_names = get_current_def_names(client) - notif_names = get_current_notif_names(client) - alarm_definitions = json.load(open('<%= @alarm_definition_config %>')) - notification_methods = json.load(open('<%= @notification_config %>')) - notification_assignments = json.load(open('<%= @notification_assignments %>')) - - # - # Create all the alarm definitions - # - for alarm_definition in alarm_definitions: - name = alarm_definition['name'] - if name in def_names: - print("Skipping alarm definition '%s', already defined." % name) - continue - client.alarm_definitions.create(**alarm_definition) - print("Added alarm definition '%s'" % name) - - # - # Create all the notification methods - # - for notification in notification_methods: - name = notification['name'] - if name in notif_names: - print("Skipping notification '%s', already defined." % name) - continue - client.notifications.create(**notification) - print("Added notification method '%s'" % name) - - # - # Now assign the notification method(s) to - # alarm definitions defined in assignment file. - # - for assignment in notification_assignments: - assign_notification_method(client, assignment) - - except exc.HTTPException as he: - raise exc.CommandError('HTTPException code=%s message=%s' % - (he.code, he.message)) - - -if __name__ == '__main__': - main() diff --git a/templates/bootstrap-influxdb.sh.erb b/templates/bootstrap-influxdb.sh.erb deleted file mode 100644 index d420de5..0000000 --- a/templates/bootstrap-influxdb.sh.erb +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash - -if [[ -z ${INFLUX_ADMIN_PASSWORD} ]] -then - echo "INFLUX_ADMIN_PASSWORD must be set in env" - exit 1 -fi - -if [[ -z ${DB_USER_PASSWORD} ]] -then - echo "DB_USER_PASSWORD must be set in env" - exit 1 -fi - -if [[ -z ${DB_READ_ONLY_USER_PASSWORD} ]] -then - echo "DB_READ_ONLY_USER_PASSWORD must be set in env" - exit 1 -fi - -CONFIG_FILE="/etc/opt/influxdb/influxdb.conf" -INFLUX_HOST="<%= @influxdb_host %>:<%= @influxdb_port %>" -INFLUX_ADMIN="<%= @influxdb_user %>" -MONASCA_DB="mon" -MONASCA_USERS="mon_api mon_persister" -MONASCA_READ_ONLY_USERS="mon_ro" -DEFAULT_RETENTION_POLICY_NAME="<%= @influxdb_def_ret_pol_name %>" -DEFAULT_RETENTION_POLICY_DURATION="<%= @influxdb_def_ret_pol_duration %>" -TMP_RETENTION_POLICY_NAME="<%= @influxdb_tmp_ret_pol_name %>" -TMP_RETENTION_POLICY_DURATION="<%= @influxdb_tmp_ret_pol_duration %>" -RETENTION_POLICY_REPLICATION_FACTOR="<%= @influxdb_retention_replication %>" - -wait_for_influx() -{ - HTTP_CODE=`curl -s -w "%{http_code}" "http://$INFLUX_HOST/ping" -o /dev/null` - PING_RC=$? 
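wait_for_influx above polls InfluxDB's /ping endpoint and only proceeds once it answers HTTP 204; the loop that follows caps this at roughly 30 one-second attempts before giving up. The same readiness probe, sketched in Ruby with placeholder host and port values:

require 'net/http'

# Poll http://<host>:<port>/ping once per second until InfluxDB answers 204,
# mirroring the wait_for_influx loop in the script above.
def wait_for_influx(host, port, attempts = 30)
  attempts.times do
    begin
      response = Net::HTTP.get_response(URI("http://#{host}:#{port}/ping"))
      return true if response.code == '204'
    rescue StandardError
      # connection refused while the service is still starting; retry
    end
    sleep 1
  end
  false
end

wait_for_influx('127.0.0.1', 8086) or abort 'InfluxDB not up after 30 seconds!'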
- ATTEMPTS=1 - - until [ $PING_RC -eq "0" ] && [ $HTTP_CODE -eq "204" ] - do - HTTP_CODE=`curl -s -w "%{http_code}" "http://$INFLUX_HOST/ping" -o /dev/null` - PING_RC=$? - ATTEMPTS=$((ATTEMPTS+1)) - if [ $ATTEMPTS -eq 30 ] - then - echo "InfluxDB not up after $ATTEMPTS seconds!" - echo "HTTP CODE: $HTTP_CODE" - echo "CURL RETURN CODE: $PING_RC" - exit 1 - fi - sleep 1 - done -} - -wait_for_influx - -USERS=`curl -s -G http://$INFLUX_HOST/query --data-urlencode "q=SHOW USERS" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" --data "pretty=true"` -# check to see if we've created the root user yet -if ! grep -q root <<<$USERS -then - # Nope, need to restart in insecure mode the first time - # to create the admin user. - echo "Restarting influxdb in insecure mode to create first user" - sed -i -e '/\[authentication\]/{n;s/.*/enabled = false/}' $CONFIG_FILE - service influxdb restart - wait_for_influx - q="CREATE USER $INFLUX_ADMIN WITH PASSWORD '$INFLUX_ADMIN_PASSWORD' WITH ALL PRIVILEGES" - curl -s -G -w "%{http_code} CREATE USER $INFLUX_ADMIN\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" -o /dev/null - echo "Restarting influxdb in secure mode" - sed -i -e '/\[authentication\]/{n;s/.*/enabled = true/}' $CONFIG_FILE - service influxdb restart - wait_for_influx -fi - -# create the database -q="CREATE DATABASE $MONASCA_DB" -curl -s -G -w "%{http_code} $q\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null - -# create the default retention policy -q="CREATE RETENTION POLICY $DEFAULT_RETENTION_POLICY_NAME ON $MONASCA_DB DURATION $DEFAULT_RETENTION_POLICY_DURATION REPLICATION $RETENTION_POLICY_REPLICATION_FACTOR DEFAULT" -curl -s -G -w "%{http_code} $q\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null -# alter it in case we've change replication or duration -q="ALTER RETENTION POLICY $DEFAULT_RETENTION_POLICY_NAME ON $MONASCA_DB DURATION $DEFAULT_RETENTION_POLICY_DURATION REPLICATION $RETENTION_POLICY_REPLICATION_FACTOR DEFAULT" -curl -s -G -w "%{http_code} $q\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null - -# create the tmp retention policy -q="CREATE RETENTION POLICY $TMP_RETENTION_POLICY_NAME ON $MONASCA_DB DURATION $TMP_RETENTION_POLICY_DURATION REPLICATION $RETENTION_POLICY_REPLICATION_FACTOR" -curl -s -G -w "%{http_code} $q\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null -# alter it in case we've change replication or duration -q="ALTER RETENTION POLICY $TMP_RETENTION_POLICY_NAME ON $MONASCA_DB DURATION $TMP_RETENTION_POLICY_DURATION REPLICATION $RETENTION_POLICY_REPLICATION_FACTOR" -curl -s -G -w "%{http_code} $q\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null - -# create the monasca users as admins -for u in $MONASCA_USERS -do - q="CREATE USER $u WITH PASSWORD '$DB_USER_PASSWORD' WITH ALL PRIVILEGES" - curl -s -G -w "%{http_code} CREATE USER $u\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null -done - -# create the read only users -for u in $MONASCA_READ_ONLY_USERS -do - # create - q="CREATE USER $u WITH PASSWORD '$DB_READ_ONLY_USER_PASSWORD'" - curl -s -G -w "%{http_code} CREATE USER $u\\n" http://$INFLUX_HOST/query 
--data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null - - # grant read access to the monasca db - q="GRANT read ON $MONASCA_DB TO $u" - curl -s -G -w "%{http_code} $q\\n" http://$INFLUX_HOST/query --data-urlencode "q=$q" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" -o /dev/null -done - -echo "Done creating entities" - -echo "\nDatabases:" -curl -s -G http://$INFLUX_HOST/query --data-urlencode "q=SHOW DATABASES" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" --data "pretty=true" -echo "\n\nRetention policies:" -curl -s -G http://$INFLUX_HOST/query --data-urlencode "q=SHOW RETENTION POLICIES $MONASCA_DB" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" --data "pretty=true" -echo "\n\nUsers:" -curl -s -G http://$INFLUX_HOST/query --data-urlencode "q=SHOW USERS" --data "u=$INFLUX_ADMIN" --data "p=$INFLUX_ADMIN_PASSWORD" --data "pretty=true" -echo "" diff --git a/templates/checks/apache.erb b/templates/checks/apache.erb deleted file mode 100644 index d2cef10..0000000 --- a/templates/checks/apache.erb +++ /dev/null @@ -1,5 +0,0 @@ - - name: <%= @title %> - apache_status_url: <%= @apache_status_url %> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/cpu.erb b/templates/checks/cpu.erb deleted file mode 100644 index 1fff41a..0000000 --- a/templates/checks/cpu.erb +++ /dev/null @@ -1,7 +0,0 @@ - - name: <%= @title %> -<%- if not @send_rollup_stats.nil? -%> - send_rollup_stats: <%= @send_rollup_stats %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/disk.erb b/templates/checks/disk.erb deleted file mode 100644 index 2b92858..0000000 --- a/templates/checks/disk.erb +++ /dev/null @@ -1,19 +0,0 @@ - - name: <%= @title %> -<%- if not @use_mount.nil? -%> - use_mount: <%= @use_mount %> -<%- end -%> -<%- if not @send_io_stats.nil? -%> - send_io_stats: <%= @send_io_stats %> -<%- end -%> -<%- if not @send_rollup_stats.nil? -%> - send_rollup_stats: <%= @send_rollup_stats %> -<%- end -%> -<%- if not @device_blacklist_re.nil? -%> - device_blacklist_re: <%= @device_blacklist_re %> -<%- end -%> -<%- if not @ignore_filesystem_types.nil? 
-%> - ignore_filesystem_types: <%= @ignore_filesystem_types %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/host_alive.erb b/templates/checks/host_alive.erb deleted file mode 100644 index 68755c5..0000000 --- a/templates/checks/host_alive.erb +++ /dev/null @@ -1,3 +0,0 @@ - - name: <%= @title %> - host_name: <%= @host_name %> - alive_test: <%= @alive_test %> \ No newline at end of file diff --git a/templates/checks/host_alive.yaml.erb b/templates/checks/host_alive.yaml.erb deleted file mode 100644 index c393940..0000000 --- a/templates/checks/host_alive.yaml.erb +++ /dev/null @@ -1,7 +0,0 @@ ---- -init_config: - ssh_port: <%= @ssh_port %> - ssh_timeout: <%= @ssh_timeout %> - ping_timeout: <%= @ping_timeout %> - -instances: diff --git a/templates/checks/http_check.erb b/templates/checks/http_check.erb deleted file mode 100644 index eb201ae..0000000 --- a/templates/checks/http_check.erb +++ /dev/null @@ -1,29 +0,0 @@ - - name: <%= @title %> - url: <%= @url %> -<%- if @timeout -%> - timeout: <%= @timeout %> -<%- end -%> -<%- if @username -%> - username: <%= @username %> -<%- end -%> -<%- if @password -%> - password: <%= @password %> -<%- end -%> -<%- if @match_pattern -%> - match_pattern: <%= @match_pattern %> -<%- end -%> -<%- if not @use_keystone.nil? -%> - use_keystone: <%= @use_keystone %> -<%- end -%> -<%- if not @collect_response_time.nil? -%> - collect_response_time: <%= @collect_response_time %> -<%- end -%> -<%- if @headers -%> - headers: <%= @headers %> -<%- end -%> -<%- if not @disable_ssl_validation.nil? -%> - disable_ssl_validation: <%= @disable_ssl_validation %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/libvirt.yaml.erb b/templates/checks/libvirt.yaml.erb deleted file mode 100644 index 96c720b..0000000 --- a/templates/checks/libvirt.yaml.erb +++ /dev/null @@ -1,35 +0,0 @@ ---- -init_config: - admin_password: <%= @admin_password %> - admin_tenant_name: <%= @admin_tenant_name %> - admin_user: <%= @admin_user %> - identity_uri: <%= @identity_uri %> -<%- if @region_name -%> - region_name: <%= @region_name %> -<%- end -%> - cache_dir: <%= @cache_dir %> - nova_refresh: <%= @nova_refresh %> -<%- if @network_use_bits -%> - network_use_bits: <%= @network_use_bits %> -<%- end -%> -<%- if @metadata -%> - metadata: <%= @metadata %> -<%- end -%> -<%- if @customer_metadata -%> - customer_metadata: <%= @customer_metadata %> -<%- end -%> - vm_probation: <%= @vm_probation %> - ping_check: <%= @ping_check %> - alive_only: <%= @alive_only %> - disk_collection_period: <%= @disk_collection_period %> - vm_cpu_check_enable: <%= @vm_cpu_check_enable %> - vm_disks_check_enable: <%= @vm_disks_check_enable %> - vm_network_check_enable: <%= @vm_network_check_enable %> - vm_ping_check_enable: <%= @vm_ping_check_enable %> - vm_extended_disks_check_enable: <%= @vm_extended_disks_check_enable %> -<%- if @host_aggregate_re -%> - host_aggregate_re: <%= @host_aggregate_re %> -<%- end -%> - -instances: - - {} diff --git a/templates/checks/load.erb b/templates/checks/load.erb deleted file mode 100644 index 46f1446..0000000 --- a/templates/checks/load.erb +++ /dev/null @@ -1,4 +0,0 @@ - - name: <%= @title %> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/memory.erb b/templates/checks/memory.erb deleted file mode 100644 index 46f1446..0000000 --- 
a/templates/checks/memory.erb +++ /dev/null @@ -1,4 +0,0 @@ - - name: <%= @title %> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/mysql.erb b/templates/checks/mysql.erb deleted file mode 100644 index 7177606..0000000 --- a/templates/checks/mysql.erb +++ /dev/null @@ -1,25 +0,0 @@ - - name: <%= @title %> -<%- if @server -%> - server: <%= @server %> -<%- end -%> -<%- if @user -%> - user: <%= @user %> -<%- end -%> -<%- if @port -%> - port: <%= @port %> -<%- end -%> -<%- if @pass -%> - pass: <%= @pass %> -<%- end -%> -<%- if @sock -%> - sock: <%= @sock %> -<%- end -%> -<%- if @defaults_file -%> - defaults_file: <%= @defaults_file %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> -<%- if @options -%> - options: <%= @options %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/nagios_wrapper.erb b/templates/checks/nagios_wrapper.erb deleted file mode 100644 index 46e44c1..0000000 --- a/templates/checks/nagios_wrapper.erb +++ /dev/null @@ -1,17 +0,0 @@ - - name: <%= @title %> - check_command: <%= @check_command %> -<%- if @check_name -%> - metric_name: <%= @check_name %> -<%- end -%> -<%- if @host_name -%> - host_name: <%= @host_name %> -<%- end -%> -<%- if @check_interval -%> - check_interval: <%= @check_interval %> -<%- end -%> -<%- if @dimensions -%> - dimensions: -<%- @dimensions.each do |key, value| -%> - <%= key %>: <%= value %> -<%- end -%> -<%- end -%> diff --git a/templates/checks/nagios_wrapper.yaml.erb b/templates/checks/nagios_wrapper.yaml.erb deleted file mode 100644 index b68445f..0000000 --- a/templates/checks/nagios_wrapper.yaml.erb +++ /dev/null @@ -1,6 +0,0 @@ ---- -init_config: - check_path: <%= @check_path %> - temp_file_path: <%= @temp_file_path %> - -instances: diff --git a/templates/checks/network.erb b/templates/checks/network.erb deleted file mode 100644 index 9def43e..0000000 --- a/templates/checks/network.erb +++ /dev/null @@ -1,16 +0,0 @@ - - name: <%= @title %> -<%- if not @collect_connection_state.nil? 
-%> - collect_connection_state: <%= @collect_connection_state %> -<%- end -%> -<%- if @excluded_interfaces -%> - excluded_interfaces: <%= @excluded_interfaces %> -<%- end -%> -<%- if @excluded_interface_re -%> - excluded_interface_re: <%= @excluded_interface_re %> -<%- end -%> -<%- if @use_bits -%> - use_bits: <%= @use_bits %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> diff --git a/templates/checks/ovs.yaml.erb b/templates/checks/ovs.yaml.erb deleted file mode 100644 index b3bd942..0000000 --- a/templates/checks/ovs.yaml.erb +++ /dev/null @@ -1,25 +0,0 @@ ---- -init_config: - admin_password: <%= @admin_password %> - admin_tenant_name: <%= @admin_tenant_name %> - admin_user: <%= @admin_user %> - cache_dir: <%= @cache_dir %> - check_router_ha: <%= @check_router_ha %> - identity_uri: <%= @identity_uri %> -<%- if @metadata -%> - metadata: <%= @metadata %> -<%- end -%> - network_use_bits: <%= @network_use_bits %> - neutron_refresh: <%= @neutron_refresh %> - ovs_cmd: '<%= @ovs_cmd %>' -<%- if @region_name -%> - region_name: <%= @region_name %> -<%- end -%> - included_interface_re: <%= @included_interface_re %> - use_absolute_metrics: <%= @use_absolute_metrics %> - use_rate_metrics: <%= @use_rate_metrics %> - use_health_metrics: <%= @use_health_metrics %> - publish_router_capacity: <%= @publish_router_capacity %> - -instances: - - {} diff --git a/templates/checks/process.erb b/templates/checks/process.erb deleted file mode 100644 index ca27353..0000000 --- a/templates/checks/process.erb +++ /dev/null @@ -1,11 +0,0 @@ - - name: <%= @title %> - search_string: <%= @search_string %> -<%- if not @exact_match.nil? -%> - exact_match: <%= @exact_match %> -<%- end -%> -<%- if @cpu_check_interval -%> - cpu_check_interval: <%= @cpu_check_interval %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/checks/rabbitmq.erb b/templates/checks/rabbitmq.erb deleted file mode 100644 index d13da29..0000000 --- a/templates/checks/rabbitmq.erb +++ /dev/null @@ -1,41 +0,0 @@ - - name: <%= @title %> - rabbitmq_api_url: <%= @rabbitmq_api_url %> -<%- if @rabbitmq_user -%> - rabbitmq_user: <%= @rabbitmq_user %> -<%- end -%> -<%- if @rabbitmq_pass -%> - rabbitmq_pass: <%= @rabbitmq_pass %> -<%- end -%> -<%- if @queues -%> - queues: <%= @queues %> -<%- end -%> -<%- if @nodes -%> - nodes: <%= @nodes %> -<%- end -%> -<%- if @exchanges -%> - exchanges: <%= @exchanges %> -<%- end -%> -<%- if @queues_regexes -%> - queues_regexes: <%= @queues_regexes %> -<%- end -%> -<%- if @nodes_regexes -%> - nodes_regexes: <%= @nodes_regexes %> -<%- end -%> -<%- if @exchanges_regexes -%> - exchanges_regexes: <%= @exchanges_regexes %> -<%- end -%> -<%- if @max_detailed_queues -%> - max_detailed_queues: <%= @max_detailed_queues %> -<%- end -%> -<%- if @max_detailed_exchanges -%> - max_detailed_exchanges: <%= @max_detailed_exchanges %> -<%- end -%> -<%- if @max_detailed_nodes -%> - max_detailed_nodes: <%= @max_detailed_nodes %> -<%- end -%> -<%- if @whitelist -%> - whitelist: <%= @whitelist %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> diff --git a/templates/checks/solidfire.erb b/templates/checks/solidfire.erb deleted file mode 100644 index b3e804a..0000000 --- a/templates/checks/solidfire.erb +++ /dev/null @@ -1,4 +0,0 @@ - - name: <%= @title %> - username: <%= @admin_name %> - password: '<%= @admin_password %>' - mvip: <%= @cluster_mvip %> diff --git 
a/templates/checks/vertica.yaml.erb b/templates/checks/vertica.yaml.erb deleted file mode 100644 index 87ecb73..0000000 --- a/templates/checks/vertica.yaml.erb +++ /dev/null @@ -1,9 +0,0 @@ ---- -init_config: - -instances: - - user: '<%= @user %>' - password: '<%= @password %>' - service: '<%= @service %>' - node_name: '<%= @node_name %>' - timeout: '<%= @timeout %>' diff --git a/templates/checks/zk.erb b/templates/checks/zk.erb deleted file mode 100644 index c56088e..0000000 --- a/templates/checks/zk.erb +++ /dev/null @@ -1,13 +0,0 @@ - - name: <%= @title %> -<%- if @host -%> - host: <%= @host %> -<%- end -%> -<%- if @port -%> - port: <%= @port %> -<%- end -%> -<%- if @timeout -%> - timeout: <%= @timeout %> -<%- end -%> -<%- if @dimensions -%> - dimensions: <%= @dimensions %> -<%- end -%> \ No newline at end of file diff --git a/templates/config.js.erb b/templates/config.js.erb deleted file mode 100644 index 27dba8c..0000000 --- a/templates/config.js.erb +++ /dev/null @@ -1,95 +0,0 @@ -///// @scratch /configuration/config.js/1 - // == Configuration - // config.js is where you will find the core Grafana configuration. This file contains parameter that - // must be set before Grafana is run for the first time. - /// -define(['settings'], -function (Settings) { - "use strict"; - - return new Settings({ - - /* Data sources - * ======================================================== - * Datasources are used to fetch metrics, annotations, and serve as dashboard storage - * - You can have multiple of the same type. - * - grafanaDB: true marks it for use for dashboard storage - * - default: true marks the datasource as the default metric source (if you have multiple) - * - basic authentication: use url syntax http://username:password@domain:port - */ - - // InfluxDB example setup (the InfluxDB databases specified need to exist) - datasources: { - influxdb: { - type: 'influxdb', - url: "<%= @db_url %>:<%= @db_port %>/db/<%= @db_name %>", - username: <%= @db_username %>, - password: <%= @db_password %>, - }, - }, - */ - - // Graphite & Elasticsearch example setup - /* - datasources: { - graphite: { - type: 'graphite', - url: "http://my.graphite.server.com:8080", - }, - elasticsearch: { - type: 'elasticsearch', - url: "http://my.elastic.server.com:9200", - index: 'grafana-dash', - grafanaDB: true, - } - }, - */ - - // OpenTSDB & Elasticsearch example setup - /* - datasources: { - opentsdb: { - type: 'opentsdb', - url: "http://opentsdb.server:4242", - }, - elasticsearch: { - type: 'elasticsearch', - url: "http://my.elastic.server.com:9200", - index: 'grafana-dash', - grafanaDB: true, - } - }, - */ - - /* Global configuration options - * ======================================================== - */ - - // specify the limit for dashboard search results - search: { - max_results: 20 - }, - - // default start dashboard - default_route: <%= @default_board %>,, - - // set to false to disable unsaved changes warning - unsaved_changes_warning: true, - - // set the default timespan for the playlist feature - // Example: "1m", "1h" - playlist_timespan: "1m", - - // If you want to specify password before saving, please specify it bellow - // The purpose of this password is not security, but to stop some users from accidentally changing dashboards - admin: { - password: '' - }, - - // Add your own custom pannels - plugins: { - panels: [] - } - - }); -}); diff --git a/templates/mon.sql.erb b/templates/mon.sql.erb deleted file mode 100644 index cb14dfd..0000000 --- a/templates/mon.sql.erb +++ /dev/null @@ -1,259 
+0,0 @@ -CREATE DATABASE IF NOT EXISTS `mon` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; -USE `mon`; -SET foreign_key_checks = 0; - -/* - * Enum tables - */ -CREATE TABLE IF NOT EXISTS `alarm_state` ( - `name` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - PRIMARY KEY (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `alarm_definition_severity` ( - `name` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - PRIMARY KEY (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `notification_method_type` ( - `name` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - PRIMARY KEY (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `stream_actions_action_type` ( - `name` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - PRIMARY KEY (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `alarm` ( - `id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `alarm_definition_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `state` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - `lifecycle_state` varchar(50) DEFAULT NULL, - `link` varchar(512) DEFAULT NULL, - `created_at` datetime NOT NULL, - `state_updated_at` datetime, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `alarm_definition_id` (`alarm_definition_id`), - CONSTRAINT `fk_alarm_definition_id` FOREIGN KEY (`alarm_definition_id`) REFERENCES `alarm_definition` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_alarm_alarm_state` FOREIGN KEY (`state`) REFERENCES `alarm_state` (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `alarm_action` ( - `alarm_definition_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `alarm_state` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - `action_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - PRIMARY KEY (`alarm_definition_id`,`alarm_state`,`action_id`), - CONSTRAINT `fk_alarm_action_alarm_definition_id` FOREIGN KEY (`alarm_definition_id`) REFERENCES `alarm_definition` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_alarm_action_notification_method_id` FOREIGN KEY (`action_id`) REFERENCES `notification_method` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_alarm_action_alarm_state` FOREIGN KEY (`alarm_state`) REFERENCES `alarm_state` (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `alarm_definition` ( - `id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `tenant_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `description` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `expression` longtext COLLATE utf8mb4_unicode_ci NOT NULL, - `severity` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - `match_by` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT '', - `actions_enabled` tinyint(1) NOT NULL DEFAULT '1', - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - `deleted_at` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `tenant_id` (`tenant_id`), - KEY `deleted_at` (`deleted_at`), - CONSTRAINT `fk_alarm_definition_severity` FOREIGN KEY (`severity`) REFERENCES `alarm_definition_severity` (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `alarm_metric` ( - `alarm_id` varchar(36) 
COLLATE utf8mb4_unicode_ci NOT NULL, - `metric_definition_dimensions_id` binary(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', - PRIMARY KEY (`alarm_id`,`metric_definition_dimensions_id`), - KEY `alarm_id` (`alarm_id`), - KEY `metric_definition_dimensions_id` (`metric_definition_dimensions_id`), - CONSTRAINT `fk_alarm_id` FOREIGN KEY (`alarm_id`) REFERENCES `alarm` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `metric_definition` ( - `id` binary(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', - `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, - `tenant_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `region` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `metric_definition_dimensions` ( - `id` binary(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', - `metric_definition_id` binary(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', - `metric_dimension_set_id` binary(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', - KEY `metric_definition_id` (`metric_definition_id`), - KEY `metric_dimension_set_id` (`metric_dimension_set_id`), - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -/* - * mysql limits the size of a unique key to 767 bytes. The utf8mb4 charset requires - * 4 bytes to be allocated for each character while the utf8 charset requires 3 bytes. - * The utf8 charset should be sufficient for any reasonable characters, see the definition - * of supplementary characters for what it doesn't support. - * Even with utf8, the unique key length would be 785 bytes so only a subset of the - * name is used. Potentially the size of the name should be limited to 250 characters - * which would resolve this issue. - * - * The unique key is required to allow high performance inserts without doing a select by using - * the "insert into metric_dimension ... 
on duplicate key update dimension_set_id=dimension_set_id - * syntax - */ -CREATE TABLE IF NOT EXISTS `metric_dimension` ( - `dimension_set_id` binary(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', - `name` varchar(255) COLLATE utf8_unicode_ci NOT NULL DEFAULT '', - `value` varchar(255) COLLATE utf8_unicode_ci NOT NULL DEFAULT '', - UNIQUE KEY `metric_dimension_key` (`dimension_set_id`,`name`(252)), - KEY `dimension_set_id` (`dimension_set_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci COMMENT='PRIMARY KEY (`id`)'; - -CREATE TABLE IF NOT EXISTS `notification_method` ( - `id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `tenant_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `name` varchar(250) COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `type` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - `address` varchar(512) COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `period` int NOT NULL DEFAULT 0, - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`id`), - CONSTRAINT `fk_alarm_noticication_method_type` FOREIGN KEY (`type`) REFERENCES `notification_method_type` (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `sub_alarm_definition` ( - `id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `alarm_definition_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `function` varchar(10) COLLATE utf8mb4_unicode_ci NOT NULL, - `metric_name` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `operator` varchar(5) COLLATE utf8mb4_unicode_ci NOT NULL, - `threshold` double NOT NULL, - `period` int(11) NOT NULL, - `periods` int(11) NOT NULL, - `is_deterministic` tinyint(1) NOT NULL DEFAULT '0', - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `fk_sub_alarm_definition` (`alarm_definition_id`), - CONSTRAINT `fk_sub_alarm_definition` FOREIGN KEY (`alarm_definition_id`) REFERENCES `alarm_definition` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `sub_alarm_definition_dimension` ( - `sub_alarm_definition_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `dimension_name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `value` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL, - CONSTRAINT `fk_sub_alarm_definition_dimension` FOREIGN KEY (`sub_alarm_definition_id`) REFERENCES `sub_alarm_definition` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `sub_alarm` ( - `id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `alarm_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `sub_expression_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `expression` longtext COLLATE utf8mb4_unicode_ci NOT NULL, - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `fk_sub_alarm` (`alarm_id`), - KEY `fk_sub_alarm_expr` (`sub_expression_id`), - CONSTRAINT `fk_sub_alarm` FOREIGN KEY (`alarm_id`) REFERENCES `alarm` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_sub_alarm_expr` FOREIGN KEY (`sub_expression_id`) REFERENCES `sub_alarm_definition` (`id`) -); - -CREATE TABLE IF NOT EXISTS `schema_migrations` ( - `version` varchar(255) NOT NULL, - UNIQUE KEY `unique_schema_migrations` (`version`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -/* - * The tables needed by Monasca for event 
stream definitions - */ -CREATE TABLE IF NOT EXISTS `stream_actions` ( - `stream_definition_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `action_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `action_type` varchar(20) COLLATE utf8mb4_unicode_ci NOT NULL, - PRIMARY KEY (`stream_definition_id`,`action_id`,`action_type`), - KEY `stream_definition_id` (`stream_definition_id`), - KEY `action_type` (`action_type`), - KEY `fk_stream_action_notification_method_id` (`action_id`), - CONSTRAINT `fk_stream_action_stream_definition_id` FOREIGN KEY (`stream_definition_id`) REFERENCES `stream_definition` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_stream_action_notification_method_id` FOREIGN KEY (`action_id`) REFERENCES `notification_method` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_stream_actions_action_type` FOREIGN KEY (`action_type`) REFERENCES `stream_actions_action_type` (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `stream_definition` ( - `id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `tenant_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `name` varchar(190) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '', - `description` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `select_by` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `group_by` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `fire_criteria` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL, - `expiration` int(10) UNSIGNED DEFAULT '0', - `actions_enabled` tinyint(1) NOT NULL DEFAULT '1', - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - `deleted_at` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `tenant_name` (`tenant_id`,`name`), - KEY `name` (`name`), - KEY `tenant_id` (`tenant_id`), - KEY `deleted_at` (`deleted_at`), - KEY `created_at` (`created_at`), - KEY `updated_at` (`updated_at`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; - -CREATE TABLE IF NOT EXISTS `event_transform` ( - `id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `tenant_id` varchar(36) COLLATE utf8mb4_unicode_ci NOT NULL, - `name` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL, - `description` varchar(250) COLLATE utf8mb4_unicode_ci NOT NULL, - `specification` longtext COLLATE utf8mb4_unicode_ci NOT NULL, - `enabled` bool DEFAULT NULL, - `created_at` DATETIME NOT NULL, - `updated_at` DATETIME NOT NULL, - `deleted_at` DATETIME DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `tenant_name` (`tenant_id`,`name`), - KEY `name` (`name`), - KEY `tenant_id` (`tenant_id`), - KEY `deleted_at` (`deleted_at`), - KEY `created_at` (`created_at`), - KEY `updated_at` (`updated_at`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - - -GRANT SELECT ON mon.* TO 'notification'@'%'; -GRANT SELECT ON mon.* TO 'notification'@'localhost'; - -GRANT ALL ON mon.* TO 'monasca'@'%'; -GRANT ALL ON mon.* TO 'monasca'@'localhost'; - -GRANT ALL ON mon.* TO 'thresh'@'%'; -GRANT ALL ON mon.* TO 'thresh'@'localhost'; - -/* provide data for enum tables */ -insert into `alarm_state` values ('UNDETERMINED'); -insert into `alarm_state` values ('OK'); -insert into `alarm_state` values ('ALARM'); - -insert into `alarm_definition_severity` values ('LOW'); -insert into `alarm_definition_severity` values ('MEDIUM'); -insert into `alarm_definition_severity` values ('HIGH'); -insert into `alarm_definition_severity` values ('CRITICAL'); - -insert into `notification_method_type` values ('EMAIL'); -insert into `notification_method_type` values 
('WEBHOOK'); -insert into `notification_method_type` values ('PAGERDUTY'); - -insert into `stream_actions_action_type` values ('FIRE'); -insert into `stream_actions_action_type` values ('EXPIRE'); -/* provide data for enum tables */ - -SET foreign_key_checks = 1; diff --git a/templates/monasca-agent.init.erb b/templates/monasca-agent.init.erb deleted file mode 100755 index 7d46f0c..0000000 --- a/templates/monasca-agent.init.erb +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/sh - -### BEGIN INIT INFO -# Provides: monasca-agent -# Short-Description: Start and stop monasca-agent -# Description: monasca-agent is the monitoring Agent component of OpenStack Monitoring -# Required-Start: $remote_fs -# Required-Stop: $remote_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -### END INIT INFO - -. <%= @virtual_env %>/bin/activate - -PATH=$PATH:/usr/local/bin # supervisord might live here -PATH=$PATH:/sbin - -AGENTPATH="<%= @virtual_env %>/bin/monasca-collector" -AGENTCONF="<%= @agent_dir %>/agent.yaml" -MONASCASTATSDPATH="<%= @virtual_env %>/bin/monasca-statsd" -AGENTUSER="<%= @agent_user %>" -FORWARDERPATH="<%= @virtual_env %>/bin/monasca-forwarder" -NAME="monasca-agent" -DESC="Monasca Monitoring Agent" -AGENT_PID_PATH="/var/tmp/monasca-agent.pid" -SUPERVISOR_PIDFILE="/var/tmp/monasca-agent-supervisord.pid" -SUPERVISOR_FILE="<%= @agent_dir %>/supervisor.conf" -SUPERVISOR_SOCK="/var/tmp/monasca-agent-supervisor.sock" -SUPERVISORD="<%= @virtual_env %>/bin/supervisord" -SUPERVISORCTL="<%= @virtual_env %>/bin/supervisorctl" - -# This script is considered a configuration file and will not be -# removed by dpkg unless the --purge option is set. Therefore we -# make sure that the Agent is actually installed before we try to do anything: -if [ ! -x $AGENTPATH ]; then - echo "$AGENTPATH not found. Exiting." - exit 0 -fi - -check_status() { - # If the socket exists, we can use supervisorctl - if [ -e $SUPERVISOR_SOCK ]; then - # If we're using supervisor, check the number of processes - # supervisor is currently controlling, and make sure that it's the - # same as the number of programs specified in the supervisor config - # file: - - supervisor_processes=$($SUPERVISORCTL -c $SUPERVISOR_FILE status) - supervisor_processes=$(echo "$supervisor_processes" | - grep -v pup | - grep $NAME | - grep -c RUNNING) - supervisor_config_programs=$(grep -v pup $SUPERVISOR_FILE | - grep -c '\[program:') - - if [ "$supervisor_processes" -ne "$supervisor_config_programs" ]; then - echo "$supervisor_processes" - echo "$DESC (supervisor) is NOT running all child processes" - return 1 - else - echo "$DESC (supervisor) is running all child processes" - return 0 - fi - else - echo "$DESC (supervisor) is not running" - return 1 - fi -} - -# Action to take -case "$1" in - start) - if [ ! -f $AGENTCONF ]; then - echo "$AGENTCONF not found. Exiting." - exit 3 - fi - - check_status > /dev/null - if [ $? -eq 0 ]; then - echo "$DESC is already running" - exit 0 - fi - - su $AGENTUSER -c "$AGENTPATH configcheck" > /dev/null - if [ $? -ne 0 ]; then - echo "Invalid check configuration. Please run sudo /etc/init.d/monasca-agent configtest for more details." - echo "Resuming the start process." - fi - - - echo "Starting $DESC (using supervisord)" "$NAME" - $SUPERVISORD -c $SUPERVISOR_FILE -u $AGENTUSER --pidfile $SUPERVISOR_PIDFILE - if [ $? -ne 0 ]; then - exit 1 - fi - - # check if the agent is running once per second for 10 seconds - retries=10 - while [ $retries -gt 1 ]; do - if check_status > /dev/null; then - # We've started up successfully. 
Exit cleanly - exit 0 - else - retries=$(($retries - 1)) - sleep 1 - fi - done - # After 10 tries the agent didn't start. Report an error - check_status # report what went wrong - $0 stop - exit 1 - ;; - stop) - - if [ -e $SUPERVISOR_PIDFILE ]; then - kill `cat $SUPERVISOR_PIDFILE` - retries=10 - until ! check_status > /dev/null; do - if [ $retries -le 1 ]; then - echo "Timeout hit while waiting for agent to stop" - break - else - retries=$(($retries - 1)) - sleep 1 - fi - done - else - echo "Pid file $SUPERVISOR_PIDFILE not found, nothing to stop" - fi - - exit $? - - ;; - - info) - shift # Shift 'info' out of args so we can pass any - # additional options to the real command - # (right now only monasca-agent supports additional flags) - su $AGENTUSER -c "$AGENTPATH info $@" - COLLECTOR_RETURN=$? - su $AGENTUSER -c "$MONASCASTATSDPATH info" - MONASCASTATSD_RETURN=$? - su $AGENTUSER -c "$FORWARDERPATH info" - FORWARDER_RETURN=$? - exit $(($COLLECTOR_RETURN+$MONASCASTATSD_RETURN+$FORWARDER_RETURN)) - ;; - - status) - check_status - ;; - - restart|force-reload) - $0 stop - $0 start - ;; - - configcheck) - su $AGENTUSER -c "$AGENTPATH configcheck" - exit $? - ;; - - configtest) - su $AGENTUSER -c "$AGENTPATH configcheck" - exit $? - ;; - - jmx) - shift - su $AGENTUSER -c "$AGENTPATH jmx $@" - exit $? - ;; - - *) - echo "Usage: /etc/init.d/$NAME {start|stop|restart|info|status|configcheck|configtest|jmx}" - exit 1 - ;; -esac - -exit $? diff --git a/templates/notification.conf.erb b/templates/notification.conf.erb deleted file mode 100644 index 7b3a4b0..0000000 --- a/templates/notification.conf.erb +++ /dev/null @@ -1,14 +0,0 @@ -# Startup script for the monasca_notification - -description "Monasca Notification daemon" -start on runlevel [2345] - -console log -respawn - -kill timeout 240 -respawn limit 25 5 - -setgid monasca -setuid monasca-notification -exec <%= @virtual_env %>/bin/monasca-notification > /dev/null diff --git a/templates/notification.yaml.erb b/templates/notification.yaml.erb deleted file mode 100644 index 2c0ea9d..0000000 --- a/templates/notification.yaml.erb +++ /dev/null @@ -1,107 +0,0 @@ -kafka: - url: <%= @kafka_brokers %> - group: monasca-notification - alarm_topic: alarm-state-transitions - notification_topic: alarm-notifications - notification_retry_topic: retry-notifications - max_offset_lag: 600 -<% if @periodic_kafka_topics and not @periodic_kafka_topics.empty? 
-%> - periodic: -<% @periodic_kafka_topics.each do |periodic_kafka_topic| -%> - <%= periodic_kafka_topic %> -<%- end -%> -<% end -%> - -mysql: - host: <%= @sql_host %> - port: <%= @sql_port %> - user: <%= @sql_user %> - passwd: <%= @sql_password %> - db: mon - -notification_types: - plugins: - - monasca_notification.plugins.hipchat_notifier:HipChatNotifier - - monasca_notification.plugins.slack_notifier:SlackNotifier - - email: - server: <%= @smtp_server %> - port: <%= @smtp_port %> - user: <%= @smtp_user %> - password: <%= @smtp_password %> - timeout: 60 - from_addr: <%= @from_email_address %> - - webhook: - timeout: 5 - url: <%= @webhook_url %> - - pagerduty: - timeout: 5 - url: <%= @pagerduty_url %> - - hipchat: - timeout: 5 - ca_certs: <%= @hipchat_ca_certs %> - insecure: <%= @hipchat_insecure %> - - slack: - timeout: 5 - ca_certs: <%= @slack_ca_certs %> - insecure: <%= @slack_insecure %> - -processors: - alarm: - number: 2 - ttl: 14400 - notification: - number: 4 - -retry: - interval: 30 - max_attempts: 5 - -queues: - alarms_size: 256 - finished_size: 256 - notifications_size: 256 - sent_notifications_size: 50 - -zookeeper: - url: <%= @zookeeper_servers %> - notification_path: /notification/alarms - notification_retry_path: /notification/retry -<% if @periodic_zookeeper_paths and not @periodic_zookeeper_paths.empty? -%> - periodic_path: -<% @periodic_zookeeper_paths.each do |periodic_zookeeper_path| -%> - <%= periodic_zookeeper_path %> -<%- end -%> -<% end -%> - -logging: - version: 1 - disable_existing_loggers: False - formatters: - default: - format: "%(asctime)s %(levelname)s %(name)s %(message)s" - handlers: - console: - class: logging.StreamHandler - formatter: default - file: - class : logging.handlers.RotatingFileHandler - filename: /var/log/monasca/notification.log - formatter: default - maxBytes: 10485760 - backupCount: 5 - loggers: - kazoo: - level: INFO - kafka: - level: INFO - statsd: - level: INFO - root: - handlers: - - file - level: INFO diff --git a/templates/notification_assignments.json.erb b/templates/notification_assignments.json.erb deleted file mode 100644 index 672eff0..0000000 --- a/templates/notification_assignments.json.erb +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "alarm_definition_name":"http_status", - "notification_methods": ["sample_email"], - "actions": ["ALARM", "OK", "UNDETERMINED"] - } -] diff --git a/templates/notification_config.json.erb b/templates/notification_config.json.erb deleted file mode 100644 index 1bfe7e1..0000000 --- a/templates/notification_config.json.erb +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "name":"sample_email", - "type":"EMAIL", - "address":"foo@gmail.com" - } -] diff --git a/templates/persister-config.yml.erb b/templates/persister-config.yml.erb deleted file mode 100644 index f6bee54..0000000 --- a/templates/persister-config.yml.erb +++ /dev/null @@ -1,96 +0,0 @@ -name: <%= @persister_service_name %> - -alarmHistoryConfiguration: - batchSize: <%= @batch_size %> - numThreads: <%= @num_threads %> - maxBatchTime: <%= @batch_seconds %> - topic: alarm-state-transitions - groupId: <%= @persister_config['consumer_group_id'] %>_alarm-state-transitions - consumerId: <%= @consumer_id %> - clientId : 1 - -metricConfiguration: - batchSize: <%= @batch_size %> - numThreads: <%= @num_threads %> - maxBatchTime: <%= @batch_seconds %> - topic: metrics - groupId: <%= @persister_config['consumer_group_id'] %>_metrics - consumerId: <%= @consumer_id %> - clientId : 1 - -kafkaConfig: - zookeeperConnect: <%= @zookeeper_servers %> - socketTimeoutMs: 30000 
- socketReceiveBufferBytes : 65536 - fetchMessageMaxBytes: 1048576 - queuedMaxMessageChunks: 10 - rebalanceMaxRetries: 4 - fetchMinBytes: 1 - fetchWaitMaxMs: 100 - rebalanceBackoffMs: 2000 - refreshLeaderBackoffMs: 200 - autoOffsetReset: largest - consumerTimeoutMs: 1000 - zookeeperSessionTimeoutMs : 60000 - zookeeperConnectionTimeoutMs : 60000 - zookeeperSyncTimeMs: 2000 - -verticaMetricRepoConfig: - maxCacheSize: 2000000 - -databaseConfiguration: - databaseType: <%= @persister_config['database_type'] %> - -influxDbConfiguration: - name: mon - version: V9 - maxHttpConnections: 100 - gzip: <%= @gzip_setting %> - replicationFactor: <%= @replication_factor %> - retentionPolicy: <%= @retention_policy %> - url: <%= @persister_config['database_url'] %> - user: <%= @pers_db_user %> - password: <%= @pers_db_password %> - -dataSourceFactory: - driverClass: com.vertica.jdbc.Driver - url: <%= @persister_config['database_url'] %> - user: <%= @pers_db_user %> - password: <%= @pers_db_password %> - properties: - ssl: false - maxWaitForConnection: 5s - validationQuery: "/* MyService Health Check */ SELECT 1" - minSize: 8 - maxSize: 41 - checkConnectionWhileIdle: <%= @check_conn_while_idle %> - maxConnectionAge: 1 minute - -metrics: - frequency: 1 second - -logging: - - level: DEBUG - - loggers: - - com.example.app: DEBUG - - appenders: - - - type: file - threshold: INFO - archive: true - currentLogFilename: /var/log/monasca/<%= @persister_service_name %>.log - archivedLogFilenamePattern: /var/log/monasca/<%= @persister_service_name %>.log-%d.log.gz - archivedFileCount: 5 - timeZone: UTC - -server: - applicationConnectors: - - type: http - port: <%= @persister_config['application_port'] %> - adminConnectors: - - type: http - port: <%= @persister_config['admin_port'] %> diff --git a/templates/persister-startup-script.erb b/templates/persister-startup-script.erb deleted file mode 100644 index 63b673f..0000000 --- a/templates/persister-startup-script.erb +++ /dev/null @@ -1,15 +0,0 @@ -# Startup script for the <%= @persister_service_name %> - -description "<%= @persister_service_name %> java app" -start on runlevel [2345] - -console log -respawn - -setgid monasca -setuid persister -exec /usr/bin/java -Dfile.encoding=UTF-8 -Xmx8g -cp \ - /opt/monasca/monasca-persister.jar:/opt/vertica/java/lib/vertica_jdbc.jar \ - monasca.persister.PersisterApplication server \ - /etc/monasca/<%= @persister_service_name %>.yml \ - > /dev/null diff --git a/templates/storm-startup-script.erb b/templates/storm-startup-script.erb deleted file mode 100644 index 65a53c3..0000000 --- a/templates/storm-startup-script.erb +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -# -# /etc/init.d/storm-<%= @storm_service %> -# -# Startup script for storm-<%= @storm_service %> -# -# description: Starts and stops storm-<%= @storm_service %> -# -stormBin=<%= @storm_install_dir %>/bin/storm -stormSvc=<%= @storm_service %> -desc="Storm $stormSvc daemon" -outFile="/var/log/storm/storm-$stormSvc.out" - -if ! [ -f $stormBin ]; then - echo "storm binary not found." - exit 5 -fi - -start() { - echo "Starting $desc (storm-$stormSvc): " - su <%= @storm_user %> -c "nohup $stormBin <%= @storm_service %> >>$outFile 2>&1 &" - RETVAL=$? 
- sleep 2 - return $RETVAL -} - -stop() { - echo "Shutting down $desc (storm-$stormSvc): " - if [ $stormSvc == "ui" ]; then - procname="storm.ui.core" - else - procname="storm.daemon.$stormSvc" - fi - - pkill -f $procname -} - -restart() { - stop - start -} - -status() { - if [ $stormSvc == "ui" ]; then - pid=$(pgrep -f storm.ui.core) - else - pid=$(pgrep -f storm.daemon.$stormSvc) - fi - - if [ -z $pid ]; then - echo "storm-$stormSvc is NOT running." - exit 1 - fi - - echo "storm-$stormSvc running with pid $pid" - exit 0 -} - -case "$1" in - start) start;; -stop) stop;; -restart) restart;; -status) status;; -*) echo "Usage: $0 {start|stop|restart}" - RETVAL=2;; -esac -exit $RETVAL diff --git a/templates/supervisor.conf.erb b/templates/supervisor.conf.erb deleted file mode 100644 index 339af43..0000000 --- a/templates/supervisor.conf.erb +++ /dev/null @@ -1,48 +0,0 @@ -[supervisorctl] -serverurl = unix:///var/tmp/monasca-agent-supervisor.sock - -[unix_http_server] -file=/var/tmp/monasca-agent-supervisor.sock - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[supervisord] -minfds = 1024 -minprocs = 200 -loglevel = info -logfile = <%= @log_dir %>/supervisord.log -logfile_maxbytes = 50MB -nodaemon = false -pidfile = /var/run/monasca-agent-supervisord.pid -logfile_backups = 10 - -[program:collector] -command=<%= @virtual_env %>/bin/monasca-collector foreground -stdout_logfile=NONE -stderr_logfile=NONE -priority=999 -startsecs=2 -user=<%= @agent_user %> -autorestart=true - -[program:forwarder] -command=<%= @virtual_env %>/bin/monasca-forwarder -stdout_logfile=NONE -stderr_logfile=NONE -startsecs=3 -priority=998 -user=<%= @agent_user %> -autorestart=true - -[program:statsd] -command=<%= @virtual_env %>/bin/monasca-statsd -stdout_logfile=NONE -stderr_logfile=NONE -startsecs=3 -priority=998 -user=<%= @agent_user %> -autorestart=true - -[group:monasca-agent] -programs=forwarder,collector,statsd diff --git a/templates/thresh-config.yml.erb b/templates/thresh-config.yml.erb deleted file mode 100644 index df3d087..0000000 --- a/templates/thresh-config.yml.erb +++ /dev/null @@ -1,88 +0,0 @@ -metricSpoutThreads: 2 -metricSpoutTasks: 2 - -metricSpoutConfig: - kafkaConsumerConfiguration: - topic: metrics - numThreads: 1 - groupId: thresh-metric - zookeeperConnect: <%= @zookeeper_servers %> - consumerId: 1 - socketTimeoutMs: 30000 - socketReceiveBufferBytes : 65536 - fetchMessageMaxBytes: 1048576 - autoCommitEnable: true - autoCommitIntervalMs: 60000 - queuedMaxMessageChunks: 10 - rebalanceMaxRetries: 4 - fetchMinBytes: 1 - fetchWaitMaxMs: 100 - rebalanceBackoffMs: 2000 - refreshLeaderBackoffMs: 200 - autoOffsetReset: largest - consumerTimeoutMs: -1 - clientId : 1 - zookeeperSessionTimeoutMs : 60000 - zookeeperConnectionTimeoutMs : 60000 - zookeeperSyncTimeMs: 2000 - -eventSpoutConfig: - kafkaConsumerConfiguration: - topic: events - numThreads: 1 - groupId: thresh-event - zookeeperConnect: <%= @zookeeper_servers %> - consumerId: 1 - socketTimeoutMs: 30000 - socketReceiveBufferBytes : 65536 - fetchMessageMaxBytes: 1048576 - autoCommitEnable: true - autoCommitIntervalMs: 60000 - queuedMaxMessageChunks: 10 - rebalanceMaxRetries: 4 - fetchMinBytes: 1 - fetchWaitMaxMs: 100 - rebalanceBackoffMs: 2000 - refreshLeaderBackoffMs: 200 - autoOffsetReset: largest - consumerTimeoutMs: -1 - clientId : 1 - zookeeperSessionTimeoutMs : 60000 - zookeeperConnectionTimeoutMs : 60000 - zookeeperSyncTimeMs: 2000 - -kafkaProducerConfig: - topic: 
alarm-state-transitions - metadataBrokerList: <%= @kafka_brokers %> - serializerClass: kafka.serializer.StringEncoder - partitionerClass: - requestRequiredAcks: 1 - requestTimeoutMs: 10000 - producerType: sync - keySerializerClass: - compressionCodec: none - compressedTopics: - messageSendMaxRetries: 3 - retryBackoffMs: 100 - topicMetadataRefreshIntervalMs: 600000 - queueBufferingMaxMs: 5000 - queueBufferingMaxMessages: 10000 - queueEnqueueTimeoutMs: -1 - batchNumMessages: 200 - sendBufferBytes: 102400 - clientId : Threshold_Engine - -sporadicMetricNamespaces: - - foo - -database: - driverClass: com.mysql.jdbc.Driver - url: jdbc:mysql://<%= @sql_host %>:<%= @sql_port %>/mon?connectTimeout=5000&autoReconnect=true - user: <%= @sql_user %> - password: <%= @sql_password %> - properties: - ssl: false - maxWaitForConnection: 1s - validationQuery: "/* MyService Health Check */ SELECT 1" - minSize: 8 - maxSize: 41 diff --git a/templates/vertica/create_mon_db.sh.erb b/templates/vertica/create_mon_db.sh.erb deleted file mode 100644 index b7d61a1..0000000 --- a/templates/vertica/create_mon_db.sh.erb +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/sh -xe -# -# Build the mon database -# - -if [ $USER != 'dbadmin' ]; then - echo "Must be run by the dbadmin user" - exit -fi - -if [ $# -lt 1 ]; then - echo "Usage: create_mon_db.sh LICENSE_FILE [USE_SSL]" - exit -fi - -LICENSE_FILE=$1 - -if [ $# -ge 2 ] && [ "$2" = "USE_SSL" ]; then - USE_SSL=true -else - USE_SSL=false -fi - -# Make sure the locale settings are set correctly -. /etc/profile.d/vertica_node.sh - -# see if the db already exists -/opt/vertica/bin/admintools -t list_db -d mon - -if [ $? -eq 1 ]; then - # db doesn't exist, create it - /opt/vertica/bin/admintools -t create_db -s 127.0.0.1 -d mon -p <%= @db_admin_password %> -l $LICENSE_FILE -fi - -# see if the db is running -RUNNING_DBS=$(/opt/vertica/bin/admintools -t show_active_db) - -if [ "$RUNNING_DBS" != "mon" ]; then - # start the db - /opt/vertica/bin/admintools -t start_db -p <%= @db_admin_password %> -d mon -fi - -# Add in the schemas -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_schema.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_metrics_schema.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_alarms_schema.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_users.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_grants.sql - -# Set restart policy so a single node cluster comes back after a reboot -/opt/vertica/bin/admintools -t set_restart_policy -d mon -p always - -# For ssl support link the cert/key and restart the db -if [ "$USE_SSL" = "true" ] -then - ln /var/vertica/server* /var/vertica/catalog/mon/v*/ - /opt/vertica/bin/admintools -t stop_db -F -p <%= @db_admin_password %> -d mon - /opt/vertica/bin/admintools -t start_db -p <%= @db_admin_password %> -d mon -fi diff --git a/templates/vertica/create_mon_db_cluster.sh.erb b/templates/vertica/create_mon_db_cluster.sh.erb deleted file mode 100644 index c7721be..0000000 --- a/templates/vertica/create_mon_db_cluster.sh.erb +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/sh -xe -# -# Build the mon database -# - -if [ $USER != 'dbadmin' ]; then - echo "Must be run by the dbadmin user" - exit -fi - -if [ $# -lt 1 ]; then - echo "Usage: create_mon_db_cluster.sh LICENSE_FILE [USE_SSL]" - exit -fi - -LICENSE_FILE=$1 - -if [ $# -ge 2 ] && [ "$2" = "USE_SSL" ]; then - USE_SSL=true -else - USE_SSL=false -fi - -# Make sure the locale settings are set 
correctly -. /etc/profile.d/vertica_node.sh - -# Pull comma separated list of nodes from the config -nodes=`grep install_opts /opt/vertica/config/admintools.conf | cut -d\ -f 4 |cut -d\' -f 2` - -# see if the db already exists -/opt/vertica/bin/admintools -t list_db -d mon - -if [ $? -eq 1 ]; then - # db doesn't exist, create it - /opt/vertica/bin/admintools -t create_db -s $nodes -d mon -p <%= @db_admin_password %> -l $LICENSE_FILE -fi - -# see if the db is running -RUNNING_DBS=$(/opt/vertica/bin/admintools -t show_active_db) - -if [ "$RUNNING_DBS" != "mon" ]; then - # start the db - /opt/vertica/bin/admintools -t start_db -p <%= @db_admin_password %> -d mon -fi - -# Add in the schemas -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_schema.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_metrics_schema.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_alarms_schema.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_users.sql -/opt/vertica/bin/vsql -w <%= @db_admin_password %> < /var/vertica/mon_grants.sql - -# Set restart policy to ksafe -/opt/vertica/bin/admintools -t set_restart_policy -d mon -p ksafe - -if [ "$USE_SSL" = "true" ] -then - # For ssl support link the cert/key on each server and restart the db - IFS=',' - for node in $nodes - do - ssh $node 'ln -s /var/vertica/server* /var/vertica/catalog/mon/v*/' - done - - /opt/vertica/bin/admintools -t stop_db -F -p <%= @db_admin_password %> -d mon - /opt/vertica/bin/admintools -t start_db -p <%= @db_admin_password %> -d mon -fi diff --git a/templates/vertica/drop_vertica_partitions.py.erb b/templates/vertica/drop_vertica_partitions.py.erb deleted file mode 100755 index dc392e5..0000000 --- a/templates/vertica/drop_vertica_partitions.py.erb +++ /dev/null @@ -1,80 +0,0 @@ -#!<%= @virtual_env %>/bin/python -# -from datetime import datetime -from dateutil.relativedelta import relativedelta - -import argparse -import subprocess - -VSQL = "/usr/sbin/vsql" - -GET_KEYS_SQL = """ - SELECT DISTINCT - partition_key - FROM - partitions - WHERE - partition_key <= %s AND table_schema = '%s'; -""" - -DROP_SQL = "SELECT DROP_PARTITION('%s', %s);" - -SCHEMAS_DICT = {"MonMetrics": "MonMetrics.Measurements", - "MonAlarms": "MonAlarms.StateHistory"} - - -def main(): - parser = argparse.ArgumentParser() - - help_text = """ - Number of months of partitions to retain. 
- """ - parser.add_argument('-m', '--months', required=True, - help=help_text) - - args = parser.parse_args() - drop_partitions(args) - - -def get_drop_key(months_ago): - the_past = datetime.utcnow() - relativedelta(months=+int(months_ago)) - partition_key = str(the_past.year) + str(the_past.month).zfill(2) - return partition_key - - -def drop_partitions(args): - drop_key = get_drop_key(args.months) - print "Deleting partitions '%s' and older" % drop_key - for schema, table in SCHEMAS_DICT.iteritems(): - keys = vsql(GET_KEYS_SQL % (drop_key, schema)) - if len(keys) == 0: - msg = " No partitions older than or equal to '%s' found for '%s'" - print msg % (drop_key, schema) - continue - for key in keys: - print " Dropping '%s' partition for '%s'" % (key, table) - vsql(DROP_SQL % (table, key)) - - -def vsql(sql): - results = [] - p = subprocess.Popen([VSQL, "-c", sql], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - out, err = p.communicate() - if p.returncode != 0: - print("vsql failed: %s %s" % (out, err)) - return "" - for l in out.splitlines(): - if '--' in l or 'partition_key' in l: - # header row, skip - continue - if 'row' in l: - # we're done - return results - results.append(l.strip()) - - -if __name__ == '__main__': - main() diff --git a/templates/vertica/mon_schema.sql.erb b/templates/vertica/mon_schema.sql.erb deleted file mode 100644 index 1ad2111..0000000 --- a/templates/vertica/mon_schema.sql.erb +++ /dev/null @@ -1,33 +0,0 @@ --- Tune the DB -SELECT SET_CONFIG_PARAMETER ('MaxClientSessions', 200); --- Turn off messages in the log created by the load balancer/icinga checks -SELECT set_config_parameter('WarnOnIncompleteStartupPacket', 0); - --- Enable SSL ** Requires a db restart, also the restart will fail of the ssl cert is not in place on the server --- The certs are placed in the root catalog dir by vertica and should be linked to the correct dir after db creation --- ln /var/vertica/catalog/server* /var/vertica/catalog/mon/v*/' --- TODO ADD DYNAMIC SSL SUPPORT TO MODULE --- SELECT SET_CONFIG_PARAMETER('EnableSSL', '1'); - --- Enable SNMP alerts -SELECT SET_CONFIG_PARAMETER('SnmpTrapsEnabled', 1 ); -SELECT SET_CONFIG_PARAMETER('SnmpTrapEvents', 'Low Disk Space, Read Only File System, Loss Of K Safety, Current Fault Tolerance at Critical Level, Too Many ROS Containers, WOS Over Flow, Node State Change, Recovery Failure, Recovery Error, Recovery Lock Error, Recovery Projection Retrieval Error, Refresh Error, Tuple Mover Error, Stale Checkpoint'); --- Set the snmp trap destination, the host name for the appropriate icinga server should be filled in before the port, ie --- SELECT SET_CONFIG_PARAMETER('SnmpTrapDestinationsList', 'ops-aw1rdd1-monitoring0000.rndd.aw1.hpcloud.net 162 public' ); - --- Set the WOS size large to handle lots of inserts and give it a dedicated bit of space so inserts can be constant, --- The catch is every moveout makes a ROS and we quickly get lots of partitions, mergeouts are slow but keep partitions down -SELECT do_tm_task('moveout'); -- Do a moveout as the memory sizes won't change with active transactions. 
-ALTER RESOURCE POOL wosdata memorysize '250M' maxmemorysize '5G'; -- default 0 and 2GB -ALTER RESOURCE POOL tm plannedconcurrency 2 maxconcurrency 4; -- default 1 and 2 -SELECT SET_CONFIG_PARAMETER ('MoveOutSizePct', 75); -- default 0 -SELECT SET_CONFIG_PARAMETER ('MoveOutInterval', 300); -- default 300 -SELECT SET_CONFIG_PARAMETER ('MergeOutInterval', 300); -- default 600 - -CREATE RESOURCE POOL <%= @api_pool %> MEMORYSIZE '<%= @api_pool_mem_size %>' MAXMEMORYSIZE '<%= @api_pool_max_mem_size %>' PLANNEDCONCURRENCY <%= @api_pool_planned_con %> MAXCONCURRENCY <%= @api_pool_max_con %> RUNTIMEPRIORITY <%= @api_pool_runtime_priority %> RUNTIMEPRIORITYTHRESHOLD <%= @api_pool_runtime_priority_thresh %> PRIORITY <%= @api_pool_priority %> EXECUTIONPARALLELISM <%= @api_pool_exec_parallel %>; -CREATE RESOURCE POOL <%= @pers_pool %> MEMORYSIZE '<%= @pers_pool_mem_size %>' MAXMEMORYSIZE '<%= @pers_pool_max_mem_size %>' PLANNEDCONCURRENCY <%= @pers_pool_planned_con %> MAXCONCURRENCY <%= @pers_pool_max_con %> RUNTIMEPRIORITY <%= @pers_pool_runtime_priority %> RUNTIMEPRIORITYTHRESHOLD <%= @pers_pool_runtime_priority_thresh %> PRIORITY <%= @pers_pool_priority %> EXECUTIONPARALLELISM <%= @pers_pool_exec_parallel %>; - --- Create users -CREATE USER <%= @monitor_user %> IDENTIFIED BY '<%= @monitor_password %>'; -GRANT pseudosuperuser TO monitor; -- This is the only way I know to allow the monitor user to see some user permissions. -ALTER USER monitor DEFAULT ROLE pseudosuperuser; diff --git a/templates/vertica/mon_users.sql.erb b/templates/vertica/mon_users.sql.erb deleted file mode 100644 index 4e1f489..0000000 --- a/templates/vertica/mon_users.sql.erb +++ /dev/null @@ -1,7 +0,0 @@ -CREATE USER <%= @pers_db_user %> IDENTIFIED BY '<%= @pers_db_password %>'; -GRANT USAGE ON RESOURCE POOL <%= @pers_pool %> to <%= @pers_db_user %>; -ALTER USER <%= @pers_db_user %> RESOURCE POOL <%= @pers_pool %>; - -CREATE USER <%= @api_db_user %> IDENTIFIED BY '<%= @api_db_password %>'; -GRANT USAGE ON RESOURCE POOL <%= @api_pool %> to <%= @api_db_user %>; -ALTER USER <%= @api_db_user %> RESOURCE POOL <%= @api_pool %>; diff --git a/templates/vertica/prune_vertica.py.erb b/templates/vertica/prune_vertica.py.erb deleted file mode 100755 index cdc7943..0000000 --- a/templates/vertica/prune_vertica.py.erb +++ /dev/null @@ -1,363 +0,0 @@ -#!<%= @virtual_env %>/bin/python -# -# TODO: Once monasca supports per-project retention policy, -# use the monasca client to retrieve the policy and -# honor that. For now, -l and -i can be used to accomplish -# the same thing. -# -from datetime import datetime -from prettytable import PrettyTable - -import keystoneclient.v2_0.client as keystone_client -import argparse -import json -import sys -import subprocess -import time -import os - -VSQL = "/usr/sbin/vsql" -MON_TABLES = ["Definitions", - "Dimensions", - "DefinitionDimensions", - "Measurements"] - - -def main(): - parser = argparse.ArgumentParser() - limit_ignore_group = parser.add_mutually_exclusive_group() - date_group = parser.add_mutually_exclusive_group(required=True) - - help_text = """ - Limit pruning to just this list of tenant name(s). - """ - limit_ignore_group.add_argument('-l', '--limit', required=False, - nargs='+', help=help_text) - - help_text = """ - Ignore pruning for this list of tenant name(s). - """ - limit_ignore_group.add_argument('-i', '--ignore', required=False, - nargs='+', help=help_text) - help_text = """ - The name of the admin tenant. 
This tenant will not be - pruned, and libvirt metrics that are cross-posted here will - be pruned. - """ - parser.add_argument('-a', '--admin_tenant', required=True, - help=help_text) - - help_text = """ - Number of days of measurements to retain. - """ - date_group.add_argument('-r', '--retain_days', required=False, - help=help_text) - - help_text = """ - Filename containing whitelist of metric names to not prune from the admin - tenant. Valid only with -a and -r. Format of the file: - [ - "metric_name_one", - "metric_name_two", - "metric_name_three" - ] - """ - parser.add_argument('-w', '--whitelist', required=False, - help=help_text) - - help_text = """ - Start time of the pruning period. - """ - date_group.add_argument('-s', '--start_time', required=False, - help=help_text) - - help_text = """ - End time of the pruning period. - """ - parser.add_argument('-e', '--end_time', required=False, - help=help_text) - - help_text = """ - Metric name(s) separated by spaces to delete, only works with - -l (limit) argument. As in '-m mem.total_mb cpu.idle_perc'. - """ - parser.add_argument('-m', '--metric_name', required=False, - nargs='+', help=help_text) - - help_text = """ - Dimension key/value pairs to delete, only works with -m (name) - argument. Syntax is the same as the monasca command: - -d 'region=dev02,hostname=dev02-keystone-001'. Note that - multiple dimensions are supported -- but only if part of - a common dimension set (not multiple dimension sets). - """ - parser.add_argument('-d', '--dimensions', required=False, - help=help_text) - - args = parser.parse_args() - - if args.limit and args.admin_tenant in args.limit: - print "Error: Pruning the admin project is not allowed!" - sys.exit(1) - - if args.start_time and not args.end_time: - print "Error: Please provide both start and end time." - sys.exit(1) - - if args.start_time and args.end_time and not args.start_time < args.end_time: - print "Error: Start time must preceed end time." - sys.exit(1) - - if args.dimensions and not args.metric_name: - print "Error: Please provide metric name with dimensions." - sys.exit(1) - - if args.whitelist and not (args.admin_tenant and args.retain_days): - print "Error: Whitelist file only valid with -a and -r." 
- sys.exit(1) - - before_counts = get_table_counts(MON_TABLES) - prune_tables(args) - display_pruning_result(before_counts) - - -def prune_tables(args): - now = time.time() - run_time = get_isotime(now) - tenants = get_tenant_list() - admin_id = get_tenant_id(tenants, args.admin_tenant) - print "Database pruning executing at %s" % run_time - print "Admin project '%s' (%s)" % (args.admin_tenant, admin_id) - if args.retain_days: - retain_time = get_isotime(now - (int(args.retain_days) * 60 * 60 * 24)) - time_clause = "time_stamp < '%s'" % retain_time - print "Keeping records more recent than %s" % retain_time - else: - print "Pruning records between %s and %s" % (args.start_time, args.end_time) - time_clause = "time_stamp >= '%s' and time_stamp <= '%s'" % (args.start_time, args.end_time) - - if args.whitelist: - msg = "Only retaining metrics specified in '%s' for '%s' project" - print msg % (args.whitelist, args.admin_tenant) - whitelist = read_whitelist_file(args.whitelist) - admin_clause = "AND def.tenant_id = '%s' AND def.name NOT IN (%s)" % \ - (admin_id, whitelist) - else: - admin_clause = "AND def.tenant_id = '%s' AND REGEXP_LIKE(def.name, '^(vm|ovs)\.')" % \ - admin_id - - delete_sql = """ - DELETE - FROM MonMetrics.Measurements - WHERE %s - AND definition_dimensions_id IN (SELECT - defdims.id - FROM MonMetrics.Definitions def, - MonMetrics.DefinitionDimensions defdims - LEFT OUTER JOIN MonMetrics.Dimensions dims - ON dims.dimension_set_id = defdims.dimension_set_id - WHERE def.id = defdims.definition_id - %s); - COMMIT; - """ - - if args.limit: - - print "Limiting pruning to projects '%s'" % args.limit - limited_in_list = get_tenant_id_in_list(tenants, args.limit) - if args.metric_name: - limit_clause = \ - "AND def.name IN (%s) AND def.tenant_id IN (%s)" \ - % (str(args.metric_name).strip('[]'), limited_in_list) - if args.dimensions: - print "Only pruning '%s' with '%s' dimensions " % \ - (args.metric_name, args.dimensions) - limit_clause += get_dimensions_sql(args.dimensions) - else: - print "Only pruning '%s' metrics" % args.metric_name - else: - limit_clause = \ - "AND def.tenant_id != '%s' AND def.tenant_id IN (%s)" \ - % (admin_id, limited_in_list) - - vsql(delete_sql % (time_clause, limit_clause)) - - elif args.ignore: - - print "Ignoring pruning for projects '%s'" % args.ignore - ignore_in_list = get_tenant_id_in_list(tenants, args.ignore) - non_admin_with_ignore_clause = \ - "AND def.tenant_id != '%s' AND def.tenant_id NOT IN (%s)" \ - % (admin_id, ignore_in_list) - vsql(delete_sql % (time_clause, non_admin_with_ignore_clause)) - vsql(delete_sql % (time_clause, admin_clause)) - - else: - non_admin_clause = "AND def.tenant_id != '%s'" % admin_id - vsql(delete_sql % (time_clause, non_admin_clause)) - vsql(delete_sql % (time_clause, admin_clause)) - - delete_defdims_sql = """ - DELETE FROM MonMetrics.DefinitionDimensions - WHERE id NOT IN (SELECT DISTINCT - definition_dimensions_id - FROM MonMetrics.measurements); - COMMIT; - """ - - delete_dims_sql = """ - DELETE FROM monmetrics.dimensions - WHERE dimension_set_id NOT IN (SELECT DISTINCT - dimension_set_id - FROM monmetrics.definitiondimensions); - COMMIT; - """ - - delete_defs_sql = """ - DELETE FROM monmetrics.definitions - WHERE id NOT IN (SELECT DISTINCT - definition_id - FROM monmetrics.definitiondimensions); - COMMIT; - """ - - vsql(delete_defdims_sql) - vsql(delete_dims_sql) - vsql(delete_defs_sql) - - vsql("select make_ahm_now();") - vsql("select purge();") - - -def read_whitelist_file(white_list_file): - try: - 
metrics = [str(metric) for metric in json.load(open(white_list_file))] - if len(metrics) <= 0: - msg = "Whitelist file '{}' didn't contain any metrics!" - print msg.format(white_list_file) - sys.exit(1) - return str(metrics).strip('[]') - except Exception as e: - msg = "Caught exception '{}' trying to open and parse '{}'." - print msg.format(e, white_list_file) - sys.exit(1) - - -def get_tenant_list(): - kc = get_keystone_client() - return kc.tenants.list() - - -def get_dimensions_sql(dimensions): - - sql = """ - and defDims.dimension_set_id in (select - dimension_set_id - from - MonMetrics.Dimensions - where - """ - - first_time = True - pairs = dimensions.split(',') - for pair in pairs: - kv = pair.split('=') - if first_time: - sql += " ( " - else: - sql += " or " - sql += " (name = '%s' and value = '%s')" % (kv[0], kv[1]) - first_time = False - - - sql += """ - ) group by - dimension_set_id - having - count(*) = %d) - """ % len(pairs) - - return sql - - -def get_tenant_id_in_list(tenants, names): - tenant_ids = [] - for tenant_name in names: - tenant_id = get_tenant_id(tenants, tenant_name) - tenant_ids.append(tenant_id) - in_list = str(tenant_ids).strip('[]') - return in_list - - -def get_tenant_id(tenants, tenant_name): - tenant_id = None - for tenant in tenants: - if tenant.name == tenant_name: - tenant_id = tenant.id - break - if not tenant_id: - print "Unable to find '%s' project -- aborting!" % tenant_name - sys.exit(1) - - return str(tenant_id) - - -def get_keystone_client(): - kwargs = { - 'username': os.environ['OS_USERNAME'], - 'tenant_name': os.environ['OS_TENANT_NAME'], - 'password': os.environ['OS_PASSWORD'], - 'auth_url': os.environ['OS_AUTH_URL'], - 'region_name': os.environ['OS_REGION_NAME'], - } - - return keystone_client.Client(**kwargs) - - -def get_table_counts(tables): - counts = {} - for table in tables: - sql = "SELECT COUNT(*) FROM MonMetrics.%s" % table - counts[table] = vsql(sql) - return counts - - -def display_pruning_result(before): - display_table = PrettyTable(["Monasca Table", "Count Before", - "Count After", "Rows Pruned"]) - after = get_table_counts(MON_TABLES) - for table in MON_TABLES: - display_table.add_row([table, - before[table], - after[table], - int(before[table]) - int(after[table])]) - print display_table - - -def vsql(sql): - p = subprocess.Popen([VSQL, "-c", sql], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - out, err = p.communicate() - if p.returncode != 0: - print("vsql failed: %s %s" % (out, err)) - return "" - result_next_line = False - for l in out.splitlines(): - if '--' in l: - # we want the next row - result_next_line = True - continue - if result_next_line: - return l - - -def get_isotime(time_stamp): - utc = str(datetime.utcfromtimestamp(time_stamp)) - utc = utc.replace(" ", "T")[:-7] + 'Z' - return utc - - -if __name__ == '__main__': - main() diff --git a/templates/vertica/vsql.erb b/templates/vertica/vsql.erb deleted file mode 100644 index 851c53c..0000000 --- a/templates/vertica/vsql.erb +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -# -# Simple script to execute vertica sql commands -# - -/opt/vertica/bin/vsql -U <%= @db_user %> -w <%= @db_admin_password %> "$@" diff --git a/templates/vm_alarm_cleanup.py.erb b/templates/vm_alarm_cleanup.py.erb deleted file mode 100755 index de29410..0000000 --- a/templates/vm_alarm_cleanup.py.erb +++ /dev/null @@ -1,101 +0,0 @@ -#!<%= @virtual_env %>/bin/python - -import MySQLdb -import os -import os_client_config -import shade - -OS_VARS = ['OS_REGION_NAME', - 'OS_USERNAME', - 
'OS_PASSWORD', - 'OS_TENANT_NAME', - 'OS_AUTH_URL'] - -DELETE_SQL = """ - DELETE a FROM alarm a WHERE a.id IN (%s); COMMIT; - """ - -GET_SQL = """ - SELECT - a.id AS alarm_id, - mdg.dimensions AS metric_dimensions - FROM alarm AS a - INNER JOIN alarm_definition ad - ON ad.id = a.alarm_definition_id - INNER JOIN alarm_metric AS am - ON am.alarm_id = a.id - INNER JOIN metric_definition_dimensions AS mdd - ON mdd.id = am.metric_definition_dimensions_id - INNER JOIN metric_definition AS md - ON md.id = mdd.metric_definition_id - LEFT OUTER JOIN (SELECT - dimension_set_id, - name, - value, - group_concat(name, '=', value) AS dimensions - FROM metric_dimension - GROUP BY dimension_set_id) AS mdg - ON mdg.dimension_set_id = mdd.metric_dimension_set_id - WHERE ad.deleted_at IS NULL - AND a.state = 'UNDETERMINED' - AND mdg.dimensions LIKE '%component=vm%' - AND mdg.dimensions LIKE '%region={}%' - ORDER BY a.id; - """ - - -def validate_env(): - for os_var in OS_VARS: - if not os.environ.get(os_var): - print "Error: %s not found." % os_var - sys.exit(1) - - -def get_cloud(): - validate_env() - client_config = os_client_config.OpenStackConfig() - cloud_config = client_config.get_one_cloud() - return shade.OpenStackCloud(cloud_config=cloud_config) - - -def prune_alarms(active_vm_ids): - - db = MySQLdb.connect(user = '<%= @sql_user %>', - passwd = '<%= @sql_password %>', - host = '<%= @sql_host %>', - port = <%= @sql_port %>, - db = 'mon') - - try: - c = db.cursor() - c.execute(GET_SQL.format(os.environ.get('OS_REGION_NAME'))) - - alarms = {} - for (alarm_id, dims) in c: - dim_dict = dict(s.split('=') for s in dims.split(',')) - if 'resource_id' in dim_dict.keys(): - alarms[alarm_id] = dim_dict['resource_id'] - - alarm_ids_to_delete = [] - for (alarm_id, resource_id) in alarms.iteritems(): - if resource_id not in active_vm_ids: - alarm_ids_to_delete.append(alarm_id) - print "Deleting alarm id '%s' for deleted vm '%s'" % \ - (alarm_id, resource_id) - - if len(alarm_ids_to_delete) > 0: - c.execute(DELETE_SQL % str(alarm_ids_to_delete).strip('[]')) - - finally: - c.close() - db.close() - - -def main(): - cloud = get_cloud() - servers = cloud.nova_client.servers.list(search_opts={'all_tenants': 1}, - limit=-1) - prune_alarms(list(server.id for server in servers)) - -if __name__ == '__main__': - main() diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 90eea2e..0000000 --- a/tox.ini +++ /dev/null @@ -1,13 +0,0 @@ -[tox] -minversion = 3.1 -skipsdist = True -envlist = releasenotes -ignore_basepython_conflict = True - -[testenv] -basepython = python3 -install_command = pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages} - -[testenv:releasenotes] -deps = -r{toxinidir}/doc/requirements.txt -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html