Add system tests for the LMA collector plugin

This change adds tests for the LMA collector [1] plugin. It also deploys
the Elasticsearch-Kibana [2] and the InfluxDB-Grafana [3] plugins, which
provide the necessary backends for the LMA collector.

[1] https://github.com/stackforge/fuel-plugin-lma-collector
[2] https://github.com/stackforge/fuel-plugin-elasticsearch-kibana
[3] https://github.com/stackforge/fuel-plugin-influxdb-grafana

Change-Id: I892282ce6b362e29f60689d7d5a411d714f00280
Closes-Bug: #1453734
This commit is contained in:
Simon Pasquier 2015-05-06 14:27:39 +02:00
parent 581d774703
commit 551dcff242
4 changed files with 182 additions and 0 deletions

View File

@ -42,6 +42,7 @@ def import_tests():
from tests.plugins.plugin_contrail import test_fuel_plugin_contrail # noqa
from tests.plugins.plugin_glusterfs import test_plugin_glusterfs # noqa
from tests.plugins.plugin_lbaas import test_plugin_lbaas # noqa
from tests.plugins.plugin_lma_collector import test_plugin_lma_collector # noqa
from tests.plugins.plugin_reboot import test_plugin_reboot_task # noqa
from tests.plugins.plugin_zabbix import test_plugin_zabbix # noqa
from tests import test_multiple_networks # noqa

View File

@ -358,6 +358,10 @@ GLUSTER_CLUSTER_ENDPOINT = os.environ.get('GLUSTER_CLUSTER_ENDPOINT')
# Filesystem paths to the plugin tarballs that system tests upload to the
# master node; each is supplied through an environment variable and is
# None when the corresponding variable is unset.
EXAMPLE_PLUGIN_PATH = os.environ.get('EXAMPLE_PLUGIN_PATH')
LBAAS_PLUGIN_PATH = os.environ.get('LBAAS_PLUGIN_PATH')
ZABBIX_PLUGIN_PATH = os.environ.get('ZABBIX_PLUGIN_PATH')
LMA_COLLECTOR_PLUGIN_PATH = os.environ.get('LMA_COLLECTOR_PLUGIN_PATH')
ELASTICSEARCH_KIBANA_PLUGIN_PATH = os.environ.get(
'ELASTICSEARCH_KIBANA_PLUGIN_PATH')
INFLUXDB_GRAFANA_PLUGIN_PATH = os.environ.get('INFLUXDB_GRAFANA_PLUGIN_PATH')
# Feature toggles: the env vars hold the strings 'true'/'false'; the
# comparison converts them to booleans (anything other than 'true' is False).
FUEL_STATS_CHECK = os.environ.get('FUEL_STATS_CHECK', 'false') == 'true'
FUEL_STATS_ENABLED = os.environ.get('FUEL_STATS_ENABLED', 'true') == 'true'

View File

@ -0,0 +1,177 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_true
from proboscis import test
import requests
from fuelweb_test import logger
from fuelweb_test import settings as conf
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["plugins"])
class TestLmaCollectorPlugin(TestBasic):
    """System tests for the LMA collector plugin."""

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["deploy_lma_collector_ha"])
    @log_snapshot_on_error
    def deploy_lma_collector_ha(self):
        """Deploy cluster in HA mode with the LMA collector plugin

        This also deploys the Elasticsearch-Kibana plugin and the
        InfluxDB-Grafana plugin since they work together with the LMA
        collector plugin.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute + cinder role
            6. Add 1 node with base-os role
            7. Deploy the cluster
            8. Check that the plugins work
            9. Run OSTF

        Duration 70m
        Snapshot deploy_lma_collector_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # The three plugins are uploaded and installed identically; drive
        # both steps from one list instead of repeating the calls.  Fetch
        # the admin remote once rather than opening a new connection for
        # every upload/install call.
        admin_remote = self.env.d_env.get_admin_remote()
        plugin_paths = [
            conf.LMA_COLLECTOR_PLUGIN_PATH,
            conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH,
            conf.INFLUXDB_GRAFANA_PLUGIN_PATH,
        ]
        # copy plugins to the master node
        for path in plugin_paths:
            checkers.upload_tarball(admin_remote, path, "/var")
        # install plugins
        for path in plugin_paths:
            checkers.install_plugin_check_code(
                admin_remote, plugin=os.path.basename(path))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE,
            }
        )

        # this is how the base-os node will be named eventually
        analytics_node_name = 'slave-05_base-os'
        # Per-plugin settings pushed through the Nailgun API; the LMA
        # collector is pointed at the local Elasticsearch and InfluxDB
        # instances that the two backend plugins deploy on the base-os node.
        plugins = [
            {
                'name': 'lma_collector',
                'options': {
                    'metadata/enabled': True,
                    'environment_label': 'deploy_lma_collector_ha',
                    'elasticsearch_mode': 'local',
                    'elasticsearch_node_name': analytics_node_name,
                    'enable_notifications': True,
                    'influxdb_mode': 'local',
                    'influxdb_node_name': analytics_node_name,
                    'influxdb_password': 'lmapass',
                }
            },
            {
                'name': 'elasticsearch_kibana',
                'options': {
                    'metadata/enabled': True,
                    'node_name': analytics_node_name,
                }
            },
            {
                'name': 'influxdb_grafana',
                'options': {
                    'metadata/enabled': True,
                    'node_name': analytics_node_name,
                    'influxdb_rootpass': 'lmapass',
                    'influxdb_userpass': 'lmapass',
                }
            },
        ]
        for plugin in plugins:
            plugin_name = plugin['name']
            msg = "Plugin '%s' couldn't be found. Test aborted" % plugin_name
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('%s plugin is installed' % plugin_name)
            self.fuel_web.update_plugin_data(cluster_id, plugin_name,
                                             plugin['options'])

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute", "cinder"],
                "slave-05": ["base-os"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        analytics_node_ip = self.fuel_web.get_nailgun_node_by_name(
            "slave-05").get('ip')
        assert_is_not_none(
            analytics_node_ip,
            "Fail to retrieve the IP address for slave-05"
        )

        def assert_http_get_response(url, expected=200):
            # Helper: GET the URL and fail with a descriptive message when
            # the status code differs from the expected one.
            r = requests.get(url)
            assert_equal(r.status_code, expected,
                         "{} responded with {}, expected {}".format(
                             url, r.status_code, expected))

        logger.debug("Check that Elasticsearch is ready")
        assert_http_get_response("http://{}:9200/".format(analytics_node_ip))

        logger.debug("Check that Kibana is ready")
        assert_http_get_response("http://{}/".format(analytics_node_ip))

        logger.debug("Check that InfluxDB is ready")
        assert_http_get_response(
            "http://{}:8086/db/lma/series?u=lma&p={}&q=list+series".format(
                analytics_node_ip, "lmapass"))

        logger.debug("Check that Grafana is ready")
        assert_http_get_response("http://{}/".format(analytics_node_ip))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_lma_collector_ha")