We no longer support cases with separate keystone/db services;

the corresponding test cases are therefore deleted.

Change-Id: I65bd70e1aada05ba75c1d240549d23eaec783583
Related-Bug: #1601824
NastyaUrlapova 2016-07-11 17:10:23 +03:00
parent 5a62e85194
commit 540baa73a3
5 changed files with 0 additions and 900 deletions


@@ -666,16 +666,6 @@ Test OpenStack Upgrades
Tests for separated services
============================
Test for separate keystone service
----------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_keystone
:members:
Test for separate keystone service and ceph
-------------------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_keystone_ceph
:members:
Test for separate haproxy service
---------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_haproxy
@@ -686,16 +676,6 @@ Test for separate horizon service
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_horizon
:members:
Test for separate mysql service
-------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_db
:members:
Test for separate mysql service and ceph
----------------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_db_ceph
:members:
Test for separate multiroles
----------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_multiroles


@@ -1,279 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_true
from proboscis import test
from devops.helpers.helpers import wait
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers import utils
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import settings
from fuelweb_test import logger
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_separate_services", "thread_db_separate_services"])
class SeparateDb(TestBasic):
"""SeparateDb""" # TODO documentation
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["separate_db_service"])
@log_snapshot_after_test
def separate_db_service(self):
"""Deploy cluster with 3 separate database roles
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 3 nodes with database role
4. Add 1 compute and cinder
5. Verify networks
6. Deploy the cluster
7. Verify networks
8. Run OSTF
Duration 120m
Snapshot separate_db_service
"""
self.check_run("separate_db_service")
checkers.check_plugin_path_env(
var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
)
self.env.revert_snapshot("ready_with_9_slaves")
# copy plugins to the master node
utils.upload_tarball(
ip=self.ssh_manager.admin_ip,
tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
tar_target="/var")
# install plugins
utils.install_plugin_check_code(
ip=self.ssh_manager.admin_ip,
plugin=os.path.basename(
settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))
data = {
'tenant': 'separatedb',
'user': 'separatedb',
'password': 'separatedb',
"net_provider": 'neutron',
"net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
plugin_name = 'detach-database'
msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
assert_true(
self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
msg)
options = {'metadata/enabled': True}
self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['standalone-database'],
'slave-05': ['standalone-database'],
'slave-06': ['standalone-database'],
'slave-07': ['compute'],
'slave-08': ['cinder']
}
)
self.fuel_web.verify_network(cluster_id)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("separate_db_service", is_make=True)
@test(groups=["thread_separate_services", "thread_db_separate_services"])
class SeparateDbFailover(TestBasic):
"""SeparateDbFailover""" # TODO documentation
@test(depends_on=[SeparateDb.separate_db_service],
groups=["separate_db_service_shutdown"])
@log_snapshot_after_test
def separate_db_service_shutdown(self):
"""Shutdown one database node
Scenario:
1. Revert snapshot separate_db_service
2. Destroy one db node
3. Wait until galera is up
4. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("separate_db_service")
cluster_id = self.fuel_web.get_last_created_cluster()
# destroy one db node
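# slaves[3] is 'slave-04', the first of the three standalone-database nodes assigned above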
db_node = self.env.d_env.nodes().slaves[3]
db_node.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
db_node)['online'], timeout=60 * 5)
# Wait until MySQL Galera is UP on some db node
self.fuel_web.wait_mysql_galera_is_up(['slave-05'])
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
@test(depends_on=[SeparateDb.separate_db_service],
groups=["separate_db_service_restart"])
@log_snapshot_after_test
def separate_db_service_restart(self):
"""Restart one database node
Scenario:
1. Revert snapshot separate_db_service
2. Restart one db node
3. Wait until galera is up
4. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("separate_db_service")
cluster_id = self.fuel_web.get_last_created_cluster()
# restart one db node
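# slaves[3] is 'slave-04', one of the standalone-database nodes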
db_node = self.env.d_env.nodes().slaves[3]
self.fuel_web.warm_restart_nodes([db_node])
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
db_node)['online'], timeout=60 * 5)
# Wait until MySQL Galera is UP on some db node
self.fuel_web.wait_mysql_galera_is_up(['slave-05'])
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
@test(depends_on=[SeparateDb.separate_db_service],
groups=["separate_db_service_controller_shutdown"])
@log_snapshot_after_test
def separate_db_service_controller_shutdown(self):
"""Shutdown primary controller node
Scenario:
1. Revert snapshot separate_db_service
2. Shutdown primary controller node
3. Wait until rabbit and db are operational
4. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("separate_db_service")
cluster_id = self.fuel_web.get_last_created_cluster()
# shutdown primary controller
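# get_nailgun_primary_node is assumed to resolve which controller currently holds the primary role (not necessarily slave-01)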
controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0])
logger.debug(
"controller with primary role is {}".format(controller.name))
controller.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
controller)['online'], timeout=60 * 5)
# One test should fail: Check state of haproxy backends on controllers
self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
self.fuel_web.run_ostf(cluster_id=cluster_id)
@test(depends_on=[SeparateDb.separate_db_service],
groups=["separate_db_service_add_delete_node"])
@log_snapshot_after_test
def separate_db_service_add_delete_node(self):
"""Add and delete database node
Scenario:
1. Revert snapshot separate_db_service
2. Add one database node and re-deploy cluster
3. Run network verification
4. Run OSTF
5. Check that hiera hosts are the same for
different groups of roles
6. Delete one database node
7. Run network verification
8. Run OSTF
9. Check that hiera hosts are the same for
different groups of roles
Duration 30m
"""
self.env.revert_snapshot("separate_db_service")
cluster_id = self.fuel_web.get_last_created_cluster()
node = {'slave-09': ['standalone-database']}
self.fuel_web.update_nodes(
cluster_id, node, True, False)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])
checkers.check_hiera_hosts(
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera memcache_roles')
database_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-database'])
logger.debug("database nodes are {0}".format(database_nodes))
checkers.check_hiera_hosts(
database_nodes,
cmd='hiera corosync_roles')
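# Remove slave-09 again: mark it for deletion, redeploy, then wait for it to reappear as a discovered bootstrap node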
nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
False, True)
nodes = [_node for _node in nailgun_node
if _node["pending_deletion"] is True]
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=6 * 60)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])
checkers.check_hiera_hosts(
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera memcache_roles')
database_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-database'])
logger.debug("database nodes are {0}".format(database_nodes))
checkers.check_hiera_hosts(
database_nodes,
cmd='hiera corosync_roles')


@@ -1,128 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_true
from proboscis import test
from fuelweb_test.helpers.checkers import check_plugin_path_env
from fuelweb_test.helpers import utils
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import settings
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_separate_services_ceph"])
class SeparateDbCeph(TestBasic):
"""SeparateDbCeph""" # TODO documentation
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["separate_db_ceph_service"])
@log_snapshot_after_test
def separate_db_ceph_service(self):
"""Deployment with separate db nodes and ceph for all
Scenario:
1. Install the plugin on the master node
2. Create a cluster with Ubuntu, Neutron VXLAN, and ceph for all storages
3. Change ceph replication factor to 2
4. Add 3 nodes with controller role
5. Add 3 nodes with db role
6. Add 1 compute node
7. Add 2 ceph nodes
8. Run network verification
9. Deploy changes
10. Run network verification
11. Run OSTF tests
Duration 120m
Snapshot separate_db_ceph_service
"""
self.check_run("separate_db_ceph_service")
check_plugin_path_env(
var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
)
self.env.revert_snapshot("ready_with_9_slaves")
# copy plugins to the master node
utils.upload_tarball(
ip=self.ssh_manager.admin_ip,
tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
tar_target="/var")
# install plugins
utils.install_plugin_check_code(
ip=self.ssh_manager.admin_ip,
plugin=os.path.basename(
settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))
data = {
'volumes_lvm': False,
'volumes_ceph': True,
'images_ceph': True,
'ephemeral_ceph': True,
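# osd_pool_size '2' implements scenario step 3: a ceph replication factor of 2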
'osd_pool_size': '2',
'objects_ceph': True,
'tenant': 'separatedbceph',
'user': 'separatedbceph',
'password': 'separatedbceph',
"net_provider": 'neutron',
"net_segment_type": settings.NEUTRON_SEGMENT['tun'],
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
plugin_name = 'detach-database'
msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
assert_true(
self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
msg)
options = {'metadata/enabled': True}
self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['standalone-database'],
'slave-05': ['standalone-database'],
'slave-06': ['standalone-database'],
'slave-07': ['compute'],
'slave-08': ['ceph-osd'],
'slave-09': ['ceph-osd']
}
)
self.fuel_web.verify_network(cluster_id)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("separate_db_ceph_service")


@@ -1,330 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis import test
from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers import utils
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import settings
from fuelweb_test import logger
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_separate_services",
"thread_keystone_separate_services"])
class SeparateKeystone(TestBasic):
"""SeparateKeystone""" # TODO documentation
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["separate_keystone_service"])
@log_snapshot_after_test
def separate_keystone_service(self):
"""Deploy cluster with 3 separate keystone roles
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 3 nodes with keystone role
4. Add 1 compute and cinder
5. Verify networks
6. Deploy the cluster
7. Verify networks
8. Run OSTF
Duration 120m
Snapshot separate_keystone_service
"""
self.check_run("separate_keystone_service")
checkers.check_plugin_path_env(
var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
)
checkers.check_plugin_path_env(
var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH',
plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH
)
self.env.revert_snapshot("ready_with_9_slaves")
# copy plugins to the master node
utils.upload_tarball(
ip=self.ssh_manager.admin_ip,
tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
tar_target="/var")
utils.upload_tarball(
ip=self.ssh_manager.admin_ip,
tar_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH,
tar_target="/var")
# install plugins
utils.install_plugin_check_code(
ip=self.ssh_manager.admin_ip,
plugin=os.path.basename(
settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))
utils.install_plugin_check_code(
ip=self.ssh_manager.admin_ip,
plugin=os.path.basename(
settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH))
data = {
'tenant': 'separatekeystone',
'user': 'separatekeystone',
'password': 'separatekeystone',
"net_provider": 'neutron',
"net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
plugin_names = ['detach-database', 'detach-keystone']
msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
for plugin_name in plugin_names:
assert_true(
self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
msg)
options = {'metadata/enabled': True}
self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['standalone-database', 'standalone-keystone'],
'slave-05': ['standalone-database', 'standalone-keystone'],
'slave-06': ['standalone-database', 'standalone-keystone'],
'slave-07': ['compute'],
'slave-08': ['cinder']
}
)
self.fuel_web.verify_network(cluster_id)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("separate_keystone_service", is_make=True)
@test(groups=["thread_separate_services",
"thread_keystone_separate_services"])
class SeparateKeystoneFailover(TestBasic):
"""SeparateKeystoneFailover""" # TODO documentation
@test(depends_on=[SeparateKeystone.separate_keystone_service],
groups=["separate_keystone_service_shutdown"])
@log_snapshot_after_test
def separate_keystone_service_shutdown(self):
"""Shutdown one keystone node
Scenario:
1. Revert snapshot separate_keystone_service
2. Destroy one keystone node
3. Wait until HA is working
4. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("separate_keystone_service")
cluster_id = self.fuel_web.get_last_created_cluster()
# destroy one keystone node
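# slaves[3] is 'slave-04'; it runs both standalone-database and standalone-keystone roles, so one galera node goes down too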
keystone_node = self.env.d_env.nodes().slaves[3]
keystone_node.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
keystone_node)['online'], timeout=60 * 5)
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
@test(depends_on=[SeparateKeystone.separate_keystone_service],
groups=["separate_keystone_service_restart"])
@log_snapshot_after_test
def separate_keystone_service_restart(self):
"""Restart one keystone node
Scenario:
1. Revert snapshot separate_keystone_service
2. Restart one keystone node
3. Wait until HA is working
4. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("separate_keystone_service")
cluster_id = self.fuel_web.get_last_created_cluster()
# restart one keystone node
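# slave-04 hosts the combined standalone-database and standalone-keystone roles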
keystone_node = self.env.d_env.nodes().slaves[3]
self.fuel_web.warm_restart_nodes([keystone_node])
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
keystone_node)['online'], timeout=60 * 5)
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
@test(depends_on=[SeparateKeystone.separate_keystone_service],
groups=["separate_keystone_service_controller_shutdown"])
@log_snapshot_after_test
def separate_keystone_service_controller_shutdown(self):
"""Shutdown primary controller node
Scenario:
1. Revert snapshot separate_keystone_service
2. Shutdown primary controller node
3. Wait until HA is working
4. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("separate_keystone_service")
cluster_id = self.fuel_web.get_last_created_cluster()
# shutdown primary controller
controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0])
logger.debug(
"controller with primary role is {}".format(controller.name))
controller.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
controller)['online'], timeout=60 * 5)
# One test should fail: Check state of haproxy backends on controllers
self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
self.fuel_web.run_ostf(cluster_id=cluster_id)
@test(depends_on=[SeparateKeystone.separate_keystone_service],
groups=["separate_keystone_service_add_delete_node"])
@log_snapshot_after_test
def separate_keystone_service_add_delete_node(self):
"""Add and delete keystone node
Scenario:
1. Revert snapshot separate_keystone_service
2. Add one keystone node and re-deploy cluster
3. Run network verification
4. Run OSTF
5. Check that hiera hosts are the same for
different groups of roles
6. Delete one keystone node
7. Run network verification
8. Run OSTF
9. Check that hiera hosts are the same for
different groups of roles
Duration 30m
"""
def check_keystone_nodes(nodes):
hiera_hosts = []
for node in nodes:
cmd = "cat /etc/hiera/plugins/detach-keystone.yaml"
result = self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd=cmd,
yamlify=True
)['stdout_yaml']
hosts = result['corosync_roles']
logger.debug("hosts on {0} are {1}".format(node['hostname'],
hosts))
if not hiera_hosts:
hiera_hosts = hosts
continue
else:
assert_true(set(hosts) == set(hiera_hosts),
'Hosts on node {0} differ from '
'others'.format(node['hostname']))
self.env.revert_snapshot("separate_keystone_service")
cluster_id = self.fuel_web.get_last_created_cluster()
node = {'slave-09': ['standalone-keystone']}
self.fuel_web.update_nodes(
cluster_id, node, True, False)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])
keystone_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-keystone'])
logger.debug("keystone nodes are {0}".format(keystone_nodes))
checkers.check_hiera_hosts(
keystone_nodes,
cmd='hiera memcache_roles')
other_nodes = []
for role in ['compute', 'cinder', 'controller']:
for nodes_list in self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, [role]):
other_nodes.append(nodes_list)
logger.debug("other nodes are {0}".format(other_nodes))
checkers.check_hiera_hosts(
other_nodes,
cmd='hiera memcache_roles')
check_keystone_nodes(keystone_nodes)
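# Delete slave-09 again, redeploy, wait for the node to be rediscovered, then re-check hiera consistency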
nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
False, True)
nodes = [_node for _node in nailgun_node
if _node["pending_deletion"] is True]
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=6 * 60)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])
keystone_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-keystone'])
logger.debug("keystone nodes are {0}".format(keystone_nodes))
checkers.check_hiera_hosts(
keystone_nodes,
cmd='hiera memcache_roles')
other_nodes = []
for role in ['compute', 'cinder', 'controller']:
for nodes_list in self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, [role]):
other_nodes.append(nodes_list)
logger.debug("other nodes are {0}".format(other_nodes))
checkers.check_hiera_hosts(
other_nodes,
cmd='hiera memcache_roles')
check_keystone_nodes(keystone_nodes)


@@ -1,143 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis import test
from proboscis.asserts import assert_true
from fuelweb_test.helpers.checkers import check_plugin_path_env
from fuelweb_test.helpers import utils
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import settings
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_separate_services_ceph"])
class SeparateKeystoneCeph(TestBasic):
"""SeparateKeystoneCeph""" # TODO documentation
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["separate_keystone_ceph_service"])
@log_snapshot_after_test
def separate_keystone_ceph_service(self):
"""Deployment with separate keystone nodes and ceph for all
Scenario:
1. Install database and keystone plugins on the master node
2. Create a cluster with Ubuntu, Neutron VXLAN, and ceph for all storages
3. Change ceph replication factor to 2
4. Add 3 nodes with controller role
5. Add 3 nodes with database+keystone role
6. Add 1 compute node
7. Add 2 ceph nodes
8. Run network verification
9. Deploy changes
10. Run network verification
11. Run OSTF tests
Duration 120m
Snapshot separate_keystone_ceph_service
"""
self.check_run("separate_keystone_ceph_service")
check_plugin_path_env(
var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
)
check_plugin_path_env(
var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH',
plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH
)
self.env.revert_snapshot("ready_with_9_slaves")
# copy plugins to the master node
utils.upload_tarball(
ip=self.ssh_manager.admin_ip,
tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
tar_target="/var")
utils.upload_tarball(
ip=self.ssh_manager.admin_ip,
tar_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH,
tar_target="/var")
# install plugins
utils.install_plugin_check_code(
ip=self.ssh_manager.admin_ip,
plugin=os.path.basename(
settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))
utils.install_plugin_check_code(
ip=self.ssh_manager.admin_ip,
plugin=os.path.basename(
settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH))
data = {
'volumes_lvm': False,
'volumes_ceph': True,
'images_ceph': True,
'ephemeral_ceph': True,
'objects_ceph': True,
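# osd_pool_size '2' sets the ceph replication factor to 2 (scenario step 3)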
'osd_pool_size': '2',
'tenant': 'separatekeystoneceph',
'user': 'separatekeystoneceph',
'password': 'separatekeystoneceph',
"net_provider": 'neutron',
"net_segment_type": settings.NEUTRON_SEGMENT['tun'],
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
plugin_names = ['detach-database', 'detach-keystone']
msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
for plugin_name in plugin_names:
assert_true(
self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
msg)
options = {'metadata/enabled': True}
self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['standalone-database', 'standalone-keystone'],
'slave-05': ['standalone-database', 'standalone-keystone'],
'slave-06': ['standalone-database', 'standalone-keystone'],
'slave-07': ['compute'],
'slave-08': ['ceph-osd'],
'slave-09': ['ceph-osd']
}
)
self.fuel_web.verify_network(cluster_id)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("separate_keystone_ceph_service")