Merge "Upgrade rework" into stable/8.0

This commit is contained in:
Jenkins 2016-07-07 08:54:29 +00:00 committed by Gerrit Code Review
commit 551f59c4ba
6 changed files with 435 additions and 415 deletions

View File

@@ -1,4 +1,4 @@
-# Copyright 2015 Mirantis, Inc.
+# Copyright 2016 Mirantis, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -12,13 +12,16 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import json
 import os
 import posixpath
 import re
-import json
+import traceback

-from paramiko import RSAKey
+from devops.helpers.helpers import wait
 from devops.models.node import SSHClient
+from paramiko import RSAKey

 from fuelweb_test import logger
@@ -60,15 +63,19 @@ class SSHManager(object):
         self.login = login
         self.password = password

-    def _connect(self, remote):
+    @staticmethod
+    def _connect(remote):
         """ Check if connection is stable and return this one

         :param remote:
         :return:
         """
         try:
-            remote.execute("cd ~")
+            wait(lambda: remote.execute("cd ~")['exit_code'] == 0, timeout=20)
         except Exception:
+            logger.info('SSHManager: Check for current '
+                        'connection fails. Try to reconnect')
+            logger.debug(traceback.format_exc())
             remote.reconnect()
         return remote
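
Note on the hunk above: the reworked `_connect` no longer fires a single `cd ~` and hopes for the best; it polls the session with `wait` from `devops.helpers.helpers` (20-second timeout) and logs the traceback before reconnecting. A minimal standalone sketch of the same check-then-reconnect pattern, assuming only a `remote` object with `execute`/`reconnect` as in the hunk (the function name and the injected `logger` are illustrative):

    import traceback

    from devops.helpers.helpers import wait


    def ensure_alive(remote, logger, timeout=20):
        """Return a remote whose SSH session answers a trivial command."""
        try:
            # Poll "cd ~" until it exits 0 or the timeout expires.
            wait(lambda: remote.execute("cd ~")['exit_code'] == 0,
                 timeout=timeout)
        except Exception:
            logger.info('Connection check failed, trying to reconnect')
            logger.debug(traceback.format_exc())
            remote.reconnect()
        return remote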
@@ -156,28 +163,38 @@ class SSHManager(object):
         if assert_ec_equal is None:
             assert_ec_equal = [0]
         result = self.execute(ip=ip, port=port, cmd=cmd)
-        if result['exit_code'] not in assert_ec_equal:
-            error_details = {
-                'command': cmd,
-                'host': ip,
-                'stdout': result['stdout'],
-                'stderr': result['stderr'],
-                'exit_code': result['exit_code']}
-
-            error_msg = (err_msg or "Unexpected exit_code returned:"
-                                    " actual {0}, expected {1}."
-                         .format(error_details['exit_code'],
+
+        result['stdout_str'] = ''.join(result['stdout']).strip()
+        result['stdout_len'] = len(result['stdout'])
+        result['stderr_str'] = ''.join(result['stderr']).strip()
+        result['stderr_len'] = len(result['stderr'])
+
+        details_log = (
+            "Host: {host}\n"
+            "Command: '{cmd}'\n"
+            "Exit code: {code}\n"
+            "STDOUT:\n{stdout}\n"
+            "STDERR:\n{stderr}".format(
+                host=ip, cmd=cmd, code=result['exit_code'],
+                stdout=result['stdout_str'], stderr=result['stderr_str']
+            ))
+
+        if result['exit_code'] not in assert_ec_equal:
+            error_msg = (
+                err_msg or
+                "Unexpected exit_code returned: actual {0}, expected {1}."
+                "".format(
+                    result['exit_code'],
                     ' '.join(map(str, assert_ec_equal))))
-            log_msg = ("{0} Command: '{1}' "
-                       "Details: {2}".format(error_msg, cmd, error_details))
+            log_msg = (
+                "{0} Command: '{1}' "
+                "Details:\n{2}".format(
+                    error_msg, cmd, details_log))
             logger.error(log_msg)
             if raise_on_assert:
                 raise Exception(log_msg)
-
-        result['stdout_str'] = ''.join(result['stdout'])
-        result['stdout_len'] = len(result['stdout'])
-        result['stderr_str'] = ''.join(result['stderr'])
-        result['stderr_len'] = len(result['stderr'])
+        else:
+            logger.debug(details_log)

         if jsonify:
             try:
@@ -200,7 +217,7 @@ class SSHManager(object):
         :raise: Exception
         """
         if isinstance(json_string, list):
-            json_string = ''.join(json_string)
+            json_string = ''.join(json_string).strip()

         try:
             obj = json.loads(json_string)
@@ -274,7 +291,7 @@ class SSHManager(object):
             return 0

         files_count = 0
-        for rootdir, subdirs, files in os.walk(source):
+        for rootdir, _, files in os.walk(source):
             targetdir = os.path.normpath(
                 os.path.join(
                     target,
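
For orientation (not part of the change): with the hunks above, `execute_on_remote` always attaches `stdout_str`, `stdout_len`, `stderr_str` and `stderr_len` to the result, logs the full details on an unexpected exit code, and only raises while `raise_on_assert` stays enabled. A hedged sketch of how a caller might consume that enriched result; the IP, command and `ssh_manager` handle are illustrative, the keys come from the diff:

    # Illustrative usage of the enriched result dict built above.
    result = ssh_manager.execute_on_remote(
        ip='10.109.0.2',           # hypothetical admin node address
        cmd='fuel env --json',     # any command; --json pairs with jsonify
        jsonify=True)              # adds result['stdout_json'] via json_deserialize

    assert result['exit_code'] == 0
    print(result['stdout_str'])    # joined and stripped stdout
    envs = result['stdout_json']   # parsed object, ready for assertions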

===== Changed file 2 of 6 =====

@@ -403,21 +403,33 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
     if assert_ec_equal is None:
         assert_ec_equal = [0]
     result = remote.execute(cmd)
-    if result['exit_code'] not in assert_ec_equal:
-        error_details = {
-            'command': cmd,
-            'host': remote.host,
-            'stdout': result['stdout'],
-            'stderr': result['stderr'],
-            'exit_code': result['exit_code']}
-
-        error_msg = (err_msg or "Unexpected exit_code returned:"
-                                " actual {0}, expected {1}."
-                     .format(error_details['exit_code'],
+
+    result['stdout_str'] = ''.join(result['stdout']).strip()
+    result['stdout_len'] = len(result['stdout'])
+    result['stderr_str'] = ''.join(result['stderr']).strip()
+    result['stderr_len'] = len(result['stderr'])
+
+    details_log = (
+        "Host: {host}\n"
+        "Command: '{cmd}'\n"
+        "Exit code: {code}\n"
+        "STDOUT:\n{stdout}\n"
+        "STDERR:\n{stderr}".format(
+            host=remote.host, cmd=cmd, code=result['exit_code'],
+            stdout=result['stdout_str'], stderr=result['stderr_str']
+        ))
+
+    if result['exit_code'] not in assert_ec_equal:
+        error_msg = (
+            err_msg or
+            "Unexpected exit_code returned: actual {0}, expected {1}."
+            "".format(
+                result['exit_code'],
                 ' '.join(map(str, assert_ec_equal))))
-        log_msg = ("{0} Command: '{1}' Details: {2}".format(error_msg,
-                                                            cmd,
-                                                            error_details))
+        log_msg = (
+            "{0} Command: '{1}' "
+            "Details:\n{2}".format(
+                error_msg, cmd, details_log))
         logger.error(log_msg)
         if raise_on_assert:
             raise Exception(log_msg)

@@ -425,11 +437,6 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
     if clear:
         remote.clear()

-    result['stdout_str'] = ''.join(result['stdout'])
-    result['stdout_len'] = len(result['stdout'])
-    result['stderr_str'] = ''.join(result['stderr'])
-    result['stderr_len'] = len(result['stderr'])
-
     if jsonify:
         try:
             result['stdout_json'] = json_deserialize(result['stdout_str'])

===== Changed file 3 of 6 =====

@@ -118,7 +118,8 @@ def import_tests():
     from tests import test_clone_env  # noqa
     from tests import test_node_reassignment  # noqa
     from tests import test_os_upgrade  # noqa
-    from tests.tests_upgrade import test_data_driven_upgrade  # noqa
+    # from tests.tests_upgrade import test_data_driven_upgrade  # noqa
+    # TODO(vkhlyunev): Uncomment upper line after test rework.
     from tests.tests_strength import test_failover  # noqa
     from tests.tests_strength import test_failover_with_ceph  # noqa
     from tests.tests_strength import test_master_node_failover  # noqa

===== Changed file 4 of 6 =====

@@ -14,22 +14,20 @@
 import urllib2

+from proboscis import SkipTest
+from proboscis import test
 from proboscis.asserts import assert_equal
 from proboscis.asserts import fail
-from proboscis import test
-from proboscis import SkipTest

 from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.tests import base_test_case as base_test_data
-from fuelweb_test.tests.test_os_upgrade import TestOSupgrade
+from fuelweb_test.tests.base_test_case import TestBasic


-@test(groups=["clone_env_for_os_upgrade", "os_upgrade"])
-class TestCloneEnv(base_test_data.TestBasic):
+@test(groups=["clone_env_for_os_upgrade"],
+      depends_on_groups=["upgrade_ceph_ha_restore"],
+      enabled=False)
+class TestCloneEnv(TestBasic):

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["test_clone_environment"])
+    @test(groups=["test_clone_environment"])
     @log_snapshot_after_test
     def test_clone_environment(self):
         """Test clone environment
@@ -115,7 +113,8 @@ class TestCloneEnv(base_test_data.TestBasic):
                          network["vlan_start"])

     @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
+        depends_on_groups=['upgrade_old_nodes'],
+        # TODO(astepanov) maintain names changes later
         groups=["test_clone_nonexistent_cluster"])
     @log_snapshot_after_test
     def test_clone_nonexistent_cluster(self):

@@ -143,9 +142,7 @@ class TestCloneEnv(base_test_data.TestBasic):
         else:
             fail("Doesn't raise needed error")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["test_clone_wo_name_in_body"])
+    @test(groups=["test_clone_wo_name_in_body"])
     @log_snapshot_after_test
     def test_clone_wo_name_in_body(self):
         """Test clone without name in POST body

@@ -177,9 +174,7 @@ class TestCloneEnv(base_test_data.TestBasic):
         else:
             fail("Doesn't raise needed error")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["test_clone_wo_release_id_in_body"])
+    @test(groups=["test_clone_wo_release_id_in_body"])
     @log_snapshot_after_test
     def test_clone_wo_release_id_in_body(self):
         """Test clone without release id in POST body

@@ -208,9 +203,7 @@ class TestCloneEnv(base_test_data.TestBasic):
         else:
             fail("Doesn't raise needed error")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["test_clone_with_empty_body"])
+    @test(groups=["test_clone_with_empty_body"])
     @log_snapshot_after_test
     def test_clone_with_empty_body(self):
         """Test clone with empty body

@@ -235,9 +228,7 @@ class TestCloneEnv(base_test_data.TestBasic):
         else:
             fail("Doesn't raise needed error")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["test_clone_with_nonexistent_release_id"])
+    @test(groups=["test_clone_with_nonexistent_release_id"])
     @log_snapshot_after_test
     def test_clone_with_nonexistent_release_id(self):
         """Test clone with nonexistent release id in POST body

@@ -268,9 +259,7 @@ class TestCloneEnv(base_test_data.TestBasic):
         else:
             fail("Doesn't raise needed error")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["test_clone_with_incorrect_release_id"])
+    @test(groups=["test_clone_with_incorrect_release_id"])
     @log_snapshot_after_test
     def test_clone_with_incorrect_release_id(self):
         """Test clone with incorrect release id in POST body

@@ -301,9 +290,7 @@ class TestCloneEnv(base_test_data.TestBasic):
         else:
             fail("Doesn't raise needed error")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["test_double_clone_environment"])
+    @test(groups=["test_double_clone_environment"])
     @log_snapshot_after_test
     def test_double_clone_environment(self):
         """Test double clone environment

===== Changed file 5 of 6 =====

@@ -14,22 +14,21 @@
 import urllib2

+from proboscis import SkipTest
+from proboscis import test
 from proboscis.asserts import assert_equal
 from proboscis.asserts import fail
-from proboscis import test
-from proboscis import SkipTest

 from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.tests import base_test_case as base_test_data
-from fuelweb_test.tests.test_os_upgrade import TestOSupgrade
+from fuelweb_test.tests.base_test_case import TestBasic


-@test(groups=["reassign_node_for_os_upgrade", "os_upgrade"])
-class TestReassignNode(base_test_data.TestBasic):
+@test(groups=["reassign_node_for_os_upgrade", "os_upgrade"],
+      depends_on_groups=["upgrade_ceph_ha_restore"],
+      enabled=False)
+class TestReassignNode(TestBasic):

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["reassign_node_to_cloned_environment"])
+    @test(groups=["reassign_node_to_cloned_environment"])
     @log_snapshot_after_test
     def reassign_node_to_cloned_environment(self):
         """Test reassign node
@@ -109,9 +108,7 @@ class TestReassignNode(base_test_data.TestBasic):
         )
         self.fuel_web.assert_task_success(task)

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["reassign_node_to_nonexistent_cluster"])
+    @test(groups=["reassign_node_to_nonexistent_cluster"])
     @log_snapshot_after_test
     def reassign_node_to_nonexistent_cluster(self):
         """Test reassign node to nonexistent cluster

@@ -146,9 +143,7 @@ class TestReassignNode(base_test_data.TestBasic):
                  "to non-existing"
                  "cluster 123456".format(controller_node["id"]))

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["reassign_node_with_empty_body"])
+    @test(groups=["reassign_node_with_empty_body"])
     @log_snapshot_after_test
     def reassign_node_with_empty_body(self):
         """Test reassign node with empty body

@@ -186,9 +181,7 @@ class TestReassignNode(base_test_data.TestBasic):
             fail("Doesn't raise HTTP 400 error on request"
                  "to reassigning node with empty body")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["reassign_node_with_incorrect_node"])
+    @test(groups=["reassign_node_with_incorrect_node"])
     @log_snapshot_after_test
     def reassign_node_with_incorrect_node(self):
         """Test reassign node with incorrect node in POST body

@@ -230,9 +223,7 @@ class TestReassignNode(base_test_data.TestBasic):
             fail("Doesn't raise HTTP 400 error on request"
                  "to reassigning node with incorrect node_id")

-    @test(
-        depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-        groups=["reassign_nonexistent_node_to_cloned_environment"])
+    @test(groups=["reassign_nonexistent_node_to_cloned_environment"])
     @log_snapshot_after_test
     def reassign_nonexistent_node_to_cloned_environment(self):
         """Test reassign node with nonexistent node in POST body

===== Changed file 6 of 6 =====

@@ -1,4 +1,4 @@
-# Copyright 2015 Mirantis, Inc.
+# Copyright 2016 Mirantis, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -12,221 +12,140 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import time
-
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_false
+from proboscis.asserts import assert_equal, assert_not_equal
 from proboscis.asserts import assert_true
 from proboscis import test
 from proboscis import SkipTest

-from fuelweb_test.helpers import checkers
+from fuelweb_test import logger
 from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.helpers.utils import install_pkg
-from fuelweb_test.tests import base_test_case as base_test_data
-from fuelweb_test import settings as hlp_data
-from fuelweb_test.settings import DEPLOYMENT_MODE_HA
-
-
-@test(groups=["prepare_os_upgrade"])
-class PrepareOSupgrade(base_test_data.TestBasic):
-
-    @test(depends_on=[base_test_data.SetupEnvironment.prepare_slaves_9],
-          groups=["ha_ceph_for_all_ubuntu_neutron_vlan"])
-    @log_snapshot_after_test
-    def ha_ceph_for_all_ubuntu_neutron_vlan(self):
-        """Deploy cluster with ha mode, ceph for all, neutron vlan
-
-        Scenario:
-            1. Create cluster
-            2. Add 3 nodes with controller role
-            3. Add 3 nodes with compute and ceph OSD roles
-            4. Deploy the cluster
-            5. Run ostf
-            6. Make snapshot
-
-        Duration 50m
-        Snapshot ha_ceph_for_all_ubuntu_neutron_vlan
-        """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
-        self.check_run('ha_ceph_for_all_ubuntu_neutron_vlan')
-        self.env.revert_snapshot("ready_with_9_slaves")
-
-        data = {
-            'volumes_ceph': True,
-            'images_ceph': True,
-            'ephemeral_ceph': True,
-            'objects_ceph': True,
-            'volumes_lvm': False,
-            'net_provider': 'neutron',
-            'net_segment_type': hlp_data.NEUTRON_SEGMENT['vlan']
-        }
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE_HA,
-            settings=data
-        )
-
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['controller'],
-                'slave-03': ['controller'],
-                'slave-04': ['compute', 'ceph-osd'],
-                'slave-05': ['compute', 'ceph-osd'],
-                'slave-06': ['compute', 'ceph-osd']
-            }
-        )
-
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-        self.env.make_snapshot("ha_ceph_for_all_ubuntu_neutron_vlan",
-                               is_make=True)
+from fuelweb_test.settings import KEYSTONE_CREDS
+from fuelweb_test.settings import OPENSTACK_RELEASE
+from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
+from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade import \
+    DataDrivenUpgradeBase


 @test(groups=["os_upgrade"])
-class TestOSupgrade(base_test_data.TestBasic):
-
-    @test(groups=["upgrade_ha_ceph_for_all_ubuntu_neutron_vlan"])
-    @log_snapshot_after_test
-    def upgrade_ha_ceph_for_all_ubuntu_neutron_vlan(self):
-        """Upgrade master node ha mode, ceph for all, neutron vlan
-
-        Scenario:
-            1. Revert snapshot with ha mode, ceph for all, neutron vlan env
-            2. Run upgrade on master
-            3. Check that upgrade was successful
-        """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
-        self.check_run('upgrade_ha_ceph_for_all_ubuntu_neutron_vlan')
-        self.env.revert_snapshot('ha_ceph_for_all_ubuntu_neutron_vlan')
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-
-        self.env.admin_actions.upgrade_master_node()
-
-        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
-        self.fuel_web.wait_nodes_get_online_state(
-            self.env.d_env.nodes().slaves[:6])
-        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
-        self.fuel_web.assert_nailgun_upgrade_migration()
-        self.env.make_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan",
-                               is_make=True)
-
-    @test(depends_on=[upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
-          groups=["prepare_before_os_upgrade"])
-    @log_snapshot_after_test
-    def prepare_before_os_upgrade(self):
-        """Make prepare actions before os upgrade
-
-        Scenario:
-            1. Revert snapshot upgraded with ceph, neutron vlan
-            2. yum update
-            3. pip install pyzabbix
-            4. yum install fuel-octane
-            5. Create mirrors
-        """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
-        self.check_run('prepare_before_os_upgrade')
-        self.env.revert_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan")
-
-        with self.env.d_env.get_admin_remote() as remote:
-            remote.execute("yum -y update")
-            remote.execute("pip install pyzabbix")
-            install_pkg(remote, "fuel-octane")
-            cmd = (
-                "sed -i 's/DEBUG=\"no\"/DEBUG=\"yes\"/' {}".format(
-                    '/etc/fuel-createmirror/config/ubuntu.cfg'
-                )
-            )
-            remote.execute(cmd)
-            remote.execute("/usr/bin/fuel-createmirror")
-
-        self.env.make_snapshot("prepare_before_os_upgrade", is_make=True)
-
-    @test(depends_on=[prepare_before_os_upgrade],
+class TestOSupgrade(DataDrivenUpgradeBase):
+    @staticmethod
+    def check_release_requirements():
+        if OPENSTACK_RELEASE_UBUNTU not in OPENSTACK_RELEASE:
+            raise SkipTest('{0} not in {1}'.format(
+                OPENSTACK_RELEASE_UBUNTU, OPENSTACK_RELEASE))
+
+    def minimal_check(self, seed_cluster_id, nwk_check=False):
+        def next_step():
+            return self.current_log_step + 1
+
+        if nwk_check:
+            self.show_step(next_step())
+            self.fuel_web.verify_network(seed_cluster_id)
+
+        self.show_step(next_step())
+        self.fuel_web.run_single_ostf_test(
+            cluster_id=seed_cluster_id, test_sets=['sanity'],
+            test_name=('fuel_health.tests.sanity.test_sanity_identity'
+                       '.SanityIdentityTest.test_list_users'))
+
+    def check_ceph_health(self, ip):
+        ceph_health = self.ssh_manager.execute_on_remote(
+            ip=ip, cmd="ceph health")["stdout_str"]
+
+        # There are an issue with PG calculation - LP#1464656
+        try:
+            assert_true("HEALTH_OK" in ceph_health,
+                        "Ceph health is not ok! Inspect output below:\n"
+                        "{!r}".format(ceph_health))
+        except AssertionError:
+            logger.warning("Ceph health is not ok! trying to check LP#1464656")
+            if "HEALTH_WARN" in ceph_health and \
+                    "too many PGs per OSD" in ceph_health:
+                logger.info("Known issue in ceph - see LP#1464656 for details")
+            else:
+                raise
+
+    @property
+    def orig_cluster_id(self):
+        return self.fuel_web.client.get_cluster_id('prepare_upgrade_ceph_ha')
+
+    @test(depends_on_groups=['upgrade_ceph_ha_restore'],
           groups=["os_upgrade_env"])
     @log_snapshot_after_test
     def os_upgrade_env(self):
         """Octane clone target environment

         Scenario:
-            1. Revert snapshot prepare_before_os_upgrade
-            2. run octane upgrade-env <target_env_id>
+            1. Revert snapshot upgrade_ceph_ha_restore
+            2. Run "octane upgrade-env <orig_env_id>"
+            3. Ensure that new cluster was created with correct release
         """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
+        self.check_release_requirements()
         self.check_run('os_upgrade_env')
-        self.env.revert_snapshot("prepare_before_os_upgrade")
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-
-        with self.env.d_env.get_admin_remote() as remote:
-            octane_upgrade_env = remote.execute(
-                "octane upgrade-env {0}".format(cluster_id)
-            )
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-
-        assert_equal(0, octane_upgrade_env['exit_code'])
-        assert_equal(cluster_id,
-                     int(octane_upgrade_env['stdout'][0].split()[0]))
+        self.env.revert_snapshot("upgrade_ceph_ha_restore", skip_timesync=True)
+        self.install_octane()
+
+        self.ssh_manager.execute_on_remote(
+            ip=self.env.get_admin_node_ip(),
+            cmd="octane upgrade-env {0}".format(self.orig_cluster_id),
+            err_msg="'upgrade-env' command failed, inspect logs for details")
+
+        new_cluster_id = self.fuel_web.get_last_created_cluster()
+        assert_not_equal(self.orig_cluster_id, new_cluster_id,
+                         "Cluster IDs are the same: {!r} and {!r}".format(
+                             self.orig_cluster_id, new_cluster_id))
+        assert_equal(self.fuel_web.get_cluster_release_id(new_cluster_id),
+                     self.fuel_web.client.get_release_id(
+                         release_name='Liberty on Ubuntu 14.04'))

         self.env.make_snapshot("os_upgrade_env", is_make=True)

-    @test(depends_on=[os_upgrade_env],
-          groups=["upgrade_first_cic"])
+    @test(depends_on=[os_upgrade_env], groups=["upgrade_first_cic"])
     @log_snapshot_after_test
     def upgrade_first_cic(self):
         """Upgrade first controller

         Scenario:
             1. Revert snapshot os_upgrade_env
-            2. run octane upgrade-node --isolated <seed_env_id> <node_id>
+            2. Select cluster for upgrade and upgraded cluster
+            3. Select controller for upgrade
+            4. Run "octane upgrade-node --isolated <seed_env_id> <node_id>"
+            5. Check tasks status after upgrade run completion
+            6. Run minimal OSTF sanity check (user list) on target cluster
         """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
+        self.check_release_requirements()
         self.check_run('upgrade_first_cic')
-        self.env.revert_snapshot("os_upgrade_env")

-        target_cluster_id = self.fuel_web.client.get_cluster_id(
-            'TestOSupgrade'
-        )
+        self.show_step(1, initialize=True)
+        self.env.revert_snapshot("os_upgrade_env")
+        self.install_octane()
+
+        self.show_step(2)
         seed_cluster_id = self.fuel_web.get_last_created_cluster()
+
+        self.show_step(3)
         controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            target_cluster_id, ["controller"]
-        )
-
-        with self.env.d_env.get_admin_remote() as remote:
-            octane_upgrade_node = remote.execute(
-                "octane upgrade-node --isolated {0} {1}".format(
-                    seed_cluster_id, controllers[-1]["id"])
-            )
-        assert_equal(0, octane_upgrade_node['exit_code'],
-                     "octane upgrade-node returns non zero"
-                     "status code,"
-                     "current result {}".format(octane_upgrade_node))
+            self.orig_cluster_id, ["controller"])
+
+        self.show_step(4)
+        self.ssh_manager.execute_on_remote(
+            ip=self.ssh_manager.admin_ip,
+            cmd="octane upgrade-node --isolated "
+                "{0} {1}".format(seed_cluster_id, controllers[-1]["id"]),
+            err_msg="octane upgrade-node failed")
+
+        self.show_step(5)
         tasks_started_by_octane = [
             task for task in self.fuel_web.client.get_tasks()
-            if task['cluster'] == seed_cluster_id
-        ]
+            if task['cluster'] == seed_cluster_id]

         for task in tasks_started_by_octane:
             self.fuel_web.assert_task_success(task)

+        self.show_step(6)
+        self.minimal_check(seed_cluster_id=seed_cluster_id)
+
         self.env.make_snapshot("upgrade_first_cic", is_make=True)

     @test(depends_on=[upgrade_first_cic],
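
Worth calling out before the next hunks: every rewritten step in TestOSupgrade now follows the same skeleton — guard on the release with `check_release_requirements`, short-circuit via `check_run`, revert the previous snapshot, `install_octane()`, number the actions with `show_step` so failures map back to the docstring scenario, and finish with `make_snapshot(..., is_make=True)`. A condensed sketch of that skeleton with placeholder names (the real steps follow in the hunks below):

    @test(depends_on_groups=['upgrade_ceph_ha_restore'], groups=["example_step"])
    @log_snapshot_after_test
    def example_step(self):
        """Example octane step

        Scenario:
            1. Revert snapshot from the previous step
            2. Run one octane command
        """
        self.check_release_requirements()      # SkipTest unless the Ubuntu release
        self.check_run('example_step')         # skip if the snapshot already exists

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("previous_step")
        self.install_octane()

        self.show_step(2)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-env 1",        # placeholder command/arguments
            err_msg="octane command failed")

        self.env.make_snapshot("example_step", is_make=True)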
@@ -237,77 +156,72 @@ class TestOSupgrade(base_test_data.TestBasic):
         Scenario:
             1. Revert snapshot upgrade_first_cic
-            2. run octane upgrade-db <target_env_id> <seed_env_id>
+            2. Select cluster for upgrade and upgraded cluster
+            3. Select controller for db upgrade
+            4. Collect from db IDs for upgrade (used in checks)
+            5. Run "octane upgrade-db <orig_env_id> <seed_env_id>"
+            6. Check upgrade status
         """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
+        self.check_release_requirements()
         self.check_run('upgrade_db')
-        self.env.revert_snapshot("upgrade_first_cic")

-        target_cluster_id = self.fuel_web.client.get_cluster_id(
-            'TestOSupgrade'
-        )
-        target_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            target_cluster_id, ["controller"]
-        )[0]
+        self.show_step(1, initialize=True)
+        self.env.revert_snapshot("upgrade_first_cic", skip_timesync=True)
+        self.install_octane()
+
+        self.show_step(2)
         seed_cluster_id = self.fuel_web.get_last_created_cluster()
-        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            seed_cluster_id, ["controller"]
-        )[0]
-
-        with self.env.d_env.get_ssh_to_remote(
-                target_controller["ip"]) as remote:
-            target_ids = remote.execute(
+
+        self.show_step(3)
+        orig_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            self.orig_cluster_id, ["controller"])[0]
+        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            seed_cluster_id, ["controller"])[0]
+
+        mysql_req = (
             'mysql cinder <<< "select id from volumes;"; '
             'mysql glance <<< "select id from images"; '
             'mysql neutron <<< "(select id from networks) '
             'UNION (select id from routers) '
             'UNION (select id from subnets)"; '
             'mysql keystone <<< "(select id from project) '
-            'UNION (select id from user)"'
-            )["stdout"]
-
-        with self.env.d_env.get_admin_remote() as remote:
-            octane_upgrade_db = remote.execute(
-                "octane upgrade-db {0} {1}".format(
-                    target_cluster_id, seed_cluster_id)
-            )
-
-        assert_equal(0, octane_upgrade_db['exit_code'],
-                     "octane upgrade-db returns non zero"
-                     "status code,"
-                     "current result is {}".format(octane_upgrade_db))
+            'UNION (select id from user)"')
+
+        self.show_step(4)
+        target_ids = self.ssh_manager.execute_on_remote(
+            ip=orig_controller["ip"], cmd=mysql_req)['stdout']
+
+        self.show_step(5)
+        self.ssh_manager.execute_on_remote(
+            ip=self.ssh_manager.admin_ip,
+            cmd="octane upgrade-db {0} {1}".format(
+                self.orig_cluster_id, seed_cluster_id),
+            err_msg="octane upgrade-db failed")

-        with self.env.d_env.get_ssh_to_remote(controller["ip"]) as remote:
-            stdout = remote.execute("crm resource status")["stdout"]
-            seed_ids = remote.execute(
-                'mysql cinder <<< "select id from volumes;"; '
-                'mysql glance <<< "select id from images"; '
-                'mysql neutron <<< "(select id from networks) '
-                'UNION (select id from routers) '
-                'UNION (select id from subnets)"; '
-                'mysql keystone <<< "(select id from project) '
-                'UNION (select id from user)"'
-            )["stdout"]
-
-        while stdout:
-            current = stdout.pop(0)
+        self.show_step(6)
+
+        crm_status = self.ssh_manager.execute_on_remote(
+            ip=seed_controller["ip"], cmd="crm resource status")['stdout']
+
+        while crm_status:
+            current = crm_status.pop(0)
             if "vip" in current:
                 assert_true("Started" in current)
             elif "master_p" in current:
-                next_element = stdout.pop(0)
+                next_element = crm_status.pop(0)
                 assert_true("Masters: [ node-" in next_element)
             elif any(x in current for x in ["ntp", "mysql", "dns"]):
-                next_element = stdout.pop(0)
+                next_element = crm_status.pop(0)
                 assert_true("Started" in next_element)
             elif any(x in current for x in ["nova", "cinder", "keystone",
                                             "heat", "neutron", "glance"]):
-                next_element = stdout.pop(0)
+                next_element = crm_status.pop(0)
                 assert_true("Stopped" in next_element)

+        seed_ids = self.ssh_manager.execute_on_remote(
+            ip=seed_controller["ip"], cmd=mysql_req)['stdout']
+
         assert_equal(sorted(target_ids), sorted(seed_ids),
                      "Objects in target and seed dbs are different")
@@ -321,150 +235,253 @@ class TestOSupgrade(base_test_data.TestBasic):
         Scenario:
             1. Revert snapshot upgrade_db
-            2. run octane upgrade-ceph <target_env_id> <seed_env_id>
+            2. Select cluster for upgrade and upgraded cluster
+            3. Run octane upgrade-ceph <orig_env_id> <seed_env_id>
+            4. Check CEPH health on seed env
         """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
+        self.check_release_requirements()
         self.check_run('upgrade_ceph')

+        self.show_step(1, initialize=True)
         self.env.revert_snapshot("upgrade_db")
+        self.install_octane()

-        target_cluster_id = self.fuel_web.client.get_cluster_id(
-            'TestOSupgrade'
-        )
+        self.show_step(2)
         seed_cluster_id = self.fuel_web.get_last_created_cluster()
-        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            seed_cluster_id, ["controller"]
-        )[0]
-
-        with self.env.d_env.get_admin_remote() as remote:
-            octane_upgrade_ceph = remote.execute(
-                "octane upgrade-ceph {0} {1}".format(
-                    target_cluster_id, seed_cluster_id)
-            )
-
-        assert_equal(0, octane_upgrade_ceph['exit_code'],
-                     "octane upgrade-ceph returns non zero status code,"
-                     "current result is {}".format(octane_upgrade_ceph))
-
-        with self.env.d_env.get_ssh_to_remote(controller["ip"]) as remote:
-            ceph_health = remote.execute("ceph health")["stdout"][0][:-1]
-
-        assert_equal("HEALTH_OK", ceph_health)
+
+        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            seed_cluster_id, ["controller"])[0]
+
+        self.show_step(3)
+        self.ssh_manager.execute_on_remote(
+            ip=self.ssh_manager.admin_ip,
+            cmd="octane upgrade-ceph {0} {1}".format(
+                self.orig_cluster_id, seed_cluster_id),
+            err_msg="octane upgrade-ceph failed")
+
+        self.show_step(4)
+        self.check_ceph_health(seed_controller['ip'])

         self.env.make_snapshot("upgrade_ceph", is_make=True)

     @test(depends_on=[upgrade_ceph],
-          groups=["upgrade_control_plane"])
+          groups=["upgrade_controllers"])
     @log_snapshot_after_test
-    def upgrade_control_plane(self):
-        """Upgrade control plane
+    def upgrade_controllers(self):
+        """Upgrade control plane and remaining controllers

         Scenario:
             1. Revert snapshot upgrade_ceph
-            2. run octane upgrade-control <target_env_id> <seed_env_id>
-            3. run octane upgrade-node <seed_cluster_id> <node_id> <node_id>
+            2. Select cluster for upgrade and upgraded cluster
+            3. Run octane upgrade-control <orig_env_id> <seed_env_id>
+            4. Check cluster consistency
+            5. Collect old controllers for upgrade
+            6. Run octane upgrade-node <seed_cluster_id> <node_id> <node_id>
+            7. Check tasks status after upgrade run completion
+            8. Run network verification on target cluster
+            9. Run minimal OSTF sanity check (user list) on target cluster
         """
-        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
-            raise SkipTest()
-
-        self.check_run('upgrade_control_plane')
+        self.check_release_requirements()
+        self.check_run('upgrade_controllers')
+
+        self.show_step(1, initialize=True)
         self.env.revert_snapshot("upgrade_ceph")
+        self.install_octane()

-        target_cluster_id = self.fuel_web.client.get_cluster_id(
-            'TestOSupgrade'
-        )
+        self.show_step(2)
         seed_cluster_id = self.fuel_web.get_last_created_cluster()

-        with self.env.d_env.get_admin_remote() as remote:
-            octane_upgrade_control = remote.execute(
-                "octane upgrade-control {0} {1}".format(
-                    target_cluster_id, seed_cluster_id)
-            )
-
-        assert_equal(0, octane_upgrade_control['exit_code'],
-                     "octane upgrade-control returns non zero status code,"
-                     "current result is {}".format(octane_upgrade_control))
-
+        self.show_step(3)
+        self.ssh_manager.execute_on_remote(
+            ip=self.ssh_manager.admin_ip,
+            cmd="octane upgrade-control {0} {1}".format(
+                self.orig_cluster_id, seed_cluster_id),
+            err_msg="octane upgrade-control failed")
+
+        self.show_step(4)
         controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            seed_cluster_id, ["controller"]
-        )
-        old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            target_cluster_id, ["compute"]
-        )
-        old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            target_cluster_id, ["controller"]
-        )
-
-        ping_ips = []
-        for node in controllers + old_computes:
-            for data in node["network_data"]:
+            seed_cluster_id, ["controller"])
+
+        old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            self.orig_cluster_id, ["controller"])
+
+        old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            self.orig_cluster_id, ["compute"])
+
+        def collect_management_ips(node_list):
+            result = []
+            for item in node_list:
+                for data in item["network_data"]:
                     if data["name"] == "management":
-                    ping_ips.append(data["ip"].split("/")[0])
+                        result.append(data["ip"].split("/")[0])
+            return result
+
+        ping_ips = collect_management_ips(controllers + old_computes)
         ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id))

-        non_ping_ips = []
-        for node in old_controllers:
-            for data in node["network_data"]:
-                if data["name"] == "management":
-                    non_ping_ips.append(data["ip"].split("/")[0])
+        non_ping_ips = collect_management_ips(old_controllers)
+
+        ping_cmd = "ping -W 1 -i 1 -s 56 -c 1 -w 10 {host}"

         for node in controllers + old_computes:
-            with self.env.d_env.get_ssh_to_remote(node["ip"]) as remote:
-                remote.execute("ip -s -s neigh flush all")
-
-                for ip in ping_ips:
-                    assert_true(checkers.check_ping(remote, ip),
-                                "Can not ping {0} from {1}"
-                                "need to check network"
-                                " connectivity".format(ip, node["ip"]))
-
-                for ip in non_ping_ips:
-                    assert_false(checkers.check_ping(remote, ip),
-                                 "Patch ports from old controllers"
-                                 "isn't removed")
+            self.ssh_manager.execute_on_remote(
+                ip=node["ip"], cmd="ip -s -s neigh flush all")
+
+            for ip in ping_ips:
+                self.ssh_manager.execute_on_remote(
+                    ip=node["ip"],
+                    cmd=ping_cmd.format(host=ip),
+                    err_msg="Can not ping {0} from {1}"
+                            "need to check network"
+                            " connectivity".format(ip, node["ip"]))
+
+            for ip in non_ping_ips:
+                self.ssh_manager.execute_on_remote(
+                    ip=node["ip"],
+                    cmd=ping_cmd.format(host=ip),
+                    err_msg="Patch ports from old controllers isn't removed",
+                    assert_ec_equal=[1, 2])  # No reply, Other errors

-        time.sleep(180)  # TODO need to remove
-        # after fix of https://bugs.launchpad.net/fuel/+bug/1499696
-
-        with self.env.d_env.get_ssh_to_remote(controllers[0]["ip"]) as remote:
-            stdout = remote.execute("crm resource status")["stdout"]
-
-        while stdout:
-            current = stdout.pop(0)
+        crm = self.ssh_manager.execute_on_remote(
+            ip=controllers[0]["ip"],
+            cmd="crm resource status")["stdout"]
+
+        while crm:
+            current = crm.pop(0)
             if "vip" in current:
                 assert_true("Started" in current)
             elif "master_p" in current:
-                next_element = stdout.pop(0)
+                next_element = crm.pop(0)
                 assert_true("Masters: [ node-" in next_element)
-            elif any(x in current for x in ["ntp", "mysql", "dns"]):
-                next_element = stdout.pop(0)
-                assert_true("Started" in next_element)
-            elif any(x in current for x in ["nova", "cinder", "keystone",
+            elif any(x in current for x in ["ntp", "mysql", "dns",
+                                            "nova", "cinder", "keystone",
                                             "heat", "neutron", "glance"]):
-                next_element = stdout.pop(0)
+                next_element = crm.pop(0)
                 assert_true("Started" in next_element)

-        with self.env.d_env.get_admin_remote() as remote:
-            octane_upgrade_node = remote.execute(
-                "octane upgrade-node {0} {1} {2}".format(
-                    seed_cluster_id, old_controllers[0]["id"],
-                    old_controllers[1]["id"])
-            )
-        assert_equal(0, octane_upgrade_node['exit_code'],
-                     "octane upgrade-node returns non zero"
-                     "status code,"
-                     "current result {}".format(octane_upgrade_node))
+        # upgrade controllers part
+        self.show_step(5)
+        seed_cluster_id = self.fuel_web.get_last_created_cluster()
+
+        self.show_step(6)
+        self.ssh_manager.execute_on_remote(
+            ip=self.ssh_manager.admin_ip,
+            cmd="octane upgrade-node {0} {1}".format(
+                seed_cluster_id,
+                " ".join([str(ctrl["id"]) for ctrl in old_controllers])),
+            err_msg="octane upgrade-node failed")
+
+        self.show_step(7)
         tasks_started_by_octane = [
             task for task in self.fuel_web.client.get_tasks()
-            if task['cluster'] == seed_cluster_id
-        ]
+            if task['cluster'] == seed_cluster_id]

         for task in tasks_started_by_octane:
             self.fuel_web.assert_task_success(task)

-        self.env.make_snapshot("upgrade_control_plane", is_make=True)
+        self.show_step(8)
+        self.show_step(9)
+        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)
+
+        self.env.make_snapshot("upgrade_controllers", is_make=True)
+
+    @test(depends_on=[upgrade_controllers], groups=["upgrade_ceph_osd"])
+    @log_snapshot_after_test
+    def upgrade_ceph_osd(self):
+        """Upgrade ceph osd
+
+        Scenario:
+            1. Revert snapshot upgrade_all_controllers
+            2. Select cluster for upgrade and upgraded cluster
+            3. Run octane upgrade-osd <target_env_id> <seed_env_id>
+            4. Check CEPH health on seed env
+            5. run network verification on target cluster
+            6. run minimal OSTF sanity check (user list) on target cluster
+        """
+        self.check_release_requirements()
+        self.check_run('upgrade_ceph_osd')
+
+        self.show_step(1, initialize=True)
+        self.env.revert_snapshot("upgrade_controllers")
+        self.install_octane()
+
+        self.show_step(2)
+        seed_cluster_id = self.fuel_web.get_last_created_cluster()
+
+        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            seed_cluster_id, ["controller"]
+        )[0]
+
+        self.show_step(3)
+        self.ssh_manager.execute_on_remote(
+            ip=self.ssh_manager.admin_ip,
+            cmd="octane upgrade-osd --admin-password {0} {1}".format(
+                KEYSTONE_CREDS['password'],
+                self.orig_cluster_id),
+            err_msg="octane upgrade-osd failed"
+        )
+
+        self.show_step(4)
+        self.check_ceph_health(seed_controller['ip'])
+
+        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)
+
+        self.env.make_snapshot("upgrade_ceph_osd", is_make=True)
+
+    @test(depends_on=[upgrade_ceph_osd], groups=["upgrade_old_nodes"])
+    @log_snapshot_after_test
+    def upgrade_old_nodes(self):
+        """Upgrade all non controller nodes
+
+        Scenario:
+            1. Revert snapshot upgrade_all_controllers
+            2. Select cluster for upgrade and upgraded cluster
+            3. Collect nodes for upgrade
+            4. Run octane upgrade-node $SEED_ID <ID>
+            5. run network verification on target cluster
+            6. run OSTF check
+            7. Drop old cluster
+        """
+        self.check_release_requirements()
+        self.check_run('upgrade_old_nodes')
+
+        self.show_step(1, initialize=True)
+        self.env.revert_snapshot("upgrade_ceph_osd")
+        self.install_octane()
+
+        self.show_step(2)
+        seed_cluster_id = self.fuel_web.get_last_created_cluster()
+
+        self.show_step(3)
+        # old_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+        #     orig_cluster_id, ["compute"]
+        # )
+        # TODO(astepanov): validate, that only correct nodes acquired
+        old_nodes = self.fuel_web.client.list_cluster_nodes(
+            self.orig_cluster_id)
+
+        self.show_step(4)
+        self.ssh_manager.execute_on_remote(
+            ip=self.ssh_manager.admin_ip,
+            cmd="octane upgrade-node {0} {1}".format(
+                seed_cluster_id,
+                " ".join([str(ctrl["id"]) for ctrl in old_nodes])),
+            err_msg="octane upgrade-node failed"
+        )
+
+        self.show_step(5)
+        self.fuel_web.verify_network(seed_cluster_id)
+
+        self.show_step(6)
+        self.fuel_web.run_ostf(seed_cluster_id)
+
+        self.show_step(7)
+        self.fuel_web.delete_env_wait(self.orig_cluster_id)
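
For orientation, a condensed summary (not part of the change) of the octane sequence the rewritten TestOSupgrade now exercises, one snapshot per step; the commands are quoted from the hunks above with environment and node IDs abstracted:

    # (snapshot made by the step, admin-node command the step runs)
    OCTANE_SEQUENCE = [
        ("os_upgrade_env",      "octane upgrade-env <orig_env_id>"),
        ("upgrade_first_cic",   "octane upgrade-node --isolated <seed_env_id> <ctrl_id>"),
        ("upgrade_db",          "octane upgrade-db <orig_env_id> <seed_env_id>"),
        ("upgrade_ceph",        "octane upgrade-ceph <orig_env_id> <seed_env_id>"),
        ("upgrade_controllers", "octane upgrade-control <orig_env_id> <seed_env_id>"
                                " + octane upgrade-node <seed_env_id> <old_ctrl_ids>"),
        ("upgrade_ceph_osd",    "octane upgrade-osd --admin-password <pw> <orig_env_id>"),
        ("upgrade_old_nodes",   "octane upgrade-node <seed_env_id> <remaining_node_ids>"),
    ]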