OS Upgrade tests: separate helpers
1. test_os_upgrade is moved under tests_upgrade.
2. The main code is moved into a separate module to avoid copy-paste in new tests.
3. Fixed logging that produced unreadable output.
4. Switched to check_call, which removes the pain of cherry-picking.
5. Use octane cleanup.

Closes-bug: #1612236
Change-Id: Ida7f5901f070a9ef507ce6027fd2618b8617d89f
parent 292b8e2efd
commit a9f7dd2f83
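Item 4 of the message replaces execute_on_remote calls with ssh_manager.check_call throughout the upgrade helpers; as used in this diff, check_call fails the test as soon as a command exits with a code outside the expected set, so callers no longer assert on the result by hand. A minimal local sketch of that behaviour, built on subprocess rather than the real fuel-qa SSHManager (the helper name and signature here are illustrative only):

import subprocess


def check_call(command, expected=(0,), error_info=""):
    # Run a shell command and fail loudly on an unexpected exit code,
    # mirroring the way the tests below use ssh_manager.check_call.
    result = subprocess.run(
        command, shell=True, capture_output=True, text=True)
    if result.returncode not in expected:
        raise AssertionError("{0} (rc={1}): {2}".format(
            error_info or command, result.returncode, result.stderr.strip()))
    return result.stdout.splitlines()


print(check_call("echo hello"))      # ['hello']
check_call("false", expected=(1,))   # a non-zero code can be the expected one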
@@ -343,7 +343,7 @@ Test Services reconfiguration
    :members:
 
 Test Support HugePages
------------------------------
+----------------------
 .. automodule:: fuelweb_test.tests.test_support_hugepages
    :members:
@@ -680,7 +680,10 @@ OS upgrade tests
 
 Test OpenStack Upgrades
 -----------------------
-.. automodule:: fuelweb_test.tests.test_os_upgrade
+.. automodule:: fuelweb_test.tests.tests_upgrade.upgrade_base
+    :members:
+
+.. automodule:: fuelweb_test.tests.tests_upgrade.test_os_upgrade
     :members:
 
 Tests for separated services
@@ -69,6 +69,10 @@ class TestBasic(object):
     def current_log_step(self, new_val):
         self.__current_log_step = new_val
 
+    @property
+    def next_step(self):
+        return self.current_log_step + 1
+
     @property
     def test_program(self):
         if self.__test_program is None:
@@ -95,7 +99,7 @@ class TestBasic(object):
         """
         if snapshot_name:
             if self.env.d_env.has_snapshot(snapshot_name):
-                raise SkipTest()
+                raise SkipTest('{} is already present'.format(snapshot_name))
 
     def show_step(self, step, details='', initialize=False):
         """Show a description of the step taken from docstring
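The next_step property added above replaces the local next_step() closure that the old TestOSupgrade.minimal_check defined for itself: shared helpers can now call self.show_step(self.next_step) and keep numbering from wherever the calling test stopped. A standalone sketch of that counter logic (a simplified stand-in, not the real TestBasic):

class StepLogger(object):
    def __init__(self):
        self.current_log_step = 0

    @property
    def next_step(self):
        # The step a shared helper should report next.
        return self.current_log_step + 1

    def show_step(self, step, initialize=False):
        if initialize:
            self.current_log_step = 0
        print("STEP {0}".format(step))
        self.current_log_step = step


log = StepLogger()
log.show_step(1, initialize=True)  # test body announces step 1
log.show_step(2)                   # test body announces step 2
log.show_step(log.next_step)       # shared helper continues with step 3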
fuelweb_test/tests/test_os_upgrade.py (deleted file, 534 lines)
@@ -1,534 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis.asserts import assert_equal, assert_not_equal
from proboscis.asserts import assert_true
from proboscis import test
from proboscis import SkipTest

from fuelweb_test import logger
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.settings import KEYSTONE_CREDS
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \
    DataDrivenUpgradeBase


@test(groups=["os_upgrade"])
class TestOSupgrade(DataDrivenUpgradeBase):
    @staticmethod
    def check_release_requirements():
        if OPENSTACK_RELEASE_UBUNTU not in OPENSTACK_RELEASE:
            raise SkipTest('{0} not in {1}'.format(
                OPENSTACK_RELEASE_UBUNTU, OPENSTACK_RELEASE))

    def minimal_check(self, seed_cluster_id, nwk_check=False):
        def next_step():
            return self.current_log_step + 1

        if nwk_check:
            self.show_step(next_step())
            self.fuel_web.verify_network(seed_cluster_id)

        self.show_step(next_step())
        self.fuel_web.run_single_ostf_test(
            cluster_id=seed_cluster_id, test_sets=['sanity'],
            test_name=('fuel_health.tests.sanity.test_sanity_identity'
                       '.SanityIdentityTest.test_list_users'))

    def check_ceph_health(self, ip):
        ceph_health = self.ssh_manager.execute_on_remote(
            ip=ip, cmd="ceph health")["stdout_str"]

        # There are an issue with PG calculation - LP#1464656
        try:
            assert_true("HEALTH_OK" in ceph_health,
                        "Ceph health is not ok! Inspect output below:\n"
                        "{!r}".format(ceph_health))
        except AssertionError:
            logger.warning("Ceph health is not ok! trying to check LP#1464656")
            if "HEALTH_WARN" in ceph_health and \
                    "too many PGs per OSD" in ceph_health:
                logger.info("Known issue in ceph - see LP#1464656 for details")
            else:
                raise

    @property
    def orig_cluster_id(self):
        return self.fuel_web.client.get_cluster_id('prepare_upgrade_ceph_ha')

    @test(depends_on_groups=['upgrade_ceph_ha_restore'],
          groups=["os_upgrade_env"])
    @log_snapshot_after_test
    def os_upgrade_env(self):
        """Octane clone target environment

        Scenario:
            1. Revert snapshot upgrade_ceph_ha_restore
            2. Run "octane upgrade-env <orig_env_id>"
            3. Ensure that new cluster was created with correct release

        """
        self.check_release_requirements()
        self.check_run('os_upgrade_env')
        self.env.revert_snapshot("upgrade_ceph_ha_restore", skip_timesync=True)
        self.install_octane()

        self.ssh_manager.execute_on_remote(
            ip=self.env.get_admin_node_ip(),
            cmd="octane upgrade-env {0}".format(self.orig_cluster_id),
            err_msg="'upgrade-env' command failed, inspect logs for details")

        new_cluster_id = self.fuel_web.get_last_created_cluster()
        assert_not_equal(self.orig_cluster_id, new_cluster_id,
                         "Cluster IDs are the same: {!r} and {!r}".format(
                             self.orig_cluster_id, new_cluster_id))
        assert_equal(self.fuel_web.get_cluster_release_id(new_cluster_id),
                     self.fuel_web.client.get_release_id(
                         release_name='Liberty on Ubuntu 14.04'))

        self.env.make_snapshot("os_upgrade_env", is_make=True)

    @test(depends_on=[os_upgrade_env], groups=["upgrade_first_cic"])
    @log_snapshot_after_test
    def upgrade_first_cic(self):
        """Upgrade first controller

        Scenario:
            1. Revert snapshot os_upgrade_env
            2. Select cluster for upgrade and upgraded cluster
            3. Select controller for upgrade
            4. Run "octane upgrade-node --isolated <seed_env_id> <node_id>"
            5. Check tasks status after upgrade run completion
            6. Run minimal OSTF sanity check (user list) on target cluster

        """
        self.check_release_requirements()
        self.check_run('upgrade_first_cic')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("os_upgrade_env")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)
        controller = self.fuel_web.get_devops_node_by_nailgun_node(
            self.fuel_web.get_nailgun_cluster_nodes_by_roles(
                self.orig_cluster_id, ["controller"])[0])
        primary = self.fuel_web.get_nailgun_node_by_devops_node(
            self.fuel_web.get_nailgun_primary_node(controller)
        )

        self.show_step(4)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-node --isolated "
                "{0} {1}".format(seed_cluster_id, primary["id"]),
            err_msg="octane upgrade-node failed")

        self.show_step(5)
        tasks_started_by_octane = [
            task for task in self.fuel_web.client.get_tasks()
            if task['cluster'] == seed_cluster_id]

        for task in tasks_started_by_octane:
            self.fuel_web.assert_task_success(task)

        self.show_step(6)
        self.minimal_check(seed_cluster_id=seed_cluster_id)

        self.env.make_snapshot("upgrade_first_cic", is_make=True)

    @test(depends_on=[upgrade_first_cic],
          groups=["upgrade_db"])
    @log_snapshot_after_test
    def upgrade_db(self):
        """Move and upgrade mysql db from target cluster to seed cluster

        Scenario:
            1. Revert snapshot upgrade_first_cic
            2. Select cluster for upgrade and upgraded cluster
            3. Select controller for db upgrade
            4. Collect from db IDs for upgrade (used in checks)
            5. Run "octane upgrade-db <orig_env_id> <seed_env_id>"
            6. Check upgrade status

        """

        self.check_release_requirements()
        self.check_run('upgrade_db')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_first_cic", skip_timesync=True)
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)
        orig_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.orig_cluster_id, ["controller"])[0]
        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"])[0]

        mysql_req = (
            'mysql cinder <<< "select id from volumes;"; '
            'mysql glance <<< "select id from images"; '
            'mysql neutron <<< "(select id from networks) '
            'UNION (select id from routers) '
            'UNION (select id from subnets)"; '
            'mysql keystone <<< "(select id from project) '
            'UNION (select id from user)"')

        self.show_step(4)
        target_ids = self.ssh_manager.execute_on_remote(
            ip=orig_controller["ip"], cmd=mysql_req)['stdout']

        self.show_step(5)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-db {0} {1}".format(
                self.orig_cluster_id, seed_cluster_id),
            err_msg="octane upgrade-db failed")

        self.show_step(6)

        crm_status = self.ssh_manager.execute_on_remote(
            ip=seed_controller["ip"], cmd="crm resource status")['stdout']

        while crm_status:
            current = crm_status.pop(0)
            if "vip" in current:
                assert_true("Started" in current)
            elif "master_p" in current:
                next_element = crm_status.pop(0)
                assert_true("Masters: [ node-" in next_element)
            elif any(x in current for x in ["ntp", "mysql", "dns"]):
                next_element = crm_status.pop(0)
                assert_true("Started" in next_element)
            elif any(x in current for x in ["nova", "cinder", "keystone",
                                            "heat", "neutron", "glance"]):
                next_element = crm_status.pop(0)
                assert_true("Stopped" in next_element)

        seed_ids = self.ssh_manager.execute_on_remote(
            ip=seed_controller["ip"], cmd=mysql_req)['stdout']
        assert_equal(sorted(target_ids), sorted(seed_ids),
                     "Objects in target and seed dbs are different")

        self.env.make_snapshot("upgrade_db", is_make=True)

    @test(depends_on=[upgrade_db],
          groups=["upgrade_ceph"])
    @log_snapshot_after_test
    def upgrade_ceph(self):
        """Upgrade ceph

        Scenario:
            1. Revert snapshot upgrade_db
            2. Select cluster for upgrade and upgraded cluster
            3. Run octane upgrade-ceph <orig_env_id> <seed_env_id>
            4. Check CEPH health on seed env
        """

        self.check_release_requirements()
        self.check_run('upgrade_ceph')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_db")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"])[0]

        self.show_step(3)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-ceph {0} {1}".format(
                self.orig_cluster_id, seed_cluster_id),
            err_msg="octane upgrade-ceph failed")

        self.show_step(4)
        self.check_ceph_health(seed_controller['ip'])

        self.env.make_snapshot("upgrade_ceph", is_make=True)

    @test(depends_on=[upgrade_ceph],
          groups=["upgrade_controllers"])
    @log_snapshot_after_test
    def upgrade_controllers(self):
        """Upgrade control plane and remaining controllers

        Scenario:
            1. Revert snapshot upgrade_ceph
            2. Select cluster for upgrade and upgraded cluster
            3. Run octane upgrade-control <orig_env_id> <seed_env_id>
            4. Check cluster consistency
            5. Collect old controllers for upgrade
            6. Run octane upgrade-node <seed_cluster_id> <node_id> <node_id>
            7. Check tasks status after upgrade run completion
            8. Run network verification on target cluster
            9. Run minimal OSTF sanity check (user list) on target cluster

        """

        self.check_release_requirements()
        self.check_run('upgrade_controllers')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_ceph")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-control {0} {1}".format(
                self.orig_cluster_id, seed_cluster_id),
            err_msg="octane upgrade-control failed")

        self.show_step(4)
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"])

        old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.orig_cluster_id, ["controller"])

        old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.orig_cluster_id, ["compute"])

        def collect_management_ips(node_list):
            result = []
            for item in node_list:
                for data in item["network_data"]:
                    if data["name"] == "management":
                        result.append(data["ip"].split("/")[0])
            return result

        ping_ips = collect_management_ips(controllers + old_computes)
        ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id))

        non_ping_ips = collect_management_ips(old_controllers)

        ping_cmd = "ping -W 1 -i 1 -s 56 -c 1 -w 10 {host}"

        for node in controllers + old_computes:
            self.ssh_manager.execute_on_remote(
                ip=node["ip"], cmd="ip -s -s neigh flush all")

            for ip in ping_ips:
                self.ssh_manager.execute_on_remote(
                    ip=node["ip"],
                    cmd=ping_cmd.format(host=ip),
                    err_msg="Can not ping {0} from {1}"
                            "need to check network"
                            " connectivity".format(ip, node["ip"]))

            for ip in non_ping_ips:
                self.ssh_manager.execute_on_remote(
                    ip=node["ip"],
                    cmd=ping_cmd.format(host=ip),
                    err_msg="Patch ports from old controllers isn't removed",
                    assert_ec_equal=[1, 2])  # No reply, Other errors

        crm = self.ssh_manager.execute_on_remote(
            ip=controllers[0]["ip"],
            cmd="crm resource status")["stdout"]

        while crm:
            current = crm.pop(0)
            if "vip" in current:
                assert_true("Started" in current)
            elif "master_p" in current:
                next_element = crm.pop(0)
                assert_true("Masters: [ node-" in next_element)
            elif any(x in current for x in ["ntp", "mysql", "dns",
                                            "nova", "cinder", "keystone",
                                            "heat", "neutron", "glance"]):
                next_element = crm.pop(0)
                assert_true("Started" in next_element)

        # upgrade controllers part
        self.show_step(5)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(6)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-node {0} {1}".format(
                seed_cluster_id,
                " ".join([str(ctrl["id"]) for ctrl in old_controllers])),
            err_msg="octane upgrade-node failed")

        self.show_step(7)
        tasks_started_by_octane = [
            task for task in self.fuel_web.client.get_tasks()
            if task['cluster'] == seed_cluster_id]

        for task in tasks_started_by_octane:
            self.fuel_web.assert_task_success(task)

        self.show_step(8)
        self.show_step(9)
        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)

        self.env.make_snapshot("upgrade_controllers", is_make=True)

    @test(depends_on=[upgrade_controllers], groups=["upgrade_ceph_osd"])
    @log_snapshot_after_test
    def upgrade_ceph_osd(self):
        """Upgrade ceph osd

        Scenario:
            1. Revert snapshot upgrade_all_controllers
            2. Select cluster for upgrade and upgraded cluster
            3. Run octane upgrade-osd <target_env_id> <seed_env_id>
            4. Check CEPH health on seed env
            5. run network verification on target cluster
            6. run minimal OSTF sanity check (user list) on target cluster
        """

        self.check_release_requirements()
        self.check_run('upgrade_ceph_osd')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_controllers")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"]
        )[0]

        self.show_step(3)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-osd --admin-password {0} {1}".format(
                KEYSTONE_CREDS['password'],
                self.orig_cluster_id),
            err_msg="octane upgrade-osd failed"
        )

        self.show_step(4)
        self.check_ceph_health(seed_controller['ip'])

        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)

        self.env.make_snapshot("upgrade_ceph_osd", is_make=True)

    @test(depends_on=[upgrade_ceph_osd],
          groups=["upgrade_old_nodes",
                  "upgrade_cloud_no_live_migration"])
    @log_snapshot_after_test
    def upgrade_old_nodes(self):
        """Upgrade all non controller nodes - no live migration

        Scenario:
            1. Revert snapshot upgrade_ceph_osd
            2. Select cluster for upgrade and upgraded cluster
            3. Collect nodes for upgrade
            4. Run octane upgrade-node --no-live-migration $SEED_ID <ID>
            5. Run network verification on target cluster
            6. Run OSTF check
            7. Drop orig cluster
        """
        self.check_release_requirements()

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_ceph_osd")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)

        old_nodes = self.fuel_web.client.list_cluster_nodes(
            self.orig_cluster_id)

        self.show_step(4)

        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="octane upgrade-node --no-live-migration {0} {1}".format(
                seed_cluster_id,
                " ".join([str(node["id"]) for node in old_nodes])),
            err_msg="octane upgrade-node failed")

        self.show_step(5)
        self.fuel_web.verify_network(seed_cluster_id)

        self.show_step(6)
        self.fuel_web.run_ostf(seed_cluster_id)

        self.show_step(7)
        self.fuel_web.delete_env_wait(self.orig_cluster_id)

    @test(depends_on=[upgrade_ceph_osd],
          groups=["upgrade_nodes_live_migration",
                  "upgrade_cloud_live_migration"])
    @log_snapshot_after_test
    def upgrade_nodes_live_migration(self):
        """Upgrade all non controller nodes with live migration

        Scenario:
            1. Revert snapshot upgrade_ceph_osd
            2. Select cluster for upgrade and upgraded cluster
            3. Collect nodes for upgrade
            4. Upgrade each node using octane upgrade-node $SEED_ID <ID>
            5. Run network verification on target cluster
            6. Run OSTF check
            7. Drop orig cluster
        """

        self.check_release_requirements()

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_ceph_osd")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)
        old_nodes = self.fuel_web.client.list_cluster_nodes(
            self.orig_cluster_id)

        self.show_step(4)
        for node in old_nodes:
            logger.info("Upgrading node {!s}, role {!s}".format(
                node['id'], node['roles']))
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd="octane upgrade-node {0} {1!s}".format(
                    seed_cluster_id, node['id']))

        self.show_step(5)
        self.fuel_web.verify_network(seed_cluster_id)

        self.show_step(6)
        self.fuel_web.run_ostf(seed_cluster_id)

        self.show_step(7)
        self.fuel_web.delete_env_wait(self.orig_cluster_id)
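The deleted module above reappears below split in two: upgrade_base.py keeps the step implementations (the "..._code" helpers) and the new tests_upgrade/test_os_upgrade.py keeps only thin proboscis test methods. Every test still follows the same resumable snapshot pattern: skip if its own snapshot already exists, revert the previous step's snapshot, do the work, snapshot the result. A minimal sketch of that pattern (check_run, revert_snapshot and make_snapshot here are stand-ins, not the fuel-qa API):

class SnapshotChainSketch(object):
    def __init__(self):
        self.snapshots = set()

    def check_run(self, name):
        # Skip a step whose result snapshot already exists.
        if name in self.snapshots:
            raise RuntimeError("skip: {0} already exists".format(name))

    def revert_snapshot(self, name):
        assert name in self.snapshots, "{0} is missing".format(name)

    def make_snapshot(self, name):
        self.snapshots.add(name)

    def step(self, previous, current, work):
        self.check_run(current)
        if previous:
            self.revert_snapshot(previous)
        work()
        self.make_snapshot(current)


chain = SnapshotChainSketch()
chain.step(None, "os_upgrade_env", lambda: print("octane upgrade-env"))
chain.step("os_upgrade_env", "upgrade_first_cic",
           lambda: print("octane upgrade-node --isolated"))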
@@ -19,11 +19,13 @@ from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_ceph_ha  # noqa
 from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_net_tmpl  # noqa
 from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_plugin  # noqa
 from fuelweb_test.tests.tests_upgrade import upgrader_tool  # noqa
+from fuelweb_test.tests.tests_upgrade import test_os_upgrade  # noqa
 
 __all__ = [
     'test_data_driven_upgrade',
     'test_data_driven_upgrade_ceph_ha',
     'test_data_driven_upgrade_net_tmpl',
     'test_data_driven_upgrade_plugin',
-    'upgrader_tool'
+    'upgrader_tool',
+    'test_os_upgrade'
 ]
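The comma added after 'upgrader_tool' is not cosmetic: without it, Python concatenates the two adjacent string literals and __all__ ends up with one bogus entry instead of two. A quick illustration:

# Missing comma: adjacent literals are joined at compile time.
broken = [
    'upgrader_tool'
    'test_os_upgrade'
]
fixed = [
    'upgrader_tool',
    'test_os_upgrade'
]
assert broken == ['upgrader_tooltest_os_upgrade']
assert fixed == ['upgrader_tool', 'test_os_upgrade']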
@@ -404,7 +404,7 @@ class UpgradeNoCluster(DataDrivenUpgradeBase):
         self.show_step(6)
         self.fuel_web.change_default_network_settings()
         self.fuel_web.client.get_releases()
-        # TODO(vkhlyunev): add aditional checks for validation of restored node
+        # TODO(vkhlyunev): add additional checks for validation of node
         self.env.make_snapshot(self.snapshot_name, is_make=True)
         self.cleanup()
 
fuelweb_test/tests/tests_upgrade/test_os_upgrade.py (new file, 348 lines)
@@ -0,0 +1,348 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

from proboscis import test

from fuelweb_test import logger
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.tests.tests_upgrade.upgrade_base import OSUpgradeBase


@test(groups=["os_upgrade"])
class TestOSupgrade(OSUpgradeBase):
    def __init__(self):
        super(TestOSupgrade, self).__init__()
        self.old_cluster_name = 'prepare_upgrade_ceph_ha'

    @test(depends_on_groups=['upgrade_ceph_ha_restore'],
          groups=["os_upgrade_env"])
    @log_snapshot_after_test
    def os_upgrade_env(self):
        """Octane clone target environment

        Scenario:
            1. Revert snapshot upgrade_ceph_ha_restore
            2. Run "octane upgrade-env <orig_env_id>"
            3. Ensure that new cluster was created with correct release

        """
        self.check_release_requirements()
        self.check_run('os_upgrade_env')
        self.env.revert_snapshot("upgrade_ceph_ha_restore")
        self.install_octane()

        self.upgrade_env_code()

        self.env.make_snapshot("os_upgrade_env", is_make=True)

    @test(depends_on=[os_upgrade_env], groups=["upgrade_first_cic"])
    @log_snapshot_after_test
    def upgrade_first_cic(self):
        """Upgrade first controller

        Scenario:
            1. Revert snapshot os_upgrade_env
            2. Select cluster for upgrade and upgraded cluster
            3. Select controller for upgrade
            4. Run "octane upgrade-node --isolated <seed_env_id> <node_id>"
            5. Check tasks status after upgrade run completion
            6. Run minimal OSTF sanity check (user list) on target cluster

        """
        self.check_release_requirements()
        self.check_run('upgrade_first_cic')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("os_upgrade_env")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.upgrade_first_controller_code(seed_cluster_id)

        self.env.make_snapshot("upgrade_first_cic", is_make=True)

    @test(depends_on=[upgrade_first_cic],
          groups=["upgrade_db"])
    @log_snapshot_after_test
    def upgrade_db(self):
        """Move and upgrade mysql db from target cluster to seed cluster

        Scenario:
            1. Revert snapshot upgrade_first_cic
            2. Select cluster for upgrade and upgraded cluster
            3. Select controller for db upgrade
            4. Collect from db IDs for upgrade (used in checks)
            5. Run "octane upgrade-db <orig_env_id> <seed_env_id>"
            6. Check upgrade status

        """

        self.check_release_requirements()
        self.check_run('upgrade_db')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_first_cic")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.upgrade_db_code(seed_cluster_id)

        self.env.make_snapshot("upgrade_db", is_make=True)

    @test(depends_on=[upgrade_db],
          groups=["upgrade_ceph"])
    @log_snapshot_after_test
    def upgrade_ceph(self):
        """Upgrade ceph

        Scenario:
            1. Revert snapshot upgrade_db
            2. Select cluster for upgrade and upgraded cluster
            3. Run octane upgrade-ceph <orig_env_id> <seed_env_id>
            4. Check CEPH health on seed env
        """

        self.check_release_requirements()
        self.check_run('upgrade_ceph')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_db")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.upgrade_ceph_code(seed_cluster_id)

        self.env.make_snapshot("upgrade_ceph", is_make=True)

    @test(depends_on=[upgrade_ceph],
          groups=["upgrade_controllers"])
    @log_snapshot_after_test
    def upgrade_controllers(self):
        """Upgrade control plane and remaining controllers

        Scenario:
            1. Revert snapshot upgrade_ceph
            2. Select cluster for upgrade and upgraded cluster
            3. Run octane upgrade-control <orig_env_id> <seed_env_id>
            4. Check cluster consistency
            5. Collect old controllers for upgrade
            6. Run octane upgrade-node <seed_cluster_id> <node_id> <node_id>
            7. Check tasks status after upgrade run completion
            8. Run network verification on target cluster
            9. Run minimal OSTF sanity check (user list) on target cluster

        """

        self.check_release_requirements()
        self.check_run('upgrade_controllers')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_ceph")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.upgrade_control_plane_code(seed_cluster_id)

        # upgrade controllers part

        self.upgrade_controllers_code(seed_cluster_id)

        self.env.make_snapshot("upgrade_controllers", is_make=True)

    @test(depends_on=[upgrade_controllers], groups=["upgrade_ceph_osd"])
    @log_snapshot_after_test
    def upgrade_ceph_osd(self):
        """Upgrade ceph osd

        Scenario:
            1. Revert snapshot upgrade_all_controllers
            2. Select cluster for upgrade and upgraded cluster
            3. Run octane upgrade-osd <target_env_id> <seed_env_id>
            4. Check CEPH health on seed env
            5. run network verification on target cluster
            6. run minimal OSTF sanity check (user list) on target cluster
        """

        self.check_release_requirements()
        self.check_run('upgrade_ceph_osd')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_controllers")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.upgrade_ceph_osd_code(seed_cluster_id)

        self.env.make_snapshot("upgrade_ceph_osd", is_make=True)

    @test(depends_on=[upgrade_ceph_osd],
          groups=["upgrade_old_nodes"])
    @log_snapshot_after_test
    def upgrade_old_nodes(self):
        """Upgrade all non controller nodes - no live migration

        Scenario:
            1. Revert snapshot upgrade_ceph_osd
            2. Select cluster for upgrade and upgraded cluster
            3. Collect nodes for upgrade
            4. Run octane upgrade-node --no-live-migration $SEED_ID <ID>
            5. Run network verification on target cluster
            6. Run minimal OSTF sanity check
        """
        self.check_release_requirements()

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_ceph_osd")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)

        old_nodes = self.fuel_web.client.list_cluster_nodes(
            self.orig_cluster_id)

        self.show_step(4)

        self.upgrade_nodes(
            seed_cluster_id=seed_cluster_id,
            nodes_str=" ".join([str(node["id"]) for node in old_nodes]),
            live_migration=False
        )

        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)

        self.env.make_snapshot("upgrade_old_nodes", is_make=True)

    @test(depends_on=[upgrade_old_nodes],
          groups=['cleanup_no_live', 'upgrade_cloud_no_live_migration'])
    @log_snapshot_after_test
    def octane_cleanup(self):
        """Clean-up octane

        Scenario:
            1. Revert snapshot upgrade_ceph_osd
            2. Select upgraded cluster
            3. Cleanup upgraded env
            4. Run network verification on target cluster
            5. Run OSTF check
            6. Drop orig cluster
        """
        self.check_release_requirements()

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_old_nodes")

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.clean_up(seed_cluster_id=seed_cluster_id)

        self.show_step(4)
        self.fuel_web.verify_network(seed_cluster_id)

        self.show_step(5)
        self.fuel_web.run_ostf(seed_cluster_id)

        self.show_step(6)
        self.fuel_web.delete_env_wait(self.orig_cluster_id)

    @test(depends_on=[upgrade_ceph_osd],
          groups=["upgrade_nodes_live_migration"])
    @log_snapshot_after_test
    def upgrade_nodes_live_migration(self):
        """Upgrade all non controller nodes with live migration

        Scenario:
            1. Revert snapshot upgrade_ceph_osd
            2. Select cluster for upgrade and upgraded cluster
            3. Collect nodes for upgrade
            4. Upgrade each node using octane upgrade-node $SEED_ID <ID>
            5. Run network verification on target cluster
            6. Run minimal OSTF sanity check
        """

        self.check_release_requirements()

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_ceph_osd")
        self.install_octane()

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)
        old_nodes = self.fuel_web.client.list_cluster_nodes(
            self.orig_cluster_id)

        self.show_step(4)
        for node in old_nodes:
            logger.info("Upgrading node {!s}, role {!s}".format(
                node['id'], node['roles']))

            self.upgrade_nodes(
                seed_cluster_id=seed_cluster_id,
                nodes_str=node['id'],
                live_migration=True
            )

        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)

        self.env.make_snapshot("upgrade_nodes_live_migration", is_make=True)

    @test(depends_on=[upgrade_nodes_live_migration],
          groups=['cleanup_live', 'upgrade_cloud_live_migration'])
    @log_snapshot_after_test
    def octane_cleanup_live(self):
        """Clean-up octane

        Scenario:
            1. Revert snapshot upgrade_ceph_osd
            2. Select upgraded cluster
            3. Cleanup upgraded env
            4. Run network verification on target cluster
            5. Run OSTF check
            6. Drop orig cluster
        """
        self.check_release_requirements()

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("upgrade_old_nodes")

        self.show_step(2)
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        self.clean_up(seed_cluster_id=seed_cluster_id)

        self.show_step(4)
        self.fuel_web.verify_network(seed_cluster_id)

        self.show_step(5)
        self.fuel_web.run_ostf(seed_cluster_id)

        self.show_step(6)
        self.fuel_web.delete_env_wait(self.orig_cluster_id)
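The new test class above relies on proboscis to chain the steps: each method belongs to its own group (so a run can start from any snapshot) and depends_on the previous method, so asking for a late group schedules everything before it. A minimal, self-contained example of that wiring (group and class names here are illustrative):

from proboscis import test
from proboscis import TestProgram


@test(groups=["demo_upgrade"])
class DemoUpgradeChain(object):
    @test(groups=["demo_step_one"])
    def step_one(self):
        print("step one")

    @test(depends_on=[step_one], groups=["demo_step_two"])
    def step_two(self):
        print("step two")


if __name__ == "__main__":
    # Select a group on the command line, e.g. --group=demo_step_two;
    # proboscis runs step_one first because of the depends_on link.
    TestProgram().run_and_exit()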
fuelweb_test/tests/tests_upgrade/upgrade_base.py (new file, 331 lines)
@@ -0,0 +1,331 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis import SkipTest
import six

from fuelweb_test import logger
from fuelweb_test.settings import KEYSTONE_CREDS
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \
    DataDrivenUpgradeBase


class OSUpgradeBase(DataDrivenUpgradeBase):
    def __init__(self):
        self.__old_cluster_name = None
        super(OSUpgradeBase, self).__init__()

    @property
    def old_cluster_name(self):
        return self.__old_cluster_name

    @old_cluster_name.setter
    def old_cluster_name(self, new_name):
        if not isinstance(new_name, (six.string_types, six.text_type)):
            logger.error('old_cluster_name === {!r}'.format(new_name))
            raise TypeError('{!r} is not string'.format(new_name))
        self.__old_cluster_name = new_name

    @staticmethod
    def check_release_requirements():
        if OPENSTACK_RELEASE_UBUNTU not in OPENSTACK_RELEASE:
            raise SkipTest('{0} not in {1}'.format(
                OPENSTACK_RELEASE_UBUNTU, OPENSTACK_RELEASE))

    def minimal_check(self, seed_cluster_id, nwk_check=False):
        if nwk_check:
            self.show_step(self.next_step)
            self.fuel_web.verify_network(seed_cluster_id)

        self.show_step(self.next_step)
        self.fuel_web.run_single_ostf_test(
            cluster_id=seed_cluster_id, test_sets=['sanity'],
            test_name=('fuel_health.tests.sanity.test_sanity_identity'
                       '.SanityIdentityTest.test_list_users'))

    def check_ceph_health(self, ip):
        ceph_health = self.ssh_manager.check_call(
            ip=ip, command="ceph health").stdout_str

        # There are an issue with PG calculation - LP#1464656
        try:
            assert_true("HEALTH_OK" in ceph_health,
                        "Ceph health is not ok! Inspect output below:\n"
                        "{!r}".format(ceph_health))
        except AssertionError:
            logger.warning("Ceph health is not ok! trying to check LP#1464656")
            if "HEALTH_WARN" in ceph_health and "too many PGs per OSD" in \
                    ceph_health:
                logger.info("Known issue in ceph - see LP#1464656 for details")
            else:
                raise

    @property
    def orig_cluster_id(self):
        """Get cluster id for old_cluster_name

        :rtype: int
        """
        if self.old_cluster_name is None:
            raise RuntimeError('old_cluster_name is not set')
        return self.fuel_web.client.get_cluster_id(self.old_cluster_name)

    def upgrade_env_code(self):
        self.show_step(self.next_step)
        self.ssh_manager.check_call(
            ip=self.env.get_admin_node_ip(),
            command="octane upgrade-env {0}".format(self.orig_cluster_id),
            error_info="'upgrade-env' command failed, inspect logs for details"
        )

        new_cluster_id = self.fuel_web.get_last_created_cluster()
        assert_not_equal(
            self.orig_cluster_id, new_cluster_id,
            "Cluster IDs are the same: {!r} and {!r}".format(
                self.orig_cluster_id, new_cluster_id))

        self.show_step(self.next_step)
        assert_equal(
            self.fuel_web.get_cluster_release_id(new_cluster_id),
            self.fuel_web.client.get_release_id(
                release_name='Liberty on Ubuntu 14.04'))

    def upgrade_first_controller_code(self, seed_cluster_id):
        self.show_step(self.next_step)
        controller = self.fuel_web.get_devops_node_by_nailgun_node(
            self.fuel_web.get_nailgun_cluster_nodes_by_roles(
                self.orig_cluster_id, ["controller"])[0])
        primary = self.fuel_web.get_nailgun_node_by_devops_node(
            self.fuel_web.get_nailgun_primary_node(controller)
        )

        self.show_step(self.next_step)
        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command="octane upgrade-node --isolated "
                    "{0} {1}".format(seed_cluster_id, primary["id"]),
            error_info="octane upgrade-node failed")

        self.show_step(self.next_step)
        tasks_started_by_octane = [
            task for task in self.fuel_web.client.get_tasks()
            if task['cluster'] == seed_cluster_id]

        for task in tasks_started_by_octane:
            self.fuel_web.assert_task_success(task)

        self.show_step(self.next_step)
        self.minimal_check(seed_cluster_id=seed_cluster_id)

    def upgrade_db_code(self, seed_cluster_id):
        self.show_step(self.next_step)
        orig_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.orig_cluster_id, ["controller"])[0]
        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"])[0]

        mysql_req = (
            'mysql cinder <<< "select id from volumes;"; '
            'mysql glance <<< "select id from images"; '
            'mysql neutron <<< "(select id from networks) '
            'UNION (select id from routers) '
            'UNION (select id from subnets)"; '
            'mysql keystone <<< "(select id from project) '
            'UNION (select id from user)"')

        self.show_step(self.next_step)
        target_ids = self.ssh_manager.check_call(
            ip=orig_controller["ip"], command=mysql_req).stdout

        self.show_step(self.next_step)
        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command="octane upgrade-db {0} {1}".format(
                self.orig_cluster_id, seed_cluster_id),
            error_info="octane upgrade-db failed")

        self.show_step(self.next_step)

        crm_status = self.ssh_manager.check_call(
            ip=seed_controller["ip"], command="crm resource status").stdout

        while crm_status:
            current = crm_status.pop(0)
            if "vip" in current:
                assert_true("Started" in current)
            elif "master_p" in current:
                next_element = crm_status.pop(0)
                assert_true("Masters: [ node-" in next_element)
            elif any(x in current for x in ["ntp", "mysql", "dns"]):
                next_element = crm_status.pop(0)
                assert_true("Started" in next_element)
            elif any(x in current for x in ["nova", "cinder", "keystone",
                                            "heat", "neutron", "glance"]):
                next_element = crm_status.pop(0)
                assert_true("Stopped" in next_element)

        seed_ids = self.ssh_manager.check_call(
            ip=seed_controller["ip"], command=mysql_req).stdout
        assert_equal(sorted(target_ids), sorted(seed_ids),
                     "Objects in target and seed dbs are different")

    def upgrade_ceph_code(self, seed_cluster_id):
        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"])[0]

        self.show_step(self.next_step)
        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command="octane upgrade-ceph {0} {1}".format(
                self.orig_cluster_id, seed_cluster_id),
            error_info="octane upgrade-ceph failed")

        self.show_step(self.next_step)
        self.check_ceph_health(seed_controller['ip'])

    def upgrade_control_plane_code(self, seed_cluster_id):
        self.show_step(self.next_step)
        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command="octane upgrade-control {0} {1}".format(
                self.orig_cluster_id, seed_cluster_id),
            error_info="octane upgrade-control failed")

        self.show_step(self.next_step)
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"])

        old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.orig_cluster_id, ["controller"])

        old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.orig_cluster_id, ["compute"])

        def collect_management_ips(node_list):
            result = []
            for item in node_list:
                for data in item["network_data"]:
                    if data["name"] == "management":
                        result.append(data["ip"].split("/")[0])
            return result

        ping_ips = collect_management_ips(controllers + old_computes)
        ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id))

        non_ping_ips = collect_management_ips(old_controllers)

        ping_cmd = "ping -W 1 -i 1 -s 56 -c 1 -w 10 {host}"

        for node in controllers + old_computes:
            self.ssh_manager.check_call(
                ip=node["ip"], command="ip -s -s neigh flush all")

            for ip in ping_ips:
                self.ssh_manager.check_call(
                    ip=node["ip"],
                    command=ping_cmd.format(host=ip),
                    error_info="Can not ping {0} from {1}"
                               "need to check network"
                               " connectivity".format(ip, node["ip"]))

            for ip in non_ping_ips:
                self.ssh_manager.check_call(
                    ip=node["ip"],
                    command=ping_cmd.format(host=ip),
                    error_info="Patch ports from old controllers wasn't "
                               "removed",
                    expected=[1, 2])  # No reply, Other errors

        crm = self.ssh_manager.check_call(
            ip=controllers[0]["ip"],
            command="crm resource status").stdout

        while crm:
            current = crm.pop(0)
            if "vip" in current:
                assert_true("Started" in current)
            elif "master_p" in current:
                next_element = crm.pop(0)
                assert_true("Masters: [ node-" in next_element)
            elif any(x in current for x in ["ntp", "mysql", "dns",
                                            "nova", "cinder", "keystone",
                                            "heat", "neutron", "glance"]):
                next_element = crm.pop(0)
                assert_true("Started" in next_element)

    def upgrade_controllers_code(self, seed_cluster_id):
        self.show_step(self.next_step)
        old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.orig_cluster_id, ["controller"])

        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command="octane upgrade-node {0} {1}".format(
                seed_cluster_id,
                " ".join([str(ctrl["id"]) for ctrl in old_controllers])),
            error_info="octane upgrade-node failed")

        self.show_step(self.next_step)
        tasks_started_by_octane = [
            task for task in self.fuel_web.client.get_tasks()
            if task['cluster'] == seed_cluster_id]

        for task in tasks_started_by_octane:
            self.fuel_web.assert_task_success(task)

        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)

    def upgrade_ceph_osd_code(self, seed_cluster_id):
        seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"]
        )[0]

        self.show_step(self.next_step)
        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command="octane upgrade-osd --admin-password {0} {1}".format(
                KEYSTONE_CREDS['password'],
                self.orig_cluster_id),
            error_info="octane upgrade-osd failed"
        )

        self.show_step(self.next_step)
        self.check_ceph_health(seed_controller['ip'])

        self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)

    def upgrade_nodes(self, seed_cluster_id, nodes_str, live_migration=False):
        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command=(
                "octane upgrade-node {migration} {seed_cluster_id} "
                "{nodes!s}".format(
                    migration='' if live_migration else '--no-live-migration',
                    seed_cluster_id=seed_cluster_id,
                    nodes=nodes_str)),
            error_info="octane upgrade-node failed")

    def clean_up(self, seed_cluster_id):
        self.show_step(self.next_step)
        self.ssh_manager.check_call(
            ip=self.ssh_manager.admin_ip,
            command="octane cleanup {0}".format(seed_cluster_id),
            error_info="octane cleanup cmd failed")
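Both upgrade_db_code and upgrade_control_plane_code above consume "crm resource status" line by line, pairing a resource line with the state printed on the following line. A standalone sketch of that consumption pattern against canned output (the sample lines are illustrative, not captured from a real cluster):

def check_crm(lines, expect_openstack_stopped=True):
    # Pop lines in order: a resource line, then (for clone sets) its state line.
    lines = list(lines)
    while lines:
        current = lines.pop(0)
        if "vip" in current:
            assert "Started" in current
        elif "master_p" in current:
            assert "Masters: [ node-" in lines.pop(0)
        elif any(x in current for x in ("ntp", "mysql", "dns")):
            assert "Started" in lines.pop(0)
        elif any(x in current for x in ("nova", "cinder", "keystone",
                                        "heat", "neutron", "glance")):
            wanted = "Stopped" if expect_openstack_stopped else "Started"
            assert wanted in lines.pop(0)


sample = [
    "vip__management (ocf::fuel:ns_IPaddr2): Started node-1.domain.tld",
    "Clone Set: clone_p_mysqld [p_mysqld]",
    "    Started: [ node-1.domain.tld ]",
    "Clone Set: clone_p_neutron-l3-agent [p_neutron-l3-agent]",
    "    Stopped: [ node-1.domain.tld ]",
]
check_crm(sample)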