Merge "Refactor nova-network cases to neutron"

Jenkins
2015-08-26 11:47:51 +00:00
committed by Gerrit Code Review
9 changed files with 363 additions and 154 deletions

View File

@@ -73,6 +73,11 @@ Os Actions
.. automodule:: fuelweb_test.helpers.os_actions
:members:
Ovs helper
----------
.. automodule:: fuelweb_test.helpers.ovs
:members:
Patching
--------
.. automodule:: fuelweb_test.helpers.patching

View File

@@ -23,6 +23,7 @@ from devops.helpers.helpers import wait
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test.helpers.utils import run_on_remote
from fuelweb_test.settings import EXTERNAL_DNS
from fuelweb_test.settings import EXTERNAL_NTP
from fuelweb_test.settings import OPENSTACK_RELEASE
@@ -364,29 +365,23 @@ def restart_nailgun(remote):
def find_backup(remote):
try:
arch_dir = ''.join(
remote.execute("ls -1u /var/backup/fuel/ | sed -n 1p")['stdout'])
arch_path = ''.join(
remote.execute("ls -1u /var/backup/fuel/{0}/*.lrz".
format(arch_dir.strip()))["stdout"])
logger.debug('arch_path is {0}'.format(arch_path))
backups = remote.execute("ls -1u /var/backup/fuel/*/*.lrz")["stdout"]
if backups:
arch_path = backups[0]
logger.info('Backup archive found: {0}'.format(arch_path))
return arch_path
except Exception as e:
logger.error('exception is {0}'.format(e))
raise e
else:
raise ValueError("No backup file found in '/var/backup/fuel/'")
@logwrap
def backup_check(remote):
logger.info("Backup check archive status")
path = find_backup(remote)
assert_true(path, "Can not find backup. Path value {0}".format(path))
arch_result = ''.join(
remote.execute(("if [ -e {0} ]; "
"then echo Archive exists;"
" fi").format(path.rstrip()))["stdout"])
assert_true("Archive exists" in arch_result, "Archive does not exist")
assert_true(path, "Can not find backup. Path value '{0}'".format(path))
test_result = remote.execute("test -e {0}".format(path.rstrip()))
assert_true(test_result['exit_code'] == 0,
"Archive '{0}' does not exist".format(path.rstrip()))
@logwrap
@@ -968,24 +963,48 @@ def check_ping(remote, host, deadline=10, size=56, timeout=1, interval=1):
@logwrap
def check_nova_dhcp_lease(remote, instance_ip, instance_mac, node_dhcp_ip):
def check_neutron_dhcp_lease(remote, instance_ip, instance_mac,
dhcp_server_ip, dhcp_port_tag):
"""Check if the DHCP server offers a lease for a client with the specified
MAC address
:param SSHClient remote: fuel-devops.helpers.helpers object
:param str instance_ip: IP address of instance
:param str instance_mac: MAC address that will be checked
:param str dhcp_server_ip: IP address of the DHCP server to request a lease from
:param str dhcp_port_tag: OVS port tag used to access the DHCP server
:return bool: True if a DHCP lease for 'instance_mac' was obtained
"""
logger.debug("Checking DHCP server {0} for lease {1} with MAC address {2}"
.format(node_dhcp_ip, instance_ip, instance_mac))
res = remote.execute('ip link add dhcptest0 type veth peer name dhcptest1;'
'brctl addif br100 dhcptest0;'
'ifconfig dhcptest0 up;'
'ifconfig dhcptest1 hw ether {1};'
'ifconfig dhcptest1 up;'
'dhcpcheck request dhcptest1 {2} --range_start {0} '
'--range_end 255.255.255.255 | fgrep \" {2} \";'
'ifconfig dhcptest1 down;'
'ifconfig dhcptest0 down;'
'brctl delif br100 dhcptest0;'
'ip link delete dhcptest0;'
.format(instance_ip, instance_mac, node_dhcp_ip))
res_str = ''.join(res['stdout'])
logger.debug("DHCP server answer: {}".format(res_str))
return ' ack ' in res_str
.format(dhcp_server_ip, instance_ip, instance_mac))
ovs_port_name = 'tapdhcptest1'
ovs_cmd = '/usr/bin/ovs-vsctl --timeout=10 --oneline --format=json -- '
ovs_add_port_cmd = ("--if-exists del-port {0} -- "
"add-port br-int {0} -- "
"set Interface {0} type=internal -- "
"set Port {0} tag={1}"
.format(ovs_port_name, dhcp_port_tag))
ovs_del_port_cmd = ("--if-exists del-port {0}".format(ovs_port_name))
# Add an OVS interface with a tag for accessing the DHCP server
run_on_remote(remote, ovs_cmd + ovs_add_port_cmd)
# Assign the created interface the same MAC address
# that was used for the instance.
run_on_remote(remote, "ifconfig {0} hw ether {1}".format(ovs_port_name,
instance_mac))
run_on_remote(remote, "ifconfig {0} up".format(ovs_port_name))
# Perform a 'dhcpcheck' request to check if the lease can be obtained
lease = run_on_remote(remote,
"dhcpcheck request {0} {1} --range_start {2} "
"--range_end 255.255.255.255 | fgrep \" {1} \""
.format(ovs_port_name, dhcp_server_ip, instance_ip))
# Remove the OVS interface
run_on_remote(remote, ovs_cmd + ovs_del_port_cmd)
logger.debug("DHCP server answer: {}".format(lease))
return ' ack ' in lease
def check_available_mode(remote):
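Taken together, the helpers touched by this change are meant to compose roughly as below. This is a sketch only, not part of the commit: 'remote', 'os_conn', 'instance_ip' and 'instance_mac' are assumed to be prepared by the test, and get_neutron_dhcp_ports / ovs_get_tag_by_port are added further down in this change.

# Rough usage sketch (assumed names): find the Neutron DHCP port of the
# network, resolve its OVS tag on the compute node, then ask the DHCP
# server whether it still offers a lease for the instance's MAC.
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.ovs import ovs_get_tag_by_port

net_id = os_conn.get_network('net04')['id']
dhcp_port = os_conn.get_neutron_dhcp_ports(net_id)[0]
dhcp_server_ip = dhcp_port['fixed_ips'][0]['ip_address']
dhcp_port_tag = ovs_get_tag_by_port(remote, dhcp_port['id'])

lease_offered = checkers.check_neutron_dhcp_lease(
    remote, instance_ip, instance_mac, dhcp_server_ip, dhcp_port_tag)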

View File

@@ -238,40 +238,41 @@ class OpenStackActions(common.Common):
def execute_through_host(self, ssh, vm_host, cmd, creds=()):
logger.debug("Making intermediate transport")
with ssh._ssh.get_transport() as interm_transp:
logger.debug("Opening channel to VM")
interm_chan = interm_transp.open_channel('direct-tcpip',
(vm_host, 22),
(ssh.host, 0))
logger.debug("Opening paramiko transport")
transport = paramiko.Transport(interm_chan)
logger.debug("Starting client")
transport.start_client()
logger.info("Passing authentication to VM: {}".format(creds))
if not creds:
creds = ('cirros', 'cubswin:)')
transport.auth_password(creds[0], creds[1])
interm_transp = ssh._ssh.get_transport()
logger.debug("Opening session")
channel = transport.open_session()
logger.info("Executing command: {}".format(cmd))
channel.exec_command(cmd)
logger.debug("Opening channel to VM")
interm_chan = interm_transp.open_channel('direct-tcpip',
(vm_host, 22),
(ssh.host, 0))
logger.debug("Opening paramiko transport")
transport = paramiko.Transport(interm_chan)
logger.debug("Starting client")
transport.start_client()
logger.info("Passing authentication to VM: {}".format(creds))
if not creds:
creds = ('cirros', 'cubswin:)')
transport.auth_password(creds[0], creds[1])
result = {
'stdout': [],
'stderr': [],
'exit_code': 0
}
logger.debug("Opening session")
channel = transport.open_session()
logger.info("Executing command: {}".format(cmd))
channel.exec_command(cmd)
logger.debug("Receiving exit_code")
result['exit_code'] = channel.recv_exit_status()
logger.debug("Receiving stdout")
result['stdout'] = channel.recv(1024)
logger.debug("Receiving stderr")
result['stderr'] = channel.recv_stderr(1024)
result = {
'stdout': [],
'stderr': [],
'exit_code': 0
}
logger.debug("Closing channel")
channel.close()
logger.debug("Receiving exit_code")
result['exit_code'] = channel.recv_exit_status()
logger.debug("Receiving stdout")
result['stdout'] = channel.recv(1024)
logger.debug("Receiving stderr")
result['stderr'] = channel.recv_stderr(1024)
logger.debug("Closing channel")
channel.close()
return result
@@ -424,6 +425,13 @@ class OpenStackActions(common.Common):
nodes = [i['host'] for i in result['agents']]
return nodes
def get_neutron_dhcp_ports(self, net_id):
ports = self.neutron.list_ports()['ports']
network_ports = [x for x in ports
if x['device_owner'] == 'network:dhcp'
and x['network_id'] == net_id]
return network_ports
def create_pool(self, pool_name):
sub_net = self.neutron.list_subnets()
body = {"pool": {"name": pool_name,

View File

@@ -0,0 +1,95 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelweb_test import logger
from fuelweb_test.helpers.utils import run_on_remote
def ovs_get_data(remote, table, columns=None):
"""Get data from a specified OpenVSwitch table
:param SSHClient remote: fuel-devops.helpers.helpers object
:param str table: ovs table name (see `ovsdb-client list-tables`)
:param list columns:
names of the columns to request. If None, all columns
will be requested.
:return dict: data from JSON object
"""
if columns:
col = '--columns=' + ','.join(columns)
else:
col = ''
cmd = ('ovs-vsctl --oneline --format=json {columns} list {table}'
.format(columns=col, table=table))
res = run_on_remote(remote, cmd, jsonify=True)
logger.debug("OVS output of the command '{0}': {1}".format(cmd, res))
return res
def ovs_decode_columns(ovs_data):
"""Decode columns from OVS data format to a python dict
:param str ovs_data: data from JSON object
:return list: list of decoded dicts
"""
data = ovs_data['data']
headings = ovs_data['headings']
res = []
for fields in data:
res_fields = {}
for i, field in enumerate(fields):
if isinstance(field, list):
if field[0] == 'map':
d = {}
for f in field[1]:
d[f[0]] = f[1]
res_fields[headings[i]] = d
elif field[0] == 'uuid':
res_fields[headings[i]] = {'uuid': field[1]}
else:
res_fields[headings[i]] = field
else:
res_fields[headings[i]] = field
res.append(res_fields)
return res
def ovs_get_tag_by_port(remote, port):
"""Get the tag used for OVS interface by Neutron port ID
:param SSHClient remote: fuel-devops.helpers.helpers object
:param str port: Neutron port ID
:return str: tag number
"""
interfaces_raw = ovs_get_data(remote,
table='Interface',
columns=['external_ids', 'name'])
interfaces = ovs_decode_columns(interfaces_raw)
ports_ifaces = {x['external_ids']['iface-id']: x['name']
for x in interfaces if 'iface-id' in x['external_ids']}
logger.debug("OVS interfaces: {0}".format(ports_ifaces))
if port not in ports_ifaces:
raise ValueError("Neutron port {0} not found in OVS interfaces."
.format(port))
iface_id = ports_ifaces[port]
ovs_port_raw = ovs_get_data(remote,
table='Port {0}'.format(iface_id),
columns=['tag'])
ovs_port = ovs_decode_columns(ovs_port_raw)
logger.debug("OVS tag for port {0}: {1}".format(iface_id, ovs_port))
ovs_tag = ovs_port[0]['tag']
return str(ovs_tag)
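For reference, a minimal sketch of the data these helpers work with; the sample below is invented, but follows the 'headings'/'data' layout produced by ovs-vsctl --format=json and shows what ovs_decode_columns returns for it.

from fuelweb_test.helpers.ovs import ovs_decode_columns

# Invented sample in the JSON layout returned by
# 'ovs-vsctl --oneline --format=json list ...'.
sample = {
    'headings': ['name', 'tag', 'external_ids'],
    'data': [
        ['tap1234abcd-56', 5,
         ['map', [['iface-id', 'neutron-port-uuid']]]],
    ],
}

decoded = ovs_decode_columns(sample)
# decoded == [{'name': 'tap1234abcd-56',
#              'tag': 5,
#              'external_ids': {'iface-id': 'neutron-port-uuid'}}]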

View File

@@ -314,7 +314,7 @@ def cond_upload(remote, source, target, condition=''):
@logwrap
def run_on_remote(remote, cmd, jsonify=False, clear=False):
def run_on_remote(remote, cmd, jsonify=False, clear=False, err_msg=None):
# TODO(ivankliuk): move it to devops.helpers.SSHClient
"""Execute ``cmd`` on ``remote`` and return result.
@@ -333,8 +333,10 @@ def run_on_remote(remote, cmd, jsonify=False, clear=False):
'stdout': result['stdout'],
'stderr': result['stderr'],
'exit_code': result['exit_code']}
error_msg = ("Unexpected error occurred during execution. "
"Details: {0}".format(error_details))
error_msg = ("{0} Command: '{1}' Details: {2}"
.format(err_msg or "Unexpected error occurred.",
cmd,
error_details))
logger.error(error_msg)
raise Exception(error_msg)
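A minimal usage sketch of the new err_msg argument (command and message are illustrative): on failure the custom message is prepended to the command and the stdout/stderr/exit_code details in the raised exception.

from fuelweb_test.helpers.utils import run_on_remote

# Illustrative call; 'remote' is an SSHClient prepared by the caller.
run_on_remote(remote, 'dockerctl backup',
              err_msg='dockerctl backup failed on the master node')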

View File

@@ -30,6 +30,7 @@ from proboscis.asserts import assert_true
from fuelweb_test.helpers import ceph
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.utils import run_on_remote
from fuelweb_test import logwrap
from fuelweb_test import logger
from fuelweb_test import quiet_logger
@@ -2002,37 +2003,25 @@ class FuelWebClient(object):
remote.execute('sed -i "{0}" {1}'.format(modification, file))
def backup_master(self, remote):
logger.debug("Start backup of master node")
assert_equal(
0, remote.execute(
"echo CALC_MY_MD5SUM > /etc/fuel/data")['exit_code'],
'command calc_my_mdsum failed')
assert_equal(
0, remote.execute(
"iptables-save > /etc/fuel/iptables-backup")['exit_code'],
'can not save iptables in iptables-backup')
assert_equal(0, remote.execute(
"md5sum /etc/fuel/data | sed -n 1p | "
"awk '{print $1}'>/etc/fuel/sum")['exit_code'],
'failed to create sum file')
assert_equal(0, remote.execute('dockerctl backup')['exit_code'],
'dockerctl backup failed with non zero exit code')
assert_equal(0, remote.execute('rm -f /etc/fuel/data')['exit_code'],
'Can not remove /etc/fuel/data')
logger.debug("Finish backup of master node")
logger.info("Backup of the master node is started.")
run_on_remote(remote, "echo CALC_MY_MD5SUM > /etc/fuel/data",
err_msg='command calc_my_mdsum failed')
run_on_remote(remote, "iptables-save > /etc/fuel/iptables-backup",
err_msg='can not save iptables in iptables-backup')
run_on_remote(remote,
"md5sum /etc/fuel/data | cut -d" " -f1 > /etc/fuel/sum",
err_msg='failed to create sum file')
run_on_remote(remote, 'dockerctl backup')
run_on_remote(remote, 'rm -f /etc/fuel/data',
err_msg='Can not remove /etc/fuel/data')
logger.info("Backup of the master node is complete.")
@logwrap
def restore_master(self, remote):
logger.debug("Start restore master node")
logger.info("Restore of the master node is started.")
path = checkers.find_backup(remote)
assert_equal(
0,
remote.execute('dockerctl restore {0}'.format(path))['exit_code'],
'dockerctl restore finishes with non-zero exit code')
logger.debug("Finish restore master node")
run_on_remote(remote, 'dockerctl restore {0}'.format(path))
logger.info("Restore of the master node is complete.")
@logwrap
def restore_check_nailgun_api(self, remote):

View File

@@ -15,7 +15,9 @@
from proboscis import SkipTest
from proboscis import test
from fuelweb_test import logger
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers.utils import get_test_method_name
from fuelweb_test.helpers.utils import timestat
from fuelweb_test.models.environment import EnvironmentModel
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS
@@ -44,6 +46,28 @@ class TestBasic(object):
if self.env.d_env.has_snapshot(snapshot_name):
raise SkipTest()
def show_step(self, step, details=''):
"""Show a description of the step taken from docstring
:param int/str step: step number to show
:param str details: additional info for a step
"""
test_func_name = get_test_method_name()
test_func = getattr(self.__class__, test_func_name)
docstring = test_func.__doc__
docstring = '\n'.join([s.strip() for s in docstring.split('\n')])
steps = {s.split('. ')[0]: s for s in
docstring.split('\n') if s and s[0].isdigit()}
if details:
details_msg = ': {0} '.format(details)
else:
details_msg = ''
if str(step) in steps:
logger.info("\n" + " " * 55 + "<<< {0} {1}>>>"
.format(steps[str(step)], details_msg))
else:
logger.info("\n" + " " * 55 + "<<< {0}. (no step description "
"in scenario) {1}>>>".format(str(step), details_msg))
@test
class SetupEnvironment(TestBasic):
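As an illustration, a hedged sketch of how show_step resolves step descriptions, assuming it is called from inside the running test method so the method's docstring can be found; class, method and scenario names below are made up.

class ExampleTest(TestBasic):

    def deploy_example(self):
        """Deploy an example cluster

        Scenario:
            1. Create cluster
            2. Add nodes and deploy the cluster

        """
        self.show_step(1)   # expected to log: <<< 1. Create cluster >>>
        self.show_step(2, details='3 slaves')   # details appended to the step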

View File

@@ -25,6 +25,7 @@ from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers import ceph
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers.ovs import ovs_get_tag_by_port
from fuelweb_test import ostf_test_mapping as map_ostf
from fuelweb_test import settings
from fuelweb_test.settings import NEUTRON_ENABLE
@@ -520,13 +521,22 @@ class VmBackedWithCephMigrationBasic(TestBasic):
8. Migrate VM
9. Check cluster and server state after migration
10. Terminate VM
11. Check that DHCP lease is not offered for MAC of deleted VM
12. Create a new VM for migration, assign floating ip
13. Create a volume and attach it to the VM
14. Create filesystem on the new volume and mount it to the VM
15. Migrate VM
16. Mount the volume after migration
17. Check cluster and server state after migration
18. Terminate VM
Duration 35m
Snapshot vm_backed_with_ceph_live_migration
"""
self.env.revert_snapshot("ready_with_3_slaves")
self.show_step(1)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
@@ -534,10 +544,15 @@ class VmBackedWithCephMigrationBasic(TestBasic):
'volumes_ceph': True,
'images_ceph': True,
'ephemeral_ceph': True,
'volumes_lvm': False
'volumes_lvm': False,
'net_provider': 'neutron',
'net_segment_type': NEUTRON_SEGMENT_TYPE,
}
)
self.show_step(2)
self.show_step(3)
self.fuel_web.update_nodes(
cluster_id,
{
@@ -548,6 +563,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
)
creds = ("cirros", "test")
self.show_step(4)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
@@ -559,6 +576,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=test_path)
self.show_step(5)
try:
_check()
except AssertionError:
@@ -569,26 +588,33 @@ class VmBackedWithCephMigrationBasic(TestBasic):
time.sleep(60)
_check()
self.show_step(6)
# Run ostf
self.fuel_web.run_ostf(cluster_id)
self.show_step(7)
# Create new server
os = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
logger.info("Create new server")
srv = os.create_server_for_migration(
neutron=True,
scenario='./fuelweb_test/helpers/instance_initial_scenario')
logger.info("Srv is currently in status: %s" % srv.status)
srv_remote_node = self.fuel_web.get_ssh_for_node(
self.fuel_web.find_devops_node_by_nailgun_fqdn(
os.get_srv_hypervisor_name(srv),
self.env.d_env.nodes().slaves[:3]).name)
srv_instance_ip = os.get_nova_instance_ip(srv)
srv_instance_mac = os.get_instance_mac(srv_remote_node, srv)
res = ''.join(srv_remote_node.execute('ip r | fgrep br100')['stdout'])
srv_node_dhcp_ip = res.split()[-1]
# Prepare to DHCP leases checks
srv_instance_ip = os.get_nova_instance_ip(srv, net_name='net04')
srv_host_name = self.fuel_web.find_devops_node_by_nailgun_fqdn(
os.get_srv_hypervisor_name(srv),
self.env.d_env.nodes().slaves[:3]).name
net_id = os.get_network('net04')['id']
ports = os.get_neutron_dhcp_ports(net_id)
dhcp_server_ip = ports[0]['fixed_ips'][0]['ip_address']
with self.fuel_web.get_ssh_for_node(srv_host_name) as srv_remote_node:
srv_instance_mac = os.get_instance_mac(srv_remote_node, srv)
logger.info("Assigning floating ip to server")
floating_ip = os.assign_floating_ip(srv)
@@ -597,10 +623,11 @@ class VmBackedWithCephMigrationBasic(TestBasic):
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
md5before = os.get_md5sum(
"/home/test_file",
self.fuel_web.get_ssh_for_node("slave-01"),
floating_ip.ip, creds)
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
md5before = os.get_md5sum(
"/home/test_file", remote, floating_ip.ip, creds)
self.show_step(8)
logger.info("Get available computes")
avail_hosts = os.get_hosts_for_migr(srv_host)
@@ -611,10 +638,9 @@ class VmBackedWithCephMigrationBasic(TestBasic):
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
md5after = os.get_md5sum(
"/home/test_file",
self.fuel_web.get_ssh_for_node("slave-01"),
floating_ip.ip, creds)
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
md5after = os.get_md5sum(
"/home/test_file", remote, floating_ip.ip, creds)
assert_true(
md5after in md5before,
@@ -622,11 +648,14 @@ class VmBackedWithCephMigrationBasic(TestBasic):
"Before migration md5 was equal to: {bef}"
"Now it eqals: {aft}".format(bef=md5before, aft=md5after))
res = os.execute_through_host(
self.fuel_web.get_ssh_for_node("slave-01"),
floating_ip.ip, "ping -q -c3 -w10 {0} | grep 'received' |"
" grep -v '0 packets received'"
.format(settings.PUBLIC_TEST_IP), creds)
self.show_step(9)
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
res = os.execute_through_host(
remote, floating_ip.ip,
"ping -q -c3 -w10 {0} | grep 'received' |"
" grep -v '0 packets received'"
.format(settings.PUBLIC_TEST_IP), creds)
logger.info("Ping {0} result on vm is: {1}"
.format(settings.PUBLIC_TEST_IP, res['stdout']))
@@ -636,25 +665,32 @@ class VmBackedWithCephMigrationBasic(TestBasic):
logger.info("Server is now on host %s" %
os.get_srv_host_name(new_srv))
self.show_step(10)
logger.info("Terminate migrated server")
os.delete_instance(new_srv)
assert_true(os.verify_srv_deleted(new_srv),
"Verify server was deleted")
self.show_step(11)
# Check if the dhcp lease for instance still remains
# on the previous compute node. Related Bug: #1391010
assert_false(checkers.check_nova_dhcp_lease(srv_remote_node,
srv_instance_ip,
srv_instance_mac,
srv_node_dhcp_ip),
"Instance has been deleted, but it\'s DHCP lease "
"for IP:{0} with MAC:{1} still remains on the "
"compute node {2}".format(srv_instance_ip,
srv_instance_mac,
srv_host))
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
dhcp_port_tag = ovs_get_tag_by_port(remote, ports[0]['id'])
assert_false(checkers.check_neutron_dhcp_lease(remote,
srv_instance_ip,
srv_instance_mac,
dhcp_server_ip,
dhcp_port_tag),
"Instance has been deleted, but it's DHCP lease "
"for IP:{0} with MAC:{1} still offers by Neutron DHCP"
" agent.".format(srv_instance_ip,
srv_instance_mac))
self.show_step(12)
# Create a new server
logger.info("Create new server")
logger.info("Create a new server for migration with volume")
srv = os.create_server_for_migration(
neutron=True,
scenario='./fuelweb_test/helpers/instance_initial_scenario')
logger.info("Srv is currently in status: %s" % srv.status)
@@ -663,47 +699,59 @@ class VmBackedWithCephMigrationBasic(TestBasic):
srv_host = os.get_srv_host_name(srv)
logger.info("Server is on host %s" % srv_host)
self.show_step(13)
logger.info("Create volume")
vol = os.create_volume()
logger.info("Attach volume to server")
os.attach_volume(vol, srv)
self.show_step(14)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
logger.info("Create filesystem and mount volume")
os.execute_through_host(
self.fuel_web.get_ssh_for_node("slave-01"),
floating_ip.ip, 'sudo sh /home/mount_volume.sh', creds)
os.execute_through_host(
self.fuel_web.get_ssh_for_node("slave-01"),
floating_ip.ip, 'sudo touch /mnt/file-on-volume', creds)
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
os.execute_through_host(
remote,
floating_ip.ip, 'sudo sh /home/mount_volume.sh', creds)
os.execute_through_host(
remote,
floating_ip.ip, 'sudo touch /mnt/file-on-volume', creds)
self.show_step(15)
logger.info("Get available computes")
avail_hosts = os.get_hosts_for_migr(srv_host)
logger.info("Migrating server")
new_srv = os.migrate_server(srv, avail_hosts[0], timeout=120)
logger.info("Check cluster and server state after migration")
logger.info("Check cluster and server state after migration")
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
self.show_step(16)
logger.info("Mount volume after migration")
out = os.execute_through_host(
self.fuel_web.get_ssh_for_node("slave-01"),
floating_ip.ip, 'sudo mount /dev/vdb /mnt', creds)
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
out = os.execute_through_host(
remote,
floating_ip.ip, 'sudo mount /dev/vdb /mnt', creds)
logger.info("out of mounting volume is: %s" % out['stdout'])
assert_true("file-on-volume" in os.execute_through_host(
self.fuel_web.get_ssh_for_node("slave-01"),
floating_ip.ip, "sudo ls /mnt", creds)['stdout'],
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
out = os.execute_through_host(
remote,
floating_ip.ip, "sudo ls /mnt", creds)
assert_true("file-on-volume" in out['stdout'],
"File is abscent in /mnt")
self.show_step(17)
logger.info("Check Ceph health is ok after migration")
self.fuel_web.check_ceph_status(cluster_id)
logger.info("Server is now on host %s" %
os.get_srv_host_name(new_srv))
self.show_step(18)
logger.info("Terminate migrated server")
os.delete_instance(new_srv)
assert_true(os.verify_srv_deleted(new_srv),
@@ -744,6 +792,8 @@ class CheckCephPartitionsAfterReboot(TestBasic):
"""
self.env.revert_snapshot("ready_with_3_slaves")
self.show_step(1)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
@@ -751,10 +801,15 @@ class CheckCephPartitionsAfterReboot(TestBasic):
'volumes_ceph': True,
'images_ceph': True,
'ephemeral_ceph': True,
'volumes_lvm': False
'volumes_lvm': False,
'net_provider': 'neutron',
'net_segment_type': NEUTRON_SEGMENT_TYPE,
}
)
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.fuel_web.update_nodes(
cluster_id,
{
@@ -763,19 +818,27 @@ class CheckCephPartitionsAfterReboot(TestBasic):
'slave-03': ['ceph-osd']
}
)
self.show_step(5)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(6)
for node in ["slave-02", "slave-03"]:
self.show_step(7, node)
logger.info("Get partitions for {node}".format(node=node))
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
before_reboot_partitions = [checkers.get_ceph_partitions(
self.env.d_env.get_ssh_to_remote(_ip),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
self.show_step(8, node)
logger.info("Warm-restart nodes")
self.fuel_web.warm_restart_nodes(
[self.fuel_web.environment.d_env.get_node(name=node)])
self.show_step(9, node)
logger.info("Get partitions for {node} once again".format(
node=node
))
@@ -790,13 +853,16 @@ class CheckCephPartitionsAfterReboot(TestBasic):
logger.info("After reboot: %s" % after_reboot_partitions)
raise Exception()
self.show_step(10, node)
logger.info("Check Ceph health is ok after reboot")
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(11, node)
logger.info("Cold-restart nodes")
self.fuel_web.cold_restart_nodes(
[self.fuel_web.environment.d_env.get_node(name=node)])
self.show_step(12, node)
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
after_reboot_partitions = [checkers.get_ceph_partitions(
self.env.d_env.get_ssh_to_remote(_ip),
@@ -808,5 +874,6 @@ class CheckCephPartitionsAfterReboot(TestBasic):
logger.info("After reboot: %s" % after_reboot_partitions)
raise Exception()
self.show_step(13, node)
logger.info("Check Ceph health is ok after reboot")
self.fuel_web.check_ceph_status(cluster_id)

View File

@@ -1084,20 +1084,20 @@ class VcenterDeploy(TestBasic):
# VMs on different hypervisors should communicate between each other
for ip_1 in srv_ip:
ssh = self.fuel_web.get_ssh_for_node("slave-01")
logger.info("Connect to VM {0}".format(ip_1))
for ip_2 in srv_ip:
if ip_1 != ip_2:
# Check server's connectivity
res = int(
os_conn.execute_through_host(
ssh, ip_1, "ping -q -c3 " + ip_2 +
"| grep -o '[0-9] packets received'"
"| cut -f1 -d ' '")['stdout'])
assert_true(
res == 3,
"VM{0} not ping from Vm {1}, received {2} icmp".format(
ip_1, ip_2, res))
with self.fuel_web.get_ssh_for_node("slave-01") as ssh:
logger.info("Connect to VM {0}".format(ip_1))
for ip_2 in srv_ip:
if ip_1 != ip_2:
# Check server's connectivity
res = int(
os_conn.execute_through_host(
ssh, ip_1, "ping -q -c3 " + ip_2 +
"| grep -o '[0-9] packets received'"
"| cut -f1 -d ' '")['stdout'])
assert_true(
res == 3,
"VM{0} not ping from Vm {1}, received {2} icmp"
.format(ip_1, ip_2, res))
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["vcenter_delete_controler"])