Add tests for multipath devices

SSHManager.update_connection: add the ability to open a new connection with
the given parameters if no connection is present.

This test group should be run with these parameters:

    export MULTIPATH=True
    export SLAVE_MULTIPATH_DISKS_COUNT=2
    export MULTIPATH_TEMPLATE=system_test/tests_templates/tests_configs/multipath_3_nodes.yaml

Implements: blueprint test-multipath-devices

Change-Id: I73ffc4338b59c8b44eabe27b2d759b7fbbba19a4
This commit is contained in:
zatserklyany 2016-04-15 16:33:14 +03:00
parent 2f13281daf
commit d8f7b0d221
8 changed files with 437 additions and 10 deletions

View File

@ -710,4 +710,9 @@ Test for network outage
Test for separate master node deployment
----------------------------------------
.. automodule:: system_test.tests.test_centos_master_deploy_ceph
:members:
:members:
Test for multipath devices
--------------------------
.. automodule:: fuelweb_test.tests.test_multipath_devices
:members:

View File

@ -143,13 +143,13 @@ class SSHManager(object):
logger.info('SSH_MANAGER:Create new connection for '
'{ip}:{port}'.format(ip=ip, port=port))
self.connections[(ip, port)] = SSHClient(
host=ip,
port=port,
username=login,
password=password,
private_keys=keys if keys is not None else []
)
self.connections[(ip, port)] = SSHClient(
host=ip,
port=port,
username=login,
password=password,
private_keys=keys if keys is not None else []
)
def clean_all_connections(self):
for (ip, port), connection in self.connections.items():

View File

@ -2005,7 +2005,7 @@ class FuelWebClient29(object):
self.assert_task_success(task, progress=progress)
@logwrap
def deploy_task_wait(self, cluster_id, progress):
def deploy_task_wait(self, cluster_id, progress=None):
logger.info('Start cluster #%s deployment', cluster_id)
task = self.client.deploy_nodes(cluster_id)
self.assert_task_success(

View File

@ -114,6 +114,19 @@ HARDWARE["slave_node_memory"] = int(
NODE_VOLUME_SIZE = int(os.environ.get('NODE_VOLUME_SIZE', 50))
NODES_COUNT = os.environ.get('NODES_COUNT', 10)
MULTIPATH = get_var_as_bool('MULTIPATH', False)
SLAVE_MULTIPATH_DISKS_COUNT = int(os.environ.get('SLAVE_MULTIPATH_DISKS_COUNT',
'0'))
MULTIPATH_TEMPLATE = os.environ.get(
'MULTIPATH_TEMPLATE',
os.path.join(
os.getcwd(),
'system_test/tests_templates/tests_configs/multipath_3_nodes.yaml'))
if MULTIPATH and not SLAVE_MULTIPATH_DISKS_COUNT:
os.environ.setdefault('SLAVE_MULTIPATH_DISKS_COUNT', '2')
SLAVE_MULTIPATH_DISKS_COUNT = int(
os.environ.get('SLAVE_MULTIPATH_DISKS_COUNT'))
MULTIPLE_NETWORKS = get_var_as_bool('MULTIPLE_NETWORKS', False)
MULTIPLE_NETWORKS_TEMPLATE = os.environ.get(
'MULTIPLE_NETWORKS_TEMPLATE',

View File

@ -31,6 +31,7 @@ from fuelweb_test.settings import MULTIPLE_NETWORKS
from fuelweb_test.settings import MULTIPLE_NETWORKS_TEMPLATE
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE
from system_test.core.discover import load_yaml
from gates_tests.helpers import exceptions
@ -343,7 +344,6 @@ class SetupEnvironment(TestBasic):
# inside 'address_pool', so we can use 'network_pools' section
# for L3 configuration in tests for multi racks
if MULTIPLE_NETWORKS:
from system_test.core.discover import load_yaml
self._devops_config = load_yaml(MULTIPLE_NETWORKS_TEMPLATE)
self.check_run("empty")

View File

@ -0,0 +1,203 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from proboscis.asserts import assert_equal
from proboscis import test
from fuelweb_test.helpers.checkers import ssh_manager
from fuelweb_test.helpers.utils import TimeStat
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import MULTIPATH
from fuelweb_test.settings import MULTIPATH_TEMPLATE
from fuelweb_test.settings import NEUTRON_SEGMENT
from fuelweb_test.settings import SLAVE_MULTIPATH_DISKS_COUNT
from fuelweb_test.tests import base_test_case
from gates_tests.helpers import exceptions
from system_test.core.discover import load_yaml
@test
class TestMultipath(base_test_case.TestBasic):
    """Tests for nodes backed by multipath storage devices.

    Required environment variables:

    * MULTIPATH=true
    * SLAVE_MULTIPATH_DISKS_COUNT>=2
    * MULTIPATH_TEMPLATE=
      system_test/tests_templates/tests_configs/multipath_3_nodes.yaml
    """

    @staticmethod
    def check_multipath_devices(ip, slave_multipath_disks_count):
        """Verify multipath devices on the node at *ip*.

        Runs ``multipath -l -v2`` on the node and checks that every
        multipath device has exactly ``slave_multipath_disks_count``
        paths, each of them in 'active'/'running' state.

        :param ip: IP address of the node to inspect.
        :param slave_multipath_disks_count: expected number of paths
            per multipath device.
        :raises AssertionError: if a path is not active/running or a
            device has the wrong number of paths.
        """
        cmd = "multipath -l -v2"
        ssh_manager.update_connection(ip, login='root',
                                      keys=ssh_manager._get_keys())
        ssh_manager.get_remote(ip)
        result = ssh_manager.execute_on_remote(
            ip=ip,
            cmd=cmd,
            err_msg="Failed to check multipath on node {}".format(ip)
        )
        multipath_info = [res.rstrip() for res in result['stdout']]
        # Header line of a multipath map: "<wwid> dm-<n> ..."
        disk_re = re.compile(r'(?P<id>^[\d|\w]+)\s+(?P<dm>dm-\d+)')
        # Path line: "H:C:T:L <devnode> major:minor <dm_status>
        # <path_status> <online_status>"
        status_re = re.compile(
            r'\d+:\d+:\d+:\d+\s+(?P<devnode>\w+)'
            r'\s+\d+:\d+\s+(?P<dm_status>\w+)'
            r'\s+(?P<path_status>\w+)'
            r'\s+(?P<online_status>\w+)'
        )
        dm = None
        # Map of dm device name -> list of its path device nodes
        disks = dict()
        for line in multipath_info:
            m = disk_re.match(line)
            if m:
                dm = m.group('dm')
                disks[dm] = []
                continue
            m = status_re.search(line)
            if m:
                disks[dm].append(m.group('devnode'))
                assert_equal(
                    m.group('dm_status'),
                    'active',
                    "Device {0} is in '{1}' status on {2}".format(
                        m.group('devnode'), m.group('dm_status'), dm))
                assert_equal(
                    m.group('online_status'),
                    'running',
                    "Device {0} is in '{1}' status on {2}".format(
                        m.group('devnode'), m.group('online_status'), dm))
        for dm_name, paths in disks.items():
            # Bug fix: report the actual path count, not len() of the
            # device name string.
            assert_equal(len(paths),
                         slave_multipath_disks_count,
                         "{0}: wrong path count: {1}. Must be {2}".format(
                             dm_name, len(paths),
                             slave_multipath_disks_count))

    @staticmethod
    def get_os_root_multipath_count(ip):
        """Return the count of root partitions on multipath devices.

        :param ip: IP address of the node to inspect.
        :rtype: int
        """
        cmd = "lsblk -lo NAME,TYPE,MOUNTPOINT | grep '/$' | grep lvm | wc -l"
        ssh_manager.update_connection(ip, login='root',
                                      keys=ssh_manager._get_keys())
        ssh_manager.get_remote(ip)
        result = ssh_manager.execute_on_remote(
            ip=ip,
            cmd=cmd,
            err_msg="Failed to check lsblk on node {}".format(ip))
        return int(result['stdout_str'])

    @test(groups=["bootstrap_multipath"])
    def bootstrap_multipath(self):
        """Bootstrap node with multipath devices

        Scenario:
            1. Setup environment
            2. Bootstrap slave nodes
            3. Verify multipath devices on the nodes

        Duration 30m
        """
        if not MULTIPATH:
            raise exceptions.FuelQAVariableNotSet(
                'MULTIPATH', 'true')
        if not MULTIPATH_TEMPLATE:
            raise exceptions.FuelQAVariableNotSet(
                'MULTIPATH_TEMPLATE',
                'system_test/tests_templates/tests_configs/'
                'multipath_3_nodes.yaml')
        if int(SLAVE_MULTIPATH_DISKS_COUNT) < 1:
            raise exceptions.FuelQAVariableNotSet(
                'SLAVE_MULTIPATH_DISKS_COUNT', '2')

        self.show_step(1)
        self._devops_config = load_yaml(MULTIPATH_TEMPLATE)
        with TimeStat("setup_environment", is_uniq=True):
            self.env.setup_environment()
            self.fuel_post_install_actions()

        self.fuel_web.get_nailgun_version()
        self.fuel_web.change_default_network_settings()

        self.show_step(2)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3],
                                 skip_timesync=True)

        self.show_step(3)
        for ip in [node['ip'] for node in self.fuel_web.client.list_nodes()]:
            self.check_multipath_devices(ip, SLAVE_MULTIPATH_DISKS_COUNT)

    @test(depends_on_groups=["bootstrap_multipath"],
          groups=["deploy_multipath"])
    def deploy_multipath(self):
        """Deploy cluster with multipath devices

        Scenario:
            1. Create cluster with 1 controller, 1 compute and 1 cinder roles
            2. Run network verification
            3. Provision the cluster
            4. Verify multipath devices on nodes
            5. Deploy the cluster
            6. Run OSTF

        Duration 50m
        """
        self.show_step(1)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_segment_type": NEUTRON_SEGMENT['vlan'],
            }
        )
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )

        self.show_step(2)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(3)
        self.fuel_web.provisioning_cluster_wait(cluster_id)

        self.show_step(4)
        for ip in [node['ip'] for node in self.fuel_web.client.list_nodes()]:
            self.check_multipath_devices(ip, SLAVE_MULTIPATH_DISKS_COUNT)
            assert_equal(
                self.get_os_root_multipath_count(ip),
                SLAVE_MULTIPATH_DISKS_COUNT,
                "Wrong lvm structure of multipath device on {}".format(ip))

        self.show_step(5)
        self.fuel_web.deploy_task_wait(cluster_id)

        self.show_step(6)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

View File

@ -0,0 +1,165 @@
---
aliases:
dynamic_address_pool:
- &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
default_interface_model:
- &interface_model !os_env INTERFACE_MODEL, e1000
rack-01-slave-interfaces: &rack-01-slave-interfaces
- label: enp0s3
l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks
interface_model: *interface_model
- label: enp0s4
l2_network_device: public
interface_model: *interface_model
- label: enp0s5
l2_network_device: storage
interface_model: *interface_model
- label: enp0s6
l2_network_device: management
interface_model: *interface_model
- label: enp0s7
l2_network_device: private
interface_model: *interface_model
rack-01-slave-network_config: &rack-01-slave-network_config
enp0s3:
networks:
- fuelweb_admin
enp0s4:
networks:
- public
enp0s5:
networks:
- storage
enp0s6:
networks:
- management
enp0s7:
networks:
- private
rack-01-slave-node-params: &rack-01-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 1
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
format: raw
multipath_count: !os_env SLAVE_MULTIPATH_DISKS_COUNT, 2
interfaces: *rack-01-slave-interfaces
network_config: *rack-01-slave-network_config
env_name: !os_env ENV_NAME
address_pools:
# Network pools used by the environment
fuelweb_admin-pool01:
net: *pool_default
params:
tag: 0
public-pool01:
net: *pool_default
params:
tag: 0
storage-pool01:
net: *pool_default
params:
tag: 101
management-pool01:
net: *pool_default
params:
tag: 102
private-pool01:
net: *pool_default
params:
tag: 103
groups:
- name: rack-01
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks
admin:
address_pool: fuelweb_admin-pool01
dhcp: false
forward:
mode: nat
public:
address_pool: public-pool01
dhcp: false
forward:
mode: nat
storage:
address_pool: storage-pool01
dhcp: false
management:
address_pool: management-pool01
dhcp: false
private:
address_pool: private-pool01
dhcp: false
nodes:
- name: admin # Custom name of VM for Fuel admin node
role: fuel_master # Fixed role for Fuel master node properties
params:
vcpu: !os_env ADMIN_NODE_CPU, 2
memory: !os_env ADMIN_NODE_MEMORY, 3072
boot:
- hd
- cdrom # for boot from usb - without 'cdrom'
volumes:
- name: system
capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80
format: qcow2
- name: iso
source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size
format: raw
device: cdrom # for boot from usb - 'disk'
bus: ide # for boot from usb - 'usb'
interfaces:
- label: enp0s3
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
network_config:
enp0s3:
networks:
- fuelweb_admin
- name: slave-01
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-02
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-03
role: fuel_slave
params: *rack-01-slave-node-params

View File

@ -0,0 +1,41 @@
---
network-config: &network-config
provider: neutron
segment-type: vlan
pubip-to-all: false
storages-config: &storages-config
volume-lvm: true
volume-ceph: false
image-ceph: false
ephemeral-ceph: false
rados-ceph: false
replica-ceph: 2
nodes: &nodes
- roles:
- controller
count: 1
- roles:
- compute
count: 1
- roles:
- cinder
count: 1
template:
name: 1 Controller, 1 Compute, 1 Cinder on Neutron/VLAN
slaves: 3
devops_settings: !include devops_configs/multipath.yaml
cluster_template: &environment-config
name: env1
release: ubuntu
settings:
components:
sahara: false
murano: false
ceilometer: false
storages: *storages-config
network: *network-config
nodes: *nodes