Merge "Add new methods to resize and check disks for cinder\ceph nodes" into stable/mitaka

This commit is contained in:
Jenkins 2016-11-07 08:20:01 +00:00 committed by Gerrit Code Review
commit 101491476b
3 changed files with 119 additions and 18 deletions

View File

@@ -47,6 +47,38 @@ from fuelweb_test.settings import PUBLIC_TEST_IP
ssh_manager = SSHManager()
@logwrap
def check_partition_exists(ip, disk, size_in_mb):
    """Check that a partition of the expected size exists on a node.

    Scans ``lsblk`` output for LVM member volumes whose name contains
    ``disk`` and whose size is within 10% of ``size_in_mb``.

    :type ip: str
    :type disk: str
    :type size_in_mb: int
    :param ip: ip address of node
    :param disk: partition name
    :param size_in_mb: expected size of partition in megabytes
    :return: bool (True if such a partition exists)
    """
    if not size_in_mb:
        # Guard against ZeroDivisionError in the tolerance check below;
        # a zero/empty expectation can never match a real partition.
        logger.error('Expected partition size must be positive, '
                     'got {0!r}'.format(size_in_mb))
        return False
    # -b reports sizes in bytes, -l prints a flat one-device-per-line list
    disks = ssh_manager.execute(
        ip, "lsblk -lb -o NAME,FSTYPE,SIZE")['stdout']
    # Keep only LVM physical volumes, split into [NAME, FSTYPE, SIZE]
    disks = [x.split() for x in disks if 'LVM2_member' in x]
    cur_disks = [x for x in disks if disk in x[0]]
    for vol in cur_disks:
        # bytes -> whole megabytes; floor division keeps the value an
        # int on both python 2 and python 3
        real_size = int(vol[2]) // (1024 * 1024)
        # Accept up to 10% deviation between real and expected size
        if abs(float(real_size) / float(size_in_mb) - 1) < 0.1:
            logger.info(
                'Partition exists: {0}, expected size {1} '
                'real size {2}'.format(vol[0], size_in_mb, real_size))
            return True
    logger.error('Partition does not exist: {0}, expected size {1} '.format(
        disk, size_in_mb))
    for vol in disks:
        real_size = int(vol[2]) // (1024 * 1024)
        logger.error('Available disk {0} with: {1} mb'.format(
            vol[0], real_size))
    return False
@logwrap
def validate_minimal_amount_nodes(
nodes, expected_amount,

View File

@@ -1602,21 +1602,47 @@ class FuelWebClient29(object):
size += volume['size']
return size
def get_node_partition_size(self, node_id, partition_name):
def get_node_partition_size(self, node_id, partition_name, v_name=None):
disks = self.client.get_node_disks(node_id)
size = 0
logger.debug('Disks of node-{}: \n{}'.format(node_id,
pretty_log(disks)))
for disk in disks:
for volume in disk['volumes']:
if volume['name'] == partition_name:
size += volume['size']
if v_name:
if disk['name'] == partition_name:
for volume in disk['volumes']:
if volume['name'] == v_name:
size += volume['size']
else:
for volume in disk['volumes']:
if volume['name'] == partition_name:
size += volume['size']
return size
@logwrap
def get_node_disks_by_volume_name(self, node, volume_name):
    """Get names of the node disks that host a given volume.

    :param node: node id
    :param volume_name: name of volume to look for
    :return: list of disk names carrying a non-empty volume_name volume
    """
    # NOTE: scan every volume of every disk directly. The previous
    # {disk_name: volume_name} dict comprehension kept only the LAST
    # sized volume per disk, so a disk holding several volumes (e.g.
    # 'ceph' plus 'cephjournal') was missed whenever the target volume
    # was not the last one in the list.
    return [disk['name']
            for disk in self.client.get_node_disks(node)
            if any(volume['name'] == volume_name and volume['size'] > 0
                   for volume in disk['volumes'])]
@logwrap
def update_node_partitioning(self, node, disk='vdc',
node_role='cinder', unallocated_size=11116):
node_size = self.get_node_disk_size(node['id'], disk)
node_role='cinder',
unallocated_size=11116,
by_vol_name=False):
if by_vol_name:
node_size = self.get_node_partition_size(node['id'], disk,
v_name=node_role)
else:
node_size = self.get_node_disk_size(node['id'], disk)
disk_part = {
disk: {
node_role: node_size - unallocated_size

View File

@@ -17,6 +17,7 @@ from proboscis import test
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import settings
from fuelweb_test import logger
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@@ -126,7 +127,8 @@ class HaVlanGroup5(TestBasic):
12. Verify networks
13. Deploy cluster
14. Verify networks
15. Run OSTF
15. Checking size of partitions
16. Run OSTF
Duration 180m
Snapshot cinder_ceph_for_images_ephemeral_rados
@@ -171,24 +173,50 @@
}
)
self.show_step(8)
self.fuel_web.update_network_cidr(cluster_id, 'public')
if self.fuel_web.get_public_gw().startswith('10.109.'):
self.fuel_web.update_network_cidr(cluster_id,
network_name='public')
else:
logger.info('Skipping change net mask from /24 to /25')
self.show_step(9)
self.show_step(10)
self.show_step(11)
ceph_image_size = {}
cinder_image_size = {}
ceph_nodes = self.fuel_web.\
get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
role_status='pending_roles')
for ceph_node in ceph_nodes:
ceph_image_size = self.fuel_web.\
update_node_partitioning(ceph_node, node_role='ceph')
ceph_image_size[ceph_node['ip']] = {}
ceph_disks = self.fuel_web.get_node_disks_by_volume_name(
node=ceph_node['id'],
volume_name='ceph')
for disk in ceph_disks:
ceph_image_size[ceph_node['ip']][disk] = \
self.fuel_web.update_node_partitioning(
ceph_node,
node_role='ceph',
disk=disk,
by_vol_name=True)
cinder_nodes = self.fuel_web.\
get_nailgun_cluster_nodes_by_roles(cluster_id, ['cinder'],
role_status='pending_roles')
for cinder_node in cinder_nodes:
cinder_image_size = self.fuel_web.\
update_node_partitioning(cinder_node, node_role='cinder')
cinder_image_size[cinder_node['ip']] = {}
cinder_disks = self.fuel_web.get_node_disks_by_volume_name(
node=cinder_node['id'],
volume_name='cinder')
for disk in cinder_disks:
cinder_image_size[cinder_node['ip']][disk] = \
self.fuel_web.update_node_partitioning(
cinder_node,
node_role='cinder',
disk=disk,
by_vol_name=True)
self.show_step(12)
self.fuel_web.verify_network(cluster_id)
@@ -198,13 +226,28 @@
self.show_step(14)
self.fuel_web.verify_network(cluster_id)
for ceph in ceph_nodes:
checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)
for cinder in cinder_nodes:
checkers.check_cinder_image_size(cinder['ip'], cinder_image_size)
self.show_step(15)
for ceph_node in ceph_nodes:
ceph_disks = self.fuel_web.get_node_disks_by_volume_name(
node=ceph_node['id'],
volume_name='ceph')
for disk in ceph_disks:
exp_size = ceph_image_size[ceph_node['ip']][disk]
checkers.check_ceph_image_size(ceph_node['ip'],
expected_size=exp_size,
device=disk)
for cinder_node in cinder_nodes:
cinder_disks = self.fuel_web.get_node_disks_by_volume_name(
node=cinder_node['id'],
volume_name='cinder')
for disk in cinder_disks:
exp_size = cinder_image_size[cinder_node['ip']][disk]
checkers.check_partition_exists(cinder_node['ip'],
disk=disk,
size_in_mb=exp_size)
self.show_step(16)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")