System tests movement from fuel-main

Apply refactoring patch https://review.openstack.org/#/c/149612/.
  Move contents of fuel-main/fuelweb_test to the repo.
  Add run_tests.sh.
  Change fuel-devops version to 2.9.0 in requirements file.

Change-Id: Id321d63d97290f2fb22736abbe1d74315aed2893
Ivan Kliuk 2015-02-04 14:02:33 +02:00
parent bd1e43fb98
commit 0168055fe6
88 changed files with 16043 additions and 2 deletions


@@ -1,3 +1,10 @@
# fuel-qa
Tests documentation
-------------------
[Devops documentation](http://docs.mirantis.com/fuel-dev/devops.html)
For 'make iso'
--------------
[Building ISO documentation](http://docs.mirantis.com/fuel-dev/develop/env.html#building-the-fuel-iso)
Hello, this is the repo for fuel-qa!

fuelweb_test/__init__.py

@@ -0,0 +1,55 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import os
from fuelweb_test.settings import LOGS_DIR
if not os.path.exists(LOGS_DIR):
os.makedirs(LOGS_DIR)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(levelname)s %(filename)s:'
'%(lineno)d -- %(message)s',
filename=os.path.join(LOGS_DIR, 'sys_test.log'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s %(filename)s:'
'%(lineno)d -- %(message)s')
console.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(console)
def debug(logger):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
logger.debug(
"Calling: {} with args: {} {}".format(
func.__name__, args, kwargs
)
)
result = func(*args, **kwargs)
logger.debug(
"Done: {} with result: {}".format(func.__name__, result))
return result
return wrapped
return wrapper
logwrap = debug(logger)
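# Example (not part of the module): how the logwrap decorator defined
# above is meant to be used; the wrapped function here is hypothetical.
@logwrap
def add(a, b):
    # logged as "Calling: add with args: (1, 2) {}" and
    # "Done: add with result: 3" at DEBUG level
    return a + b
add(1, 2)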


@@ -0,0 +1,819 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import os
import re
import traceback
from ipaddr import IPAddress
from ipaddr import IPNetwork
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
from fuelweb_test.settings import POOLS
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_true
from devops.error import TimeoutError
from devops.helpers.helpers import wait
from devops.helpers.helpers import _wait
from time import sleep
@logwrap
def check_ceph_ready(remote, exit_code=0):
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
cmd = 'service ceph-all status'
else:
cmd = 'service ceph status'
if remote.execute(cmd)['exit_code'] == exit_code:
return True
return False
@logwrap
def get_ceph_health(remote):
return ''.join(remote.execute('ceph health')['stdout']).rstrip()
@logwrap
def check_ceph_health(remote, health_status=['HEALTH_OK']):
ceph_health = get_ceph_health(remote)
if all(x in ceph_health.split() for x in health_status):
return True
logger.debug('Ceph health {0} is not equal to {1}'.format(
ceph_health, ''.join(health_status)))
return False
@logwrap
def check_ceph_disks(remote, nodes_ids):
nodes_names = ['node-{0}'.format(node_id) for node_id in nodes_ids]
disks_tree = get_osd_tree(remote)
logger.debug("Disks output information: \\n{0}".format(disks_tree))
disks_ids = []
for node in disks_tree['nodes']:
if node['type'] == 'host' and node['name'] in nodes_names:
disks_ids.extend(node['children'])
for node in disks_tree['nodes']:
if node['type'] == 'osd' and node['id'] in disks_ids:
assert_equal(node['status'], 'up', 'OSD node {0} is down'.
format(node['id']))
@logwrap
def check_cinder_status(remote):
"""Parse output and return False
if any enabled service is down.
'cinder service-list' stdout example:
| cinder-scheduler | node-1.test.domain.local | nova | enabled | up |
| cinder-scheduler | node-2.test.domain.local | nova | enabled | down |
"""
cmd = '. openrc; cinder service-list'
result = remote.execute(cmd)
cinder_services = ''.join(result['stdout'])
logger.debug('>$ cinder service-list\n{}'.format(cinder_services))
if result['exit_code'] == 0:
return all(' up ' in x.split('enabled')[1]
for x in cinder_services.split('\n')
if 'cinder' in x and 'enabled' in x
and len(x.split('enabled')))
return False
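# Illustration (sample data only): the predicate above applied to the
# docstring's example output returns False, since one enabled service
# is down.
sample = (
    '| cinder-scheduler | node-1.test.domain.local | nova | enabled | up |\n'
    '| cinder-scheduler | node-2.test.domain.local | nova | enabled | down |')
ok = all(' up ' in x.split('enabled')[1]
         for x in sample.split('\n')
         if 'cinder' in x and 'enabled' in x)
assert ok is False  # node-2's scheduler is enabled but down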
@logwrap
def check_image(image, md5, path):
local_path = "{0}/{1}".format(path, image)
logger.debug('Check md5 {0} of image {1}/{2}'.format(md5, path, image))
if not os.path.isfile(local_path):
logger.error('Image {0} not found in {1} directory'.format(
image, path))
return False
with open(local_path, mode='rb') as fimage:
digits = hashlib.md5()
while True:
buf = fimage.read(4096)
if not buf:
break
digits.update(buf)
md5_local = digits.hexdigest()
if md5_local != md5:
logger.error('MD5 of {0}/{1} is not correct, aborting'.format(
path, image))
return False
return True
@logwrap
def get_interface_description(ctrl_ssh, interface_short_name):
return ''.join(
ctrl_ssh.execute(
'/sbin/ip addr show dev %s' % interface_short_name
)['stdout']
)
def verify_network_configuration(remote, node):
for interface in node['network_data']:
if interface.get('vlan') is None:
continue  # TODO: excess check; fix the interface JSON format
interface_name = "{}.{}@{}".format(
interface['dev'], interface['vlan'], interface['dev'])
interface_short_name = "{}.{}".format(
interface['dev'], interface['vlan'])
interface_description = get_interface_description(
remote, interface_short_name)
assert_true(interface_name in interface_description)
if interface.get('name') == 'floating':
continue
if interface.get('ip'):
assert_true(
"inet {}".format(interface.get('ip')) in
interface_description)
else:
assert_false("inet " in interface_description)
if interface.get('brd'):
assert_true(
"brd {}".format(interface['brd']) in interface_description)
@logwrap
def verify_service(remote, service_name, count=1):
ps_output = remote.execute('ps ax')['stdout']
api = filter(lambda x: service_name in x, ps_output)
logger.debug("{} \\n: {}".format(service_name, str(api)))
assert_equal(len(api), count,
"{0} count not equal to {1}".format(service_name, count))
@logwrap
def verify_service_list_api(os_conn, service_count):
def _verify():
ret = os_conn.get_nova_service_list()
logger.debug('Service list {0}'.format(ret))
assert_equal(service_count, len(ret),
'Expected service count is {0},'
' but get {1} count, actual list {2}'.format(
service_count, len(ret), ret))
for service in ret:
logger.debug('service is {0}'.format(service))
assert_equal(
service.state, 'up',
'Service {0} on host {1} has next '
'state {2}'.format(service.binary,
service.host,
service.state))
try:
_verify()
except AssertionError:
logger.debug(
"Services are not ready yet. Sleeping for 60 seconds and retrying")
sleep(60)
_verify()
@logwrap
def verify_glance_image_api(os_conn):
ret = os_conn.get_image_list()
assert_equal(1, len([i for i in ret if i.name == 'TestVM']),
"TestVM not found in glance image-list")
@logwrap
def verify_network_list_api(os_conn, net_count=None):
ret = os_conn.get_nova_network_list()
assert_equal(net_count, len(ret),
'Unexpected count of networks detected, '
'expected: {0}, current {1} count,'
' full list {2}'.format(net_count, len(ret), ret))
@logwrap
def get_ceph_partitions(remote, device, type="xfs"):
ret = remote.check_call("parted {device} print | grep {type}".format(
device=device, type=type))['stdout']
if not ret:
logger.error("Partition not present! {partitions}: ".format(
remote.check_call("parted {device} print")))
raise Exception
logger.debug("Partitions: {part}".format(part=ret))
return ret
@logwrap
def get_mongo_partitions(remote, device):
ret = remote.check_call("lsblk | grep {device} | awk {size}".format(
device=device,
size=re.escape('{print $4}')))['stdout']
if not ret:
logger.error("Partition not present! {partitions}: ".format(
remote.check_call("parted {device} print")))
raise Exception
logger.debug("Partitions: {part}".format(part=ret))
return ret
@logwrap
def check_unallocated_space(disks, contr_img_ceph=False):
for disk in disks:
# In case we have Ceph for images all space on controller
# should be given to Base System space:
if contr_img_ceph:
logger.info("Check that all space on /dev/{d} is allocated for "
"Base System Space".format(d=disk['name']))
if not bool(disk["volumes"][0]["size"] == disk["size"]):
return False
else:
logger.info("Get overall size of volumes")
sizes = [v['size'] for v in disk["volumes"]]
logger.info("Space on disk: {s}".format(s=disk['size']))
logger.info("Summary space of disks on /dev/{d}: {s}".format(
d=disk["name"], s=sum(sizes)))
if not bool(sum(sizes) == disk["size"]):
return False
return True
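# Illustration (hand-made data): the disks argument is the Nailgun
# "disks" JSON for a node; this minimal structure passes the check.
disks = [{
    'name': 'sda',
    'size': 100,
    'volumes': [{'name': 'os', 'size': 40},
                {'name': 'image', 'size': 60}],
}]
assert check_unallocated_space(disks) is True  # 40 + 60 == 100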
@logwrap
def check_upgraded_containers(remote, version_from, version_to):
containers = remote.execute("docker ps | tail -n +2 |"
"awk '{ print $NF;}'")['stdout']
symlink = remote.execute("readlink /etc/supervisord.d/current")['stdout']
logger.debug('containers are {0}'.format(containers))
logger.debug('symlinks are {0}'.format(symlink))
components = [co.split('-') for x in containers for co in x.split(',')]
for i in components:
assert_true(version_from != i[2],
'There are {0} containers'.format(version_from))
for i in components:
assert_true(version_to == i[2],
'There are no {0} containers'.format(version_to))
assert_true('/etc/supervisord.d/{0}'.format(version_to)
in symlink[0],
'Symlink is set not to {0}'.format(version_to))
@logwrap
def upload_tarball(node_ssh, tar_path, tar_target):
check_archive_type(tar_path)
try:
logger.debug("Start to upload tar file")
node_ssh.upload(tar_path, tar_target)
except Exception:
logger.error('Failed to upload file')
logger.error(traceback.format_exc())
@logwrap
def check_archive_type(tar_path):
if os.path.splitext(tar_path)[1] not in [".tar", ".lrz", ".fp"]:
raise Exception("Wrong archive type!")
@logwrap
def check_tarball_exists(node_ssh, name, path):
result = ''.join(node_ssh.execute(
'ls -all {0} | grep {1}'.format(path, name))['stdout'])
assert_true(name in result, 'Cannot find tarball')
@logwrap
def untar(node_ssh, name, path):
filename, ext = os.path.splitext(name)
cmd = "tar -xpvf" if ext.endswith("tar") else "lrzuntar"
result = ''.join(node_ssh.execute(
'cd {0} && {2} {1}'.format(path, name, cmd))['stdout'])
logger.debug('Result from tar command is {0}'.format(result))
@logwrap
def run_script(node_ssh, script_path, script_name, password='admin',
rollback=False, exit_code=0):
path = os.path.join(script_path, script_name)
c_res = node_ssh.execute('chmod 755 {0}'.format(path))
logger.debug("Result of cmod is {0}".format(c_res))
if rollback:
path = "UPGRADERS='host-system bootstrap docker openstack" \
" raise-error targetimages' {0}/{1}" \
" --password {2}".format(script_path, script_name, password)
chan, stdin, stderr, stdout = node_ssh.execute_async(path)
logger.debug('Trying to read the status code from the channel...')
assert_equal(chan.recv_exit_status(), exit_code,
'Upgrade script failed with the following message: {0}'.format(
''.join(stderr)))
else:
path = "{0}/{1} --no-rollback --password {2}".format(script_path,
script_name,
password)
chan, stdin, stderr, stdout = node_ssh.execute_async(path)
logger.debug('Trying to read the status code from the channel...')
assert_equal(chan.recv_exit_status(), exit_code,
'Upgrade script failed with the following message: {0}'.format(
''.join(stderr)))
@logwrap
def wait_upgrade_is_done(node_ssh, timeout, phrase):
cmd = "grep '{0}' /var/log/fuel_upgrade.log".format(phrase)
try:
wait(
lambda: not node_ssh.execute(cmd)['exit_code'], timeout=timeout)
except Exception as e:
a = node_ssh.execute(cmd)
logger.error(e)
assert_equal(0, a['exit_code'], a['stderr'])
@logwrap
def wait_rollback_is_done(node_ssh, timeout):
logger.debug('start waiting for rollback done')
wait(
lambda: not node_ssh.execute(
"grep 'UPGRADE FAILED' /var/log/fuel_upgrade.log"
)['exit_code'], timeout=timeout)
@logwrap
def get_package_versions_from_node(remote, name, os_type):
if os_type and 'Ubuntu' in os_type:
cmd = "dpkg-query -W -f='${Version}' %s" % name
else:
cmd = "rpm -q {0}".format(name)
try:
result = ''.join(remote.execute(cmd)['stdout'])
return result.strip()
except Exception:
logger.error(traceback.format_exc())
raise
@logwrap
def check_enable_experimental_mode(remote, path):
cmd = "sed '/feature_groups:" \
"/a \ \ \ \ - experimental' -i {0}".format(path)
result = remote.execute(cmd)
assert_equal(0, result['exit_code'], result['stderr'])
@logwrap
def restart_nailgun(remote):
cmd = 'dockerctl shell nailgun supervisorctl restart nailgun'
result = remote.execute(cmd)
assert_equal(0, result['exit_code'], result['stderr'])
@logwrap
def get_osd_tree(remote):
cmd = 'ceph osd tree -f json'
return json.loads(''.join(remote.execute(cmd)['stdout']))
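# Sketch (not verbatim ceph output): check_ceph_disks above walks a
# structure of roughly this shape returned by 'ceph osd tree -f json'.
osd_tree = {
    'nodes': [
        {'id': -2, 'name': 'node-1', 'type': 'host', 'children': [0]},
        {'id': 0, 'name': 'osd.0', 'type': 'osd', 'status': 'up'},
    ],
}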
def find_backup(remote):
try:
arch_dir = ''.join(
remote.execute("ls -1u /var/backup/fuel/ | sed -n 1p")['stdout'])
arch_path = ''.join(
remote.execute("ls -1u /var/backup/fuel/{0}/*.lrz".
format(arch_dir.strip()))["stdout"])
return arch_path
except Exception as e:
logger.error('exception is {0}'.format(e))
raise e
@logwrap
def backup_check(remote):
logger.info("Backup check archive status")
path = find_backup(remote)
assert_true(path, "Cannot find backup. Path value {0}".format(path))
arch_result = None
try:
arch_result = ''.join(
remote.execute(("if [ -e {0} ]; then echo "
" Archive exists; fi").
format(path.rstrip()))["stdout"])
except Exception as e:
logger.error('exception is {0}'.format(e))
raise e
assert_true("Archive exists" in arch_result, "Archive does not exist")
@logwrap
def restore_check_sum(remote):
logger.info("Restore check md5sum")
md5sum_backup = remote.execute("cat /etc/fuel/sum")
md5sum_restore = remote.execute("md5sum /etc/fuel/data | sed -n 1p "
" | awk '{print $1}'")
assert_equal(md5sum_backup, md5sum_restore,
"md5sums not equal: backup{0}, restore{1}".
format(md5sum_backup, md5sum_restore))
@logwrap
def iptables_check(remote):
logger.info("Iptables check")
remote.execute("iptables-save > /etc/fuel/iptables-restore")
iptables_backup = remote.execute("sed -e '/^:/d; /^#/d' "
" /etc/fuel/iptables-backup")
iptables_restore = remote.execute("sed -e '/^:/d; /^#/d' "
" /etc/fuel/iptables-restore")
assert_equal(iptables_backup, iptables_restore,
"list of iptables rules are not equal")
@logwrap
def check_mysql(remote, node_name):
check_cmd = 'pkill -0 -x mysqld'
check_crm_cmd = ('crm resource status clone_p_mysql |'
' grep -q "is running on: $HOSTNAME"')
check_galera_cmd = ("mysql --connect_timeout=5 -sse \"SELECT"
" VARIABLE_VALUE FROM"
" information_schema.GLOBAL_STATUS"
" WHERE VARIABLE_NAME"
" = 'wsrep_local_state_comment';\"")
try:
wait(lambda: remote.execute(check_cmd)['exit_code'] == 0,
timeout=300)
logger.info('MySQL daemon is started on {0}'.format(node_name))
except TimeoutError:
logger.error('MySQL daemon is down on {0}'.format(node_name))
raise
_wait(lambda: assert_equal(remote.execute(check_crm_cmd)['exit_code'], 0,
'MySQL resource is NOT running on {0}'.format(
node_name)), timeout=60)
try:
wait(lambda: ''.join(remote.execute(
check_galera_cmd)['stdout']).rstrip() == 'Synced', timeout=600)
except TimeoutError:
logger.error('galera status is {0}'.format(''.join(remote.execute(
check_galera_cmd)['stdout']).rstrip()))
raise
@logwrap
def install_plugin_check_code(
remote, plugin, exit_code=0):
cmd = "cd /var && fuel plugins --install {0} ".format(plugin)
chan, stdin, stderr, stdout = remote.execute_async(cmd)
logger.debug('Trying to read the status code from the channel...')
assert_equal(
chan.recv_exit_status(), exit_code,
'Install script failed with the following message: {0}'.format(''.join(stderr)))
@logwrap
def check_action_logs(scenario, postgres_actions):
def _check(_action, _group=False):
assert_true(postgres_actions.action_logs_contain(_action, _group),
"Action logs are missed for '{0}'!".format(
_action))
actions = [
{
'desc': [''],
'name': ['master_node_settings'],
'group': [],
'regex': False,
},
{
'desc': [r'create\s+.*(cluster|environment|cloud)'],
'name': ['cluster_collection'],
'group': ['cluster_attributes', 'network_configuration'],
'regex': True,
},
{
'desc': ['deploy'],
'name': ['deploy_changes', 'provision', 'deployment',
'cluster_collection', 'check_before_deployment'],
'group': ['cluster_changes', 'cluster_checking'],
'regex': True,
},
{
'desc': [r'verif.*\s+.*network|network.*\s+.*verif'],
'name': ['check_networks', 'verify_networks'],
'group': ['network_verification'],
'regex': True,
},
{
'desc': [r'(stop|abort).*(deployment|provision)'],
'name': ['stop_deployment'],
'group': ['cluster_changes'],
'regex': True,
},
{
'desc': [r'reset.*(deployment|provision)'],
'name': ['reset'],
'group': ['cluster_changes'],
'regex': True,
},
{
'desc': [r'rename.*(cluster|environment|cloud)'],
'name': ['cluster_instance'],
'group': ['cluster_changes'],
'regex': True,
},
{
'desc': [r'upgrade'],
'name': ['releases_collection'],
'group': ['release_changes'],
'regex': True,
},
{
'desc': [r'update.*(cluster|environment|cloud)'],
'name': ['update'],
'group': ['cluster_changes'],
'regex': True,
},
{
'desc': [r'upload.*deployment'],
'name': ['deployment_info'],
'group': ['orchestrator'],
'regex': True,
},
{
'desc': [r'upload.*provisioning'],
'name': ['provisioning_info'],
'group': ['orchestrator'],
'regex': True,
},
# Logging of OSTF isn't implemented yet, so actions list is
# empty
{
'desc': ['OSTF', 'Health'],
'name': [],
'group': [],
'regex': False,
},
]
# Check logs in Nailgun database
for action in actions:
if action['regex']:
if not any(re.search(regex, scenario, re.IGNORECASE)
for regex in action['desc']):
continue
elif not any(action in scenario for action in action['desc']):
logger.info(action['desc'])
continue
for action_name in action['name']:
_check(action_name, _group=False)
for action_group in action['group']:
_check(action_group, _group=True)
def execute_query_on_collector(collector_remote, master_uuid, query,
collector_db='collector',
collector_db_user='collector',
collector_db_pass='collector'):
if master_uuid is not None:
query = "{0} where master_node_uid = '{1}';".format(query, master_uuid)
cmd = 'PGPASSWORD={0} psql -qt -h 127.0.0.1 -U {1} -d {2} -c "{3}"'.\
format(collector_db_pass, collector_db_user, collector_db, query)
return ''.join(collector_remote.execute(cmd)['stdout']).strip()
def count_stats_on_collector(collector_remote, master_uuid):
return execute_query_on_collector(collector_remote, master_uuid=None,
query="select (select count(*) from "
"action_logs where master_node_uid"
" = \'{0}\') + (select count(*) "
"from installation_structures "
"where master_node_uid = \'{0}\')".
format(master_uuid))
@logwrap
def check_stats_on_collector(collector_remote, postgres_actions, master_uuid):
sent_logs_count = postgres_actions.count_sent_action_logs()
logger.info("Number of logs that were sent to collector: {}".format(
sent_logs_count
))
logs = execute_query_on_collector(collector_remote, master_uuid,
query="select count(*) from action_logs")
logger.info("Number of logs that were saved on collector: {}".format(logs))
assert_true(sent_logs_count <= int(logs),
("Count of action logs in Nailgun DB ({0}) is bigger than on "
"Collector ({1}), but should be less or equal").format(
sent_logs_count, logs))
sum_stats_count = execute_query_on_collector(
collector_remote, master_uuid=master_uuid,
query="select count(*) from installation_structures")
assert_equal(int(sum_stats_count), 1,
"Installation structure wasn't saved on Collector side proper"
"ly: found: {0}, expected: 1 record.".format(sum_stats_count))
summ_stats_raw = execute_query_on_collector(
collector_remote, master_uuid,
query="select structure from installation_structures")
summ_stats = json.loads(summ_stats_raw)
general_stats = {
'clusters_num': int,
'allocated_nodes_num': int,
'unallocated_nodes_num': int,
'fuel_release': dict,
'clusters': list,
'user_information': dict,
}
# Check that important data (clusters number, nodes number, nodes roles,
# user's email, used operation system, OpenStack stats) is saved correctly
for stat_type in general_stats.keys():
assert_true(type(summ_stats[stat_type]) == general_stats[stat_type],
"Installation structure in Collector's DB doesn't contain"
"the following stats: {0}".format(stat_type))
real_clusters_number = int(postgres_actions.run_query(
db='nailgun', query='select count(*) from clusters;'))
assert_equal(real_clusters_number, summ_stats['clusters_num'],
'Real clusters number is {0}, but usage statistics says '
'that clusters number is {1}'.format(
real_clusters_number, summ_stats['clusters_num']))
real_allocated_nodes_num = int(postgres_actions.run_query(
db='nailgun',
query='select count(id) from nodes where cluster_id is not Null;'))
assert_equal(real_allocated_nodes_num, summ_stats['allocated_nodes_num'],
'Real allocated nodes number is {0}, but usage statistics '
'says that allocated nodes number is {1}'.format(
real_allocated_nodes_num,
summ_stats['allocated_nodes_num']))
real_user_email = json.loads(postgres_actions.run_query(
db='nailgun', query='select settings from master_node_settings;')
)['statistics']['email']['value']
assert_equal(real_user_email, summ_stats['user_information']['email'],
"Usage statistics contains incorrect user's email address: "
"'{0}', but should be {1}".format(
summ_stats['user_information']['email'],
real_user_email))
for cluster in summ_stats['clusters']:
for node in cluster['nodes']:
assert_true(len(node['roles']) > 0,
"Usage statistics contains nodes without roles: node-"
"{0} roles: {1}".format(node['id'], node['roles']))
assert_equal(len(cluster['nodes']), cluster['nodes_num'],
"Usage statistics contains incorrect number of nodes"
"assigned to cluster!")
real_cluster_os = postgres_actions.run_query(
db="nailgun", query="select operating_system from releases where "
"id = (select release_id from clusters where "
"id = {0});".format(cluster['id']))
assert_equal(real_cluster_os, cluster['release']['os'],
"Usage statistics contains incorrect operation system "
"that is used for environment with ID '{0}'. Expected: "
"'{1}', reported: '{2}'.".format(
cluster['id'], real_cluster_os,
cluster['release']['os']))
logger.info("Usage stats were properly saved to collector's database.")
@logwrap
def check_stats_private_info(collector_remote, postgres_actions,
master_uuid, _settings):
def _contain_secret_data(data):
_has_private_data = False
# Check that stats doesn't contain private data (e.g.
# specific passwords, settings, emails)
for _private in private_data.keys():
_regex = r'(?P<key>"\S+"): (?P<value>[^:]*"{0}"[^:]*)'.format(
private_data[_private])
for _match in re.finditer(_regex, data):
logger.warning('Found private info in usage statistics using '
'pattern: {0}'.format(_regex))
logger.debug('Usage statistics with private data:\n {0}'.
format(data))
logger.error("Usage statistics contains private info: '{type}:"
" {value}'. Part of the stats: {match}".format(
type=_private,
value=private_data[_private],
match=_match.group('key', 'value')))
_has_private_data = True
# Check that stats doesn't contain private types of data (e.g. any kind
# of passwords)
for _data_type in secret_data_types.keys():
_regex = (r'(?P<secret>"[^"]*{0}[^"]*": (\{{[^\}}]+\}}|\[[^\]+]\]|'
r'"[^"]+"))').format(secret_data_types[_data_type])
for _match in re.finditer(_regex, data, re.IGNORECASE):
logger.warning('Found private info in usage statistics using '
'pattern: {0}'.format(_regex))
logger.debug('Usage statistics with private data:\n {0}'.
format(data))
logger.error("Usage statistics contains private info: '{type}:"
" {value}'. Part of the stats: {match}".format(
type=_data_type,
value=secret_data_types[_data_type],
match=_match.group('secret')))
_has_private_data = True
return _has_private_data
def _contain_public_ip(data, _used_networks):
_has_public_ip = False
_ip_regex = (r'\b((\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}'
r'(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\b')
_not_public_regex = [
r'\b10(\.\d{1,3}){3}',
r'\b127(\.\d{1,3}){3}',
r'\b169\.254(\.\d{1,3}){2}',
r'172\.(1[6-9]|2[0-9]|3[0-1])(\.\d{1,3}){2}',
r'192\.168(\.\d{1,3}){2}',
r'2(2[4-9]|[3-5][0-9])(\.\d{1,3}){3}'
]
for _match in re.finditer(_ip_regex, data):
# If IP address isn't public and doesn't belong to defined for
# deployment pools (e.g. admin, public, storage), then skip it
if any(re.search(_r, _match.group()) for _r in _not_public_regex) \
and not any(IPAddress(_match.group()) in IPNetwork(net) for
net in _used_networks):
continue
logger.debug('Usage statistics with public IP(s):\n {0}'.
format(data))
logger.error('Found public IP in usage statistics: "{0}"'.format(
_match.group()))
_has_public_ip = True
return _has_public_ip
private_data = {
'hostname': _settings['HOSTNAME'],
'dns_domain': _settings['DNS_DOMAIN'],
'dns_search': _settings['DNS_SEARCH'],
'dns_upstream': _settings['DNS_UPSTREAM'],
'fuel_password': _settings['FUEL_ACCESS']['password'] if
_settings['FUEL_ACCESS']['password'] != 'admin'
else 'DefaultPasswordIsNotAcceptableForSearch',
'nailgun_password': _settings['postgres']['nailgun_password'],
'keystone_password': _settings['postgres']['keystone_password'],
'ostf_password': _settings['postgres']['ostf_password'],
'cobbler_password': _settings['cobbler']['password'],
'astute_password': _settings['astute']['password'],
'mcollective_password': _settings['mcollective']['password'],
'keystone_admin_token': _settings['keystone']['admin_token'],
'keystone_nailgun_password': _settings['keystone']['nailgun_password'],
'keystone_ostf_password': _settings['keystone']['ostf_password'],
}
secret_data_types = {
'some_password': 'password',
'some_login': 'login',
'some_tenant': 'tenant',
'some_token': 'token',
'some_ip': r'\bip\b',
'some_netmask': 'netmask',
'some_network': r'network\b',
}
action_logs = [l.strip() for l in postgres_actions.run_query(
'nailgun', 'select id from action_logs;').split('\n')]
sent_stats = execute_query_on_collector(
collector_remote, master_uuid,
query="SELECT structure from installation_structures"
)
used_networks = [POOLS[net_name][0] for net_name in POOLS.keys()]
has_no_private_data = True
logger.debug("Looking for private data in the installation structure, "
"that was sent to collector")
if _contain_secret_data(sent_stats) or _contain_public_ip(sent_stats,
used_networks):
has_no_private_data = False
for log_id in action_logs:
log_data = postgres_actions.run_query(
'nailgun',
"select additional_info from action_logs where id = '{0}';".format(
log_id
))
logger.debug("Looking for private data in action log with ID={0}".
format(log_id))
if _contain_secret_data(log_data) or _contain_public_ip(log_data,
used_networks):
has_no_private_data = False
assert_true(has_no_private_data, 'Found private data in stats, check test '
'output and logs for details.')
logger.info('Found no private data in logs')
def check_kernel(kernel, expected_kernel):
assert_equal(kernel, expected_kernel,
"kernel version is wrong, it is {0}".format(kernel))


@@ -0,0 +1,141 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from fuelweb_test import logger as LOGGER
from fuelweb_test import logwrap as LOGWRAP
from cinderclient import client as cinderclient
from glanceclient.v1 import Client as glanceclient
from keystoneclient.v2_0 import Client as keystoneclient
from novaclient.v1_1 import Client as novaclient
import neutronclient.v2_0.client as neutronclient
from proboscis.asserts import assert_equal
class Common(object):
def __init__(self, controller_ip, user, password, tenant):
self.controller_ip = controller_ip
auth_url = 'http://{0}:5000/v2.0/'.format(self.controller_ip)
LOGGER.debug('Auth URL is {0}'.format(auth_url))
self.nova = novaclient(username=user,
api_key=password,
project_id=tenant,
auth_url=auth_url)
self.keystone = keystoneclient(username=user,
password=password,
tenant_name=tenant,
auth_url=auth_url)
self.cinder = cinderclient.Client(1, user, password,
tenant, auth_url)
self.neutron = neutronclient.Client(
username=user,
password=password,
tenant_name=tenant,
auth_url=auth_url)
token = self.keystone.auth_token
LOGGER.debug('Token is {0}'.format(token))
glance_endpoint = self.keystone.service_catalog.url_for(
service_type='image', endpoint_type='publicURL')
LOGGER.debug('Glance endpoint is {0}'.format(glance_endpoint))
self.glance = glanceclient(endpoint=glance_endpoint, token=token)
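# Usage sketch (placeholder IP and credentials):
#   conn = Common(controller_ip='10.109.1.3', user='admin',
#                 password='admin', tenant='admin')
#   conn.goodbye_security()  # permit all TCP/ICMP in the 'default' group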
def goodbye_security(self):
secgroup_list = self.nova.security_groups.list()
LOGGER.debug("Security list is {0}".format(secgroup_list))
secgroup_id = [i.id for i in secgroup_list if i.name == 'default'][0]
LOGGER.debug("Id of security group default is {0}".format(
secgroup_id))
LOGGER.debug('Permit all TCP and ICMP in security group default')
self.nova.security_group_rules.create(secgroup_id,
ip_protocol='tcp',
from_port=1,
to_port=65535)
self.nova.security_group_rules.create(secgroup_id,
ip_protocol='icmp',
from_port=-1,
to_port=-1)
def image_import(self, local_path, image, image_name, properties=None):
LOGGER.debug('Import image {0}/{1} to glance'.
format(local_path, image))
with open('{0}/{1}'.format(local_path, image), 'rb') as fimage:
LOGGER.debug('Try to open image')
self.glance.images.create(
name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=fimage,
properties=properties)
def create_key(self, key_name):
LOGGER.debug('Try to create key {0}'.format(key_name))
self.nova.keypairs.create(key_name)
def create_instance(self, flavor_name='test_flavor', ram=64, vcpus=1,
disk=1, server_name='test_instance', image_name=None,
neutron_network=False):
LOGGER.debug('Try to create instance')
start_time = time.time()
while time.time() - start_time < 100:
try:
if image_name:
image = [i.id for i in self.nova.images.list()
if i.name == image_name]
else:
image = [i.id for i in self.nova.images.list()]
break
except:
pass
else:
raise Exception('Cannot get image')
kwargs = {}
if neutron_network:
network = self.nova.networks.find(label='net04')
kwargs['nics'] = [{'net-id': network.id, 'v4-fixed-ip': ''}]
LOGGER.info('image uuid is {0}'.format(image))
flavor = self.nova.flavors.create(
name=flavor_name, ram=ram, vcpus=vcpus, disk=disk)
LOGGER.info('flavor is {0}'.format(flavor.name))
server = self.nova.servers.create(
name=server_name, image=image[0], flavor=flavor, **kwargs)
LOGGER.info('server is {0}'.format(server.name))
return server
@LOGWRAP
def get_instance_detail(self, server):
details = self.nova.servers.get(server)
return details
def verify_instance_status(self, server, expected_state):
def _verify_instance_state():
curr_state = self.get_instance_detail(server).status
assert_equal(expected_state, curr_state)
try:
_verify_instance_state()
except AssertionError:
LOGGER.debug('Instance is not active, '
'giving it one last chance: sleep 60 sec and retry')
time.sleep(60)
_verify_instance_state()
def delete_instance(self, server):
LOGGER.debug('Try to delete instance')
self.nova.servers.delete(server)
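# Lifecycle sketch (continues the conn example above; 'TestVM' is the
# image Fuel uploads by default):
#   server = conn.create_instance(image_name='TestVM')
#   conn.verify_instance_status(server, 'ACTIVE')
#   conn.delete_instance(server)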


@@ -0,0 +1,155 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import jinja2
from fuelweb_test import logger
from fuelweb_test.models import nailgun_client
from fuelweb_test.helpers import os_actions
class TempestConfigState(object):
default_options = {'username': 'test',
'password': 'test',
'tenant_name': 'test',
'alt_username': 'alt_test',
'alt_password': 'alt_test',
'alt_tenant_name': 'alt_test',
'public_network_name': 'net04_ext',
'image_name': 'TestVM'}
def __init__(self, admin_ip, cluster_id,
tempest_conf=None, **kwargs):
self.cluster_id = str(cluster_id)
self.admin_ip = admin_ip
self.tempest_template = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'tempest.conf.template')
self.tempest_conf = tempest_conf
self.options = dict(self.default_options, **kwargs)
def configure_nailgun(self):
"""Should be used for configuration that can be
gathered from nailgun api, e.g:
1. admin username, password, tenant_name
2. management_vip/public_vip
3. private network cidr
"""
client = nailgun_client.NailgunClient(self.admin_ip)
cluster_info = client.get_cluster(self.cluster_id)
self.options['net_provider'] = cluster_info['net_provider']
self._configure_nailgun_access(client)
self._configure_nailgun_networks(client)
def _configure_nailgun_access(self, client):
cluster_attrs = client.get_cluster_attributes(
self.cluster_id)
access = cluster_attrs['editable']['access']
self.options['admin_username'] = access['user']['value']
self.options['admin_tenant_name'] = access['tenant']['value']
self.options['admin_password'] = access['password']['value']
def _configure_nailgun_networks(self, client):
network_attrs = client.get_networks(self.cluster_id)
networking_params = network_attrs['networking_parameters']
if self.options['net_provider'] == 'neutron':
cidr = networking_params['internal_cidr']
else:
cidr = networking_params['fixed_networks_cidr']
self.options['internal_cidr'] = cidr
_, self.options['internal_mask'] = cidr.split('/')
self.options['management_vip'] = network_attrs['management_vip']
def configure_openstack(self):
"""
1. Fetch id of TestVM image
2. Fetch id of neutron public network and public router
3. Create non-admin user for keystone
"""
client = os_actions.OpenStackActions(
self.options['management_vip'],
user=self.options['admin_username'],
passwd=self.options['admin_password'],
tenant=self.options['admin_tenant_name'])
self._configure_openstack_keystone(client)
self._configure_openstack_glance(client)
if self.options['net_provider'] == 'neutron':
self._configure_openstack_neutron(client)
else:
self._configure_nova_network(client)
def _configure_openstack_neutron(self, client):
network = client.get_network(self.options['public_network_name'])
router = client.get_router(network)
self.options['public_network'] = network['id']
self.options['public_router'] = router['id']
def _configure_nova_network(self, client):
pass
def _configure_openstack_keystone(self, client):
# Keystone should create tenant/user or return existing
client.create_user_and_tenant(
self.options['tenant_name'],
self.options['username'],
self.options['password'])
client.create_user_and_tenant(
self.options['alt_tenant_name'],
self.options['alt_username'],
self.options['alt_password'])
def _configure_openstack_glance(self, client):
test_image = client.get_image(self.options['image_name'])
self.options['image_ref'] = test_image.id
def configure(self):
self.configure_nailgun()
self.configure_openstack()
def copy_config(self):
with open(self.tempest_template, 'r') as template:
j_template = jinja2.Template(template.read()).render(self.options)
with open(self.tempest_conf, 'w') as conf:
conf.write(j_template)
def main():
parser = argparse.ArgumentParser(description="""
Example: python helpers/conf_tempest.py -c 1 \
-n 10.108.10.2 \
-t /home/fuel/tempest/etc/tempest.conf
""")
parser.add_argument("-n", "--nailgun", help="Provide nailgun node ip.",
required=True)
parser.add_argument("-c", "--cluster", help="Provide cluster id",
required=True)
parser.add_argument(
"-t", "--tempest_config",
help="Path where tempest will look for config",
default='/etc/tempest/tempest.conf')
args = parser.parse_args()
conf = TempestConfigState(
args.nailgun, args.cluster,
tempest_conf=args.tempest_config)
conf.configure()
conf.copy_config()
if __name__ == '__main__':
logger.info('Starting tempest config generation.')
main()
logger.info('Finished tempest config generation.')


@@ -0,0 +1,298 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import json
import os
import sys
import time
import traceback
import urllib2
from os.path import expanduser
from devops.helpers import helpers
from fuelweb_test.helpers.checkers import check_action_logs
from fuelweb_test.helpers.checkers import check_stats_on_collector
from fuelweb_test.helpers.checkers import check_stats_private_info
from fuelweb_test.helpers.checkers import count_stats_on_collector
from proboscis import SkipTest
from proboscis.asserts import assert_equal
from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.regenerate_repo import CustomRepo
from fuelweb_test.helpers.utils import pull_out_logs_via_ssh
from fuelweb_test.helpers.utils import store_astute_yaml
def save_logs(url, filename):
logger.info('Saving logs to "{}" file'.format(filename))
try:
with open(filename, 'w') as f:
f.write(
urllib2.urlopen(url).read()
)
except (urllib2.HTTPError, urllib2.URLError) as e:
logger.error(e)
def log_snapshot_on_error(func):
"""Snapshot environment in case of error.
Decorator to snapshot environment when error occurred in test.
And always fetch diagnostic snapshot from master node
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
logger.info("\n" + "<" * 5 + "#" * 30 + "[ {} ]"
.format(func.__name__) + "#" * 30 + ">" * 5 + "\n{}"
.format(func.__doc__))
try:
return func(*args, **kwargs)
except SkipTest:
raise SkipTest()
except Exception as test_exception:
exc_trace = sys.exc_traceback
name = 'error_%s' % func.__name__
description = "Failed in method '%s'." % func.__name__
if args[0].env is not None:
try:
create_diagnostic_snapshot(args[0].env,
"fail", name)
except:
logger.error("Fetching of diagnostic snapshot failed: {0}".
format(traceback.format_exc()))
try:
admin_remote = args[0].env.get_admin_remote()
pull_out_logs_via_ssh(admin_remote, name)
except:
logger.error("Fetching of raw logs failed: {0}".
format(traceback.format_exc()))
finally:
logger.debug(args)
try:
args[0].env.make_snapshot(snapshot_name=name[-50:],
description=description,
is_make=True)
except:
logger.error("Error making the environment snapshot:"
" {0}".format(traceback.format_exc()))
raise test_exception, None, exc_trace
return wrapper
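# Sketch (hypothetical test class): the decorator above expects to wrap
# methods of objects that expose an env attribute.
class ExampleTest(object):
    env = None  # real tests carry an environment object here

    @log_snapshot_on_error
    def test_deploy(self):
        """Hypothetical test; on failure a diagnostic snapshot is taken."""
        pass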
def json_parse(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
response = func(*args, **kwargs)
return json.loads(response.read())
return wrapped
def upload_manifests(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
try:
if settings.UPLOAD_MANIFESTS:
logger.info("Uploading new manifests from %s" %
settings.UPLOAD_MANIFESTS_PATH)
if args[0].__class__.__name__ == "EnvironmentModel":
environment = args[0]
elif args[0].__class__.__name__ == "FuelWebClient":
environment = args[0].environment
else:
logger.warning("Can't upload manifests: method of "
"unexpected class is decorated.")
return result
remote = environment.get_admin_remote()
remote.execute('rm -rf /etc/puppet/modules/*')
remote.upload(settings.UPLOAD_MANIFESTS_PATH,
'/etc/puppet/modules/')
logger.info("Copying new site.pp from %s" %
settings.SITEPP_FOR_UPLOAD)
remote.execute("cp %s /etc/puppet/manifests" %
settings.SITEPP_FOR_UPLOAD)
if settings.SYNC_DEPL_TASKS:
remote.execute("fuel release --sync-deployment-tasks"
" --dir /etc/puppet/")
except Exception:
logger.error("Could not upload manifests")
raise
return result
return wrapper
def revert_info(snapshot_name, description=""):
logger.info("<" * 5 + "*" * 100 + ">" * 5)
logger.info("{} Make snapshot: {}".format(description, snapshot_name))
logger.info("You could revert this snapshot using [{command}]".format(
command="dos.py revert {env} --snapshot-name {name} && "
"dos.py resume {env} && virsh net-dumpxml {env}_admin | "
"grep -P {pattern} -o "
"| awk {awk_command}".format(
env=settings.ENV_NAME,
name=snapshot_name,
pattern="\"(\d+\.){3}\"",
awk_command="'{print \"Admin node IP: \"$0\"2\"}'"
)
)
)
logger.info("<" * 5 + "*" * 100 + ">" * 5)
def update_ostf(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
try:
if settings.UPLOAD_PATCHSET:
if not settings.GERRIT_REFSPEC:
raise ValueError('REFSPEC should be set for CI tests.')
logger.info("Uploading new patchset from {0}"
.format(settings.GERRIT_REFSPEC))
remote = args[0].environment.get_admin_remote()
remote.upload(settings.PATCH_PATH.rstrip('/'),
'/var/www/nailgun/fuel-ostf')
remote.execute('dockerctl shell ostf '
'bash -c "cd /var/www/nailgun/fuel-ostf; '
'python setup.py develop"')
remote.execute('dockerctl shell ostf '
'bash -c "supervisorctl restart ostf"')
helpers.wait(
lambda: "0" in
remote.execute('dockerctl shell ostf '
'bash -c "pgrep [o]stf; echo $?"')
['stdout'][1], timeout=60)
logger.info("OSTF status: RUNNING")
except Exception as e:
logger.error("Could not upload patch set {e}".format(e=e))
raise
return result
return wrapper
def create_diagnostic_snapshot(env, status, name=""):
task = env.fuel_web.task_wait(env.fuel_web.client.generate_logs(), 60 * 5)
url = "http://{}:8000{}".format(
env.get_admin_node_ip(), task['message']
)
log_file_name = '{status}_{name}-{time}.tar.gz'.format(
status=status,
name=name,
time=time.strftime("%Y_%m_%d__%H_%M_%S", time.gmtime())
)
save_logs(url, os.path.join(settings.LOGS_DIR, log_file_name))
def retry(count=3, delay=30):
def wrapped(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
i = 0
while True:
try:
return func(*args, **kwargs)
except:
i += 1
if i >= count:
raise
time.sleep(delay)
return wrapper
return wrapped
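# Example (hypothetical): retry wraps a flaky call and re-raises only
# after the attempts are exhausted; the delay is shortened for
# illustration.
if __name__ == '__main__':
    import random

    @retry(count=5, delay=1)
    def flaky():
        if random.random() < 0.5:
            raise RuntimeError('transient failure')
        return 'ok'

    print(flaky())  # retried up to 5 times, one second apart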
def custom_repo(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
custom_pkgs = CustomRepo(args[0].environment)
try:
if settings.CUSTOM_PKGS_MIRROR:
custom_pkgs.prepare_repository()
except Exception:
logger.error("Unable to get custom packages from {0}\n{1}"
.format(settings.CUSTOM_PKGS_MIRROR,
traceback.format_exc()))
raise
try:
return func(*args, **kwargs)
except Exception:
custom_pkgs.check_puppet_logs()
raise
return wrapper
def check_fuel_statistics(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if not settings.FUEL_STATS_CHECK:
return result
logger.info('Test "{0}" passed. Checking stats.'.format(func.__name__))
fuel_settings = args[0].env.get_fuel_settings()
nailgun_actions = args[0].env.nailgun_actions
postgres_actions = args[0].env.postgres_actions
remote_collector = args[0].env.get_ssh_to_remote_by_key(
settings.FUEL_STATS_HOST,
'{0}/.ssh/id_rsa'.format(expanduser("~")))
master_uuid = args[0].env.get_masternode_uuid()
logger.info("Master Node UUID: '{0}'".format(master_uuid))
nailgun_actions.force_fuel_stats_sending()
if not settings.FUEL_STATS_ENABLED:
assert_equal(0, int(count_stats_on_collector(remote_collector,
master_uuid)),
"Sending of Fuel stats is disabled in test, but "
"usage info was sent to collector!")
assert_equal(args[0].env.postgres_actions.count_sent_action_logs(),
0, ("Sending of Fuel stats is disabled in test, but "
"usage info was sent to collector!"))
return result
test_scenario = inspect.getdoc(func)
if 'Scenario' not in test_scenario:
logger.warning(("Can't check that fuel statistics was gathered "
"and sent to collector properly because '{0}' "
"test doesn't contain correct testing scenario. "
"Skipping...").format(func.__name__))
return func(*args, **kwargs)
try:
check_action_logs(test_scenario, postgres_actions)
check_stats_private_info(remote_collector,
postgres_actions,
master_uuid,
fuel_settings)
check_stats_on_collector(remote_collector,
postgres_actions,
master_uuid)
return result
except Exception:
logger.error(traceback.format_exc())
raise
return wrapper
def download_astute_yaml(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if settings.STORE_ASTUTE_YAML:
store_astute_yaml(args[0].env)
return result
return wrapper


@@ -0,0 +1,82 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from fuelweb_test import logwrap
class Ebtables(object):
def __init__(self, target_devs, vlans):
super(Ebtables, self).__init__()
self.target_devs = target_devs
self.vlans = vlans
@logwrap
def restore_vlans(self):
for vlan in self.vlans:
for target_dev in self.target_devs:
Ebtables.restore_vlan(target_dev, vlan)
@logwrap
def restore_first_vlan(self):
for target_dev in self.target_devs:
Ebtables.restore_vlan(target_dev, self.vlans[0])
@logwrap
def block_first_vlan(self):
for target_dev in self.target_devs:
Ebtables.block_vlan(target_dev, self.vlans[0])
@staticmethod
@logwrap
def block_mac(mac):
return subprocess.check_output(
['sudo', 'ebtables', '-t', 'filter', '-A', 'FORWARD', '-s',
mac, '-j', 'DROP'],
stderr=subprocess.STDOUT
)
@staticmethod
@logwrap
def restore_mac(mac):
return subprocess.call(
[
'sudo', 'ebtables', '-t', 'filter',
'-D', 'FORWARD', '-s', mac, '-j', 'DROP'
],
stderr=subprocess.STDOUT,
)
@staticmethod
@logwrap
def restore_vlan(target_dev, vlan):
return subprocess.call(
[
'sudo', 'ebtables', '-t', 'broute', '-D', 'BROUTING', '-i',
target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP'
],
stderr=subprocess.STDOUT,
)
@staticmethod
@logwrap
def block_vlan(target_dev, vlan):
return subprocess.check_output(
[
'sudo', 'ebtables', '-t', 'broute', '-A', 'BROUTING', '-i',
target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP'
],
stderr=subprocess.STDOUT
)
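# Usage sketch (placeholder devices/VLANs; needs sudo on the test host):
#   eb = Ebtables(target_devs=['virbr1', 'virbr2'], vlans=[100, 101])
#   eb.block_first_vlan()    # DROP 802.1Q frames for VLAN 100 on both devs
#   eb.restore_first_vlan()  # remove the DROP rules again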


@@ -0,0 +1,127 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
import re
from proboscis.asserts import assert_equal
from fuelweb_test import logger
class BaseActions(object):
def __init__(self, admin_remote):
self.admin_remote = admin_remote
self.container = None
def execute_in_container(self, command, container=None, exit_code=None,
stdin=None):
if not container:
container = self.container
cmd = 'dockerctl shell {0} {1}'.format(container, command)
if stdin is not None:
cmd = 'echo "{0}" | {1}'.format(stdin, cmd)
result = self.admin_remote.execute(cmd)
if exit_code is not None:
assert_equal(exit_code,
result['exit_code'],
('Command {cmd} returned exit code "{e}", but '
'expected "{c}". Output: {out}; {err} ').format(
cmd=cmd,
e=result['exit_code'],
c=exit_code,
out=result['stdout'],
err=result['stderr']
))
return ''.join(result['stdout']).strip()
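# Usage sketch: admin_remote is an SSH connection to the master node,
# and the file path is illustrative, e.g.
#   actions = BaseActions(admin_remote)
#   version = actions.execute_in_container(
#       'cat /etc/fuel/version.yaml', container='nailgun', exit_code=0)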
class NailgunActions(BaseActions):
def __init__(self, admin_remote):
super(NailgunActions, self).__init__(admin_remote)
self.container = 'nailgun'
def update_nailgun_settings_once(self, settings):
# temporarily change Nailgun settings (until the next container restart)
cfg_file = '/etc/nailgun/settings.yaml'
ng_settings = yaml.load(self.execute_in_container(
'cat {0}'.format(cfg_file), exit_code=0))
ng_settings.update(settings)
logger.debug('Uploading new nailgun settings: {}'.format(
ng_settings))
self.execute_in_container('tee {0}'.format(cfg_file),
stdin=yaml.dump(ng_settings),
exit_code=0)
def set_collector_address(self, host, port, ssl=False):
cmd = ("awk '/COLLECTOR.*URL/' /usr/lib/python2.6"
"/site-packages/nailgun/settings.yaml")
protocol = 'http' if not ssl else 'https'
parameters = {}
for p in self.execute_in_container(cmd, exit_code=0).split('\n'):
parameters[p.split(': ')[0]] = re.sub(
r'https?://\{collector_server\}',
'{0}://{1}:{2}'.format(protocol, host, port),
p.split(': ')[1])[1:-1]
logger.debug('Custom collector parameters: {0}'.format(parameters))
self.update_nailgun_settings_once(parameters)
if ssl:
# if the test collector server doesn't have a trusted SSL cert
# installed, we have to use this hack to disable cert verification
# and allow the use of a self-signed SSL certificate
cmd = ("sed -i '/elf.verify/ s/True/False/' /usr/lib/python2.6"
"/site-packages/requests/sessions.py")
self.execute_in_container(cmd, exit_code=0)
def force_fuel_stats_sending(self):
log_file = '/var/log/nailgun/statsenderd.log'
# Rotate logs on restart in order to get rid of old errors
cmd = 'mv {0}{{,.backup_$(date +%s)}}'.format(log_file)
self.execute_in_container(cmd)
cmd = 'supervisorctl restart statsenderd'
self.execute_in_container(cmd, exit_code=0)
cmd = 'grep -sw "ERROR" {0}'.format(log_file)
try:
self.execute_in_container(cmd, exit_code=1)
except AssertionError:
logger.error(("Fuel stats were sent with errors! Check its log"
"s in {0} for details.").format(log_file))
raise
class PostgresActions(BaseActions):
def __init__(self, admin_remote):
super(PostgresActions, self).__init__(admin_remote)
self.container = 'postgres'
def run_query(self, db, query):
cmd = "su - postgres -c 'psql -qt -d {0} -c \"{1};\"'".format(
db, query)
return self.execute_in_container(cmd, exit_code=0)
def action_logs_contain(self, action, group=False,
table='action_logs'):
logger.info("Checking that '{0}' action was logged..".format(
action))
log_filter = "action_name" if not group else "action_group"
q = "select id from {0} where {1} = '\"'\"'{2}'\"'\"'".format(
table, log_filter, action)
logs = [i.strip() for i in self.run_query('nailgun', q).split('\n')
if re.compile(r'\d+').match(i.strip())]
logger.info("Found log records with ids: {0}".format(logs))
return len(logs) > 0
def count_sent_action_logs(self, table='action_logs'):
q = "select count(id) from {0} where is_sent = True".format(table)
return int(self.run_query('nailgun', q))


@@ -0,0 +1,131 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import traceback
import urllib2
from keystoneclient.v2_0 import Client as keystoneclient
from keystoneclient import exceptions
from fuelweb_test import logger
class HTTPClient(object):
def __init__(self, url, keystone_url, credentials, **kwargs):
logger.info('Initialize HTTPClient with url %s', url)
self.url = url
self.keystone_url = keystone_url
self.creds = dict(credentials, **kwargs)
self.keystone = None
self.opener = urllib2.build_opener(urllib2.HTTPHandler)
def authenticate(self):
try:
logger.info('Initialize keystoneclient with url %s',
self.keystone_url)
self.keystone = keystoneclient(
auth_url=self.keystone_url, **self.creds)
# depending on the keystone version, some clients authenticate
# implicitly and some don't, so we always authenticate explicitly
self.keystone.authenticate()
logger.debug('Authorization token is successfully updated')
except exceptions.AuthorizationFailure:
logger.warning(
"Can't establish connection to keystone with url %s",
self.keystone_url)
@property
def token(self):
if self.keystone is not None:
try:
return self.keystone.auth_token
except exceptions.AuthorizationFailure:
logger.warning(
"Can't establish connection to keystone with url %s",
self.keystone_url)
except exceptions.Unauthorized:
logger.warning("Keystone returned unauthorized error, trying "
"to pass authentication.")
self.authenticate()
return self.keystone.auth_token
return None
def get(self, endpoint):
req = urllib2.Request(self.url + endpoint)
return self._open(req)
def post(self, endpoint, data=None, content_type="application/json"):
if not data:
data = {}
logger.info('self url is %s' % self.url)
req = urllib2.Request(self.url + endpoint, data=json.dumps(data))
req.add_header('Content-Type', content_type)
return self._open(req)
def put(self, endpoint, data=None, content_type="application/json"):
if not data:
data = {}
req = urllib2.Request(self.url + endpoint, data=json.dumps(data))
req.add_header('Content-Type', content_type)
req.get_method = lambda: 'PUT'
return self._open(req)
def delete(self, endpoint):
req = urllib2.Request(self.url + endpoint)
req.get_method = lambda: 'DELETE'
return self._open(req)
def _open(self, req):
try:
return self._get_response(req)
except urllib2.HTTPError as e:
if e.code == 401:
logger.warning('Authorization failure: {0}'.format(e.read()))
self.authenticate()
return self._get_response(req)
else:
raise
def _get_response(self, req):
if self.token is not None:
try:
logger.debug('Set X-Auth-Token to {0}'.format(self.token))
req.add_header("X-Auth-Token", self.token)
except exceptions.AuthorizationFailure:
logger.warning('Failed with auth in http _get_response')
logger.warning(traceback.format_exc())
return self.opener.open(req)
class HTTPClientZabbix(object):
def __init__(self, url):
self.url = url
self.opener = urllib2.build_opener(urllib2.HTTPHandler)
def get(self, endpoint=None, cookie=None):
req = urllib2.Request(self.url + endpoint)
if cookie:
req.add_header('cookie', cookie)
return self.opener.open(req)
def post(self, endpoint=None, data=None, content_type="text/css",
cookie=None):
if not data:
data = {}
req = urllib2.Request(self.url + endpoint, data=json.dumps(data))
req.add_header('Content-Type', content_type)
if cookie:
req.add_header('cookie', cookie)
return self.opener.open(req)
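# Usage sketch (placeholder URLs and credentials; the endpoint path is
# assumed):
#   client = HTTPClient(
#       url='http://10.109.0.2:8000',
#       keystone_url='http://10.109.0.2:5000/v2.0',
#       credentials={'username': 'admin', 'password': 'admin',
#                    'tenant_name': 'admin'})
#   releases = json.loads(client.get('/api/releases/').read())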


@@ -0,0 +1,10 @@
#!/bin/sh
echo "Creating test file"
touch /home/test_file
echo "Creating volume mount script on instance"
echo "#!/bin/sh\nsudo /usr/sbin/mkfs.ext4 /dev/vdb\nsudo mount /dev/vdb /mnt" | tee /home/mount_volume.sh
chmod 777 /home/mount_volume.sh
echo -e "test\ntest" | passwd cirros


@@ -0,0 +1,77 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import select
import socket
import threading
from fuelweb_test import logwrap
class LogServer(threading.Thread):
@logwrap
def __init__(self, address="localhost", port=5514):
super(LogServer, self).__init__()
self.socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM
)
self.socket.bind((str(address), port))
self.rlist = [self.socket]
self._stop = threading.Event()
self._handler = self.handler
self._status = False
def handler(self, messages):
pass
def set_status(self, status):
self._status = status
def get_status(self):
return self._status
def set_handler(self, handler):
self._handler = handler
@logwrap
def stop(self):
self.socket.close()
self._stop.set()
def started(self):
return not self._stop.is_set()
def rude_join(self, timeout=None):
self._stop.set()
super(LogServer, self).join(timeout)
def join(self, timeout=None):
self.rude_join(timeout)
@logwrap
def run(self):
while self.started():
r, w, e = select.select(self.rlist, [], [], 1)
if self.socket in r:
message, addr = self.socket.recvfrom(2048)
self._handler(message)
class TriggeredLogServer(LogServer):
def __init__(self, address="localhost", port=5514):
super(TriggeredLogServer, self).__init__(address, port)
self.set_handler(self.handler)
def handler(self, message):
self.set_status(True)
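
A minimal sketch of driving TriggeredLogServer (loopback address and timing are arbitrary): start it, fire a single UDP datagram at it, and poll get_status():

    import socket
    import time

    server = TriggeredLogServer(address='127.0.0.1', port=5514)
    server.start()
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto('<14>test: hello', ('127.0.0.1', 5514))
    time.sleep(2)  # let the select() loop pick the datagram up
    assert server.get_status()
    server.rude_join()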

View File

@ -0,0 +1,105 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(apanchenko): This file contains hacks (e.g. configuring of dhcp-server
# or firewall on master node) which are used for testing multiple cluster
# networks feature:
# https://blueprints.launchpad.net/fuel/+spec/multiple-cluster-networks
# This code should be removed from tests as soon as automatic cobbler
# configuring for non-default admin (PXE) networks is implemented in Fuel
from ipaddr import IPNetwork
from proboscis.asserts import assert_equal
from fuelweb_test import settings
from fuelweb_test import logwrap
@logwrap
def configure_second_admin_cobbler(self):
dhcp_template = '/etc/cobbler/dnsmasq.template'
remote = self.get_admin_remote()
main_admin_ip = str(self.nodes().admin.
get_ip_address_by_network_name(self.admin_net))
second_admin_ip = str(self.nodes().admin.
get_ip_address_by_network_name(self.admin_net2))
second_admin_network = self._get_network(self.admin_net2).split('/')[0]
second_admin_netmask = self.get_net_mask(self.admin_net2)
network = IPNetwork('{0}/{1}'.format(second_admin_network,
second_admin_netmask))
discovery_subnet = [net for net in network.iter_subnets(1)][-1]
first_discovery_address = str(discovery_subnet.network)
last_discovery_address = str(discovery_subnet.broadcast - 1)
new_range = ('dhcp-range=internal2,{0},{1},{2}\\n'
'dhcp-option=net:internal2,option:router,{3}\\n'
'dhcp-boot=net:internal2,pxelinux.0,boothost,{4}\\n').\
format(first_discovery_address, last_discovery_address,
second_admin_netmask, second_admin_ip, main_admin_ip)
cmd = ("dockerctl shell cobbler sed -r '$a \{0}' -i {1};"
"dockerctl shell cobbler cobbler sync").format(new_range,
dhcp_template)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0, ('Failed to add second admin '
'network to cobbler: {0}').format(result))
@logwrap
def configure_second_admin_firewall(self, network, netmask):
remote = self.get_admin_remote()
# Allow forwarding and correct remote logging
# for nodes from the second admin network
rules = [
('-t nat -I POSTROUTING -s {0}/{1} -p udp -m udp --dport 514 -m'
' comment --comment "rsyslog-udp-514-unmasquerade" -j ACCEPT;').
format(network, netmask),
('-t nat -I POSTROUTING -s {0}/{1} -p tcp -m tcp --dport 514 -m'
' comment --comment "rsyslog-tcp-514-unmasquerade" -j ACCEPT;').
format(network, netmask),
('-t nat -I POSTROUTING -s {0}/{1} -o eth+ -m comment --comment '
'"004 forward_admin_net2" -j MASQUERADE').
format(network, netmask),
('-I FORWARD -i {0} -o docker0 -p tcp -m state --state NEW -m tcp'
' --dport 514 -m comment --comment "rsyslog-tcp-514-accept" -j '
'ACCEPT').format(settings.INTERFACES.get(self.admin_net2)),
('-I FORWARD -i {0} -o docker0 -p udp -m state --state NEW -m udp'
' --dport 514 -m comment --comment "rsyslog-udp-514-accept" -j '
'ACCEPT').format(settings.INTERFACES.get(self.admin_net2))
]
for rule in rules:
cmd = 'iptables {0}'.format(rule)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to add firewall rule for second admin net '
'on master node: {0}, {1}').format(rule, result))
# Save new firewall configuration
cmd = 'service iptables save'
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to save firewall configuration on master node:'
' {0}').format(result))
@logwrap
def configure_second_dhcrelay(self):
remote = self.get_admin_remote()
second_admin_if = settings.INTERFACES.get(self.admin_net2)
sed_cmd = "/ interface:/a \ interface: {0}".format(second_admin_if)
self.fuel_web.modify_python_file(remote, sed_cmd,
settings.FUEL_SETTINGS_YAML)
cmd = ('supervisorctl restart dhcrelay_monitor; '
'pgrep -f "[d]hcrelay.*{0}"').format(second_admin_if)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0, ('Failed to start DHCP relay on '
'second admin interface: {0}').format(result))
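
Note that these hacks are written as unbound functions with a self argument, so a caller has to graft them onto an environment object first; a sketch under the assumption that env is an EnvironmentModel-compatible instance:

    import types

    env.configure_second_dhcrelay = types.MethodType(
        configure_second_dhcrelay, env)
    env.configure_second_dhcrelay()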

View File

@ -0,0 +1,377 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paramiko
from proboscis import asserts
import random
import time
from devops.error import TimeoutError
from devops.helpers import helpers
from fuelweb_test.helpers import common
from fuelweb_test import logger
class OpenStackActions(common.Common):
def __init__(self, controller_ip, user='admin',
passwd='admin', tenant='admin'):
super(OpenStackActions, self).__init__(controller_ip,
user, passwd,
tenant)
def _get_cirros_image(self):
for image in self.glance.images.list():
if image.name.startswith("TestVM"):
return image
def get_hypervisors(self):
hypervisors = self.nova.hypervisors.list()
if hypervisors:
return hypervisors
def get_hypervisor_vms_count(self, hypervisor):
hypervisor = self.nova.hypervisors.get(hypervisor.id)
return getattr(hypervisor, "running_vms")
def get_hypervisor_hostname(self, hypervisor):
hypervisor = self.nova.hypervisors.get(hypervisor.id)
return getattr(hypervisor, "hypervisor_hostname")
def get_srv_hypervisor_name(self, srv):
srv = self.nova.servers.get(srv.id)
return getattr(srv, "OS-EXT-SRV-ATTR:hypervisor_hostname")
def get_servers(self):
servers = self.nova.servers.list()
if servers:
return servers
def create_server_for_migration(self, neutron=False, scenario='',
timeout=100, file=None, key_name=None):
name = "test-serv" + str(random.randint(1, 0x7fffffff))
security_group = {}
try:
if scenario:
with open(scenario, "r+") as f:
scenario = f.read()
except Exception as exc:
logger.info("Error opening file: %s" % exc)
raise Exception()
image_id = self._get_cirros_image().id
security_group[self.keystone.tenant_id] =\
self.create_sec_group_for_ssh()
security_group = [security_group[
self.keystone.tenant_id].name]
if neutron:
network = [net.id for net in self.nova.networks.list()
if net.label == 'net04']
kwargs = {'nics': [{'net-id': network[0]}],
'security_groups': security_group}
else:
kwargs = {'security_groups': security_group}
srv = self.nova.servers.create(name=name,
image=image_id,
flavor=1,
userdata=scenario,
files=file,
key_name=key_name,
**kwargs)
try:
helpers.wait(
lambda: self.get_instance_detail(srv).status == "ACTIVE",
timeout=timeout)
return self.get_instance_detail(srv.id)
except TimeoutError:
logger.debug("Create server for migration failed by timeout")
asserts.assert_equal(
self.get_instance_detail(srv).status,
"ACTIVE",
"Instance do not reach active state, current state"
" is {0}".format(self.get_instance_detail(srv).status))
def verify_srv_deleted(self, srv):
try:
if self.get_instance_detail(srv.id):
logger.info("Try getting server another time.")
time.sleep(30)
if self.get_instance_detail(srv.id) in \
self.nova.servers.list():
return False
except Exception:
logger.info("Server was successfully deleted")
return True
def assign_floating_ip(self, srv):
fl_ips_pool = self.nova.floating_ip_pools.list()
if fl_ips_pool:
floating_ip = self.nova.floating_ips.create(
pool=fl_ips_pool[0].name)
self.nova.servers.add_floating_ip(srv, floating_ip)
return floating_ip
def create_sec_group_for_ssh(self):
name = "test-sg" + str(random.randint(1, 0x7fffffff))
secgroup = self.nova.security_groups.create(
name, "descr")
rulesets = [
{
# ssh
'ip_protocol': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
for ruleset in rulesets:
self.nova.security_group_rules.create(
secgroup.id, **ruleset)
return secgroup
def get_srv_host_name(self, srv):
# Get host name server is currently on
srv = self.nova.servers.get(srv.id)
return getattr(srv, "OS-EXT-SRV-ATTR:host")
def migrate_server(self, server, host, timeout):
curr_host = self.get_srv_host_name(server)
logger.debug("Current compute host is {0}".format(curr_host))
logger.debug("Start live migration of instance")
server.live_migrate(host._info['host_name'])
try:
helpers.wait(
lambda: self.get_instance_detail(server).status == "ACTIVE",
timeout=timeout)
except TimeoutError:
logger.debug("Instance do not became active after migration")
asserts.assert_true(
self.get_instance_detail(server).status == "ACTIVE",
"Instance do not become Active after live migration, "
"current status is {0}".format(
self.get_instance_detail(server).status))
asserts.assert_true(
self.get_srv_host_name(
self.get_instance_detail(server)) != curr_host,
"Server did not migrate")
server = self.get_instance_detail(server.id)
return server
def create_volume(self, size=1):
volume = self.cinder.volumes.create(size)
helpers.wait(
lambda: self.cinder.volumes.get(volume.id).status == "available",
timeout=100)
logger.info("Created volume")
return self.cinder.volumes.get(volume.id)
def attach_volume(self, volume, server, mount='/dev/vdb'):
self.cinder.volumes.attach(volume, server.id, mount)
return self.cinder.volumes.get(volume.id)
def get_hosts_for_migr(self, srv_host_name):
# Determine which host is available for live migration
host_list = filter(lambda host: host.host_name != srv_host_name,
self.nova.hosts.list())
return filter(lambda host: host._info['service'] == 'compute',
host_list)
def get_md5sum(self, file_path, controller_ssh, vm_ip, creds=()):
logger.info("Get file md5sum and compare it with previous one")
out = self.execute_through_host(
controller_ssh, vm_ip, "md5sum %s" % file_path, creds)
return out
def execute_through_host(self, ssh, vm_host, cmd, creds=()):
try:
logger.info("Making intermediate transport")
interm_transp = ssh._ssh.get_transport()
logger.info("Opening channel to VM")
interm_chan = interm_transp.open_channel('direct-tcpip',
(vm_host, 22),
(ssh.host, 0))
logger.info("Opening paramiko transport")
transport = paramiko.Transport(interm_chan)
logger.info("Starting client")
transport.start_client()
logger.info("Passing authentication to VM")
if not creds:
creds = ('cirros', 'cubswin:)')
transport.auth_password(creds[0], creds[1])
logger.info("Opening session")
channel = transport.open_session()
logger.info("Executing command")
channel.exec_command(cmd)
logger.info("Getting exit status")
output = channel.recv(1024)
logger.info("Sending shutdown write signal")
channel.shutdown_write()
return output
except Exception as exc:
logger.error("An exception occurred: %s" % exc)
return ''
def get_tenant(self, tenant_name):
tenant_list = self.keystone.tenants.list()
for ten in tenant_list:
if ten.name == tenant_name:
return ten
return None
def get_user(self, username):
user_list = self.keystone.users.list()
for user in user_list:
if user.name == username:
return user
return None
def create_tenant(self, tenant_name):
tenant = self.get_tenant(tenant_name)
if tenant:
return tenant
return self.keystone.tenants.create(enabled=True,
tenant_name=tenant_name)
def create_user(self, username, passw, tenant):
user = self.get_user(username)
if user:
return user
return self.keystone.users.create(
name=username, password=passw, tenant_id=tenant.id)
def create_user_and_tenant(self, tenant_name, username, password):
tenant = self.create_tenant(tenant_name)
return self.create_user(username, password, tenant)
def get_network(self, network_name):
net_list = self.neutron.list_networks()
for net in net_list['networks']:
if net['name'] == network_name:
return net
return None
def get_router(self, network):
router_list = self.neutron.list_routers()
for router in router_list['routers']:
network_id = router['external_gateway_info'].get('network_id')
if network_id == network['id']:
return router
return None
def get_image_list(self):
return self.glance.images.list()
def get_image(self, image_name):
image_list = self.get_image_list()
for img in image_list:
if img.name == image_name:
return img
return None
def get_image_data(self, image_name):
return self.glance.images.data(image_name)
def get_nova_service_list(self):
return self.nova.services.list()
def get_nova_network_list(self):
return self.nova.networks.list()
def get_neutron_router(self):
return self.neutron.list_routers()
def get_routers_ids(self):
result = self.get_neutron_router()
ids = [i['id'] for i in result['routers']]
return ids
def get_l3_for_router(self, router_id):
return self.neutron.list_l3_agent_hosting_routers(router_id)
def get_l3_agent_ids(self, router_id):
result = self.get_l3_for_router(router_id)
ids = [i['id'] for i in result['agents']]
return ids
def get_l3_agent_hosts(self, router_id):
result = self.get_l3_for_router(router_id)
hosts = [i['host'] for i in result['agents']]
return hosts
def remove_l3_from_router(self, l3_agent, router_id):
return self.neutron.remove_router_from_l3_agent(l3_agent, router_id)
def add_l3_to_router(self, l3_agent, router_id):
return self.neutron.add_router_to_l3_agent(
l3_agent, {"router_id": router_id})
def list_agents(self):
return self.neutron.list_agents()
def get_available_l3_agents_ids(self, hosted_l3_agent_id):
result = self.list_agents()
ids = [i['id'] for i in result['agents']
if i['binary'] == 'neutron-l3-agent']
ids.remove(hosted_l3_agent_id)
return ids
def list_dhcp_agents_for_network(self, net_id):
return self.neutron.list_dhcp_agent_hosting_networks(net_id)
def get_node_with_dhcp_for_network(self, net_id):
result = self.list_dhcp_agents_for_network(net_id)
nodes = [i['host'] for i in result['agents']]
return nodes
def create_pool(self, pool_name):
sub_net = self.neutron.list_subnets()
body = {"pool": {"name": pool_name,
"lb_method": "ROUND_ROBIN",
"protocol": "HTTP",
"subnet_id": sub_net['subnets'][0]['id']}}
return self.neutron.create_pool(body=body)
def get_vips(self):
return self.neutron.list_vips()
def create_vip(self, name, protocol, port, pool):
sub_net = self.neutron.list_subnets()
logger.debug("subnet list is {0}".format(sub_net))
logger.debug("pool is {0}".format(pool))
body = {"vip": {
"name": name,
"protocol": protocol,
"protocol_port": port,
"subnet_id": sub_net['subnets'][0]['id'],
"pool_id": pool['pool']['id']
}}
return self.neutron.create_vip(body=body)
def delete_vip(self, vip):
return self.neutron.delete_vip(vip)
def get_vip(self, vip):
return self.neutron.show_vip(vip)
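
A condensed happy-path sketch of this helper (the controller IP is hypothetical, and a reachable OpenStack cluster is assumed): boot an instance, give it a floating IP, then attach a freshly created volume:

    os_conn = OpenStackActions('10.109.1.2')
    srv = os_conn.create_server_for_migration(neutron=True)
    fip = os_conn.assign_floating_ip(srv)
    vol = os_conn.create_volume(size=1)
    os_conn.attach_volume(vol, srv)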

View File

@ -0,0 +1,8 @@
#!/bin/bash
REPO_PATH=$1
# Find the comps.xml (package groups) file referenced from repomd.xml
COMPSXML=$(awk -F'"' '$4 ~ /comps.xml$/{print $4; exit}' ${REPO_PATH}/repodata/repomd.xml)
createrepo -g ${REPO_PATH}/${COMPSXML} -o ${REPO_PATH} ${REPO_PATH} 2>/dev/null

View File

@ -0,0 +1,389 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
import os
import re
import urllib2
import zlib
from proboscis.asserts import assert_equal
from xml.etree import ElementTree
from fuelweb_test import logger
from fuelweb_test import settings
class CustomRepo(object):
def __init__(self, environment):
self.env = environment
self.path_scripts = ('{0}/fuelweb_test/helpers/'
.format(os.environ.get("WORKSPACE", "./")))
self.remote_path_scripts = '/tmp/'
self.ubuntu_script = 'regenerate_ubuntu_repo'
self.centos_script = 'regenerate_centos_repo'
self.local_mirror_ubuntu = settings.LOCAL_MIRROR_UBUNTU
self.local_mirror_centos = settings.LOCAL_MIRROR_CENTOS
self.ubuntu_release = settings.UBUNTU_RELEASE
self.ubuntu_yaml_versions = ('/etc/puppet/manifests/'
'ubuntu-versions.yaml')
self.centos_yaml_versions = ('/etc/puppet/manifests/'
'centos-versions.yaml')
self.centos_supported_archs = ['noarch', 'x86_64']
self.pkgs_list = []
self.custom_pkgs_mirror_path = ''
if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
# Trying to determine the root of Ubuntu repository
pkgs_path = settings.CUSTOM_PKGS_MIRROR.split('/dists/')
if len(pkgs_path) == 2:
self.custom_pkgs_mirror = pkgs_path[0]
self.custom_pkgs_mirror_path = '/dists/{}'.format(pkgs_path[1])
else:
self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR
else:
self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR
def prepare_repository(self):
"""Prepare admin node to packages testing
Scenario:
1. Temporary set nameserver to local router on admin node
2. Install tools to manage rpm/deb repository
3. Retrive list of packages from custom repository
4. Download packages to local rpm/deb repository
5. Update .yaml file with new packages version
6. Re-generate repo using shell scripts on admin node
"""
# Skip the preparation if no custom package mirror is configured
if not self.custom_pkgs_mirror:
return
logger.info("Custom mirror with new packages: {0}"
.format(settings.CUSTOM_PKGS_MIRROR))
# Modify admin resolv.conf to use local host resolver
dns_server = self.env.get_virtual_environment().router()
new_resolv_conf = ["nameserver {0}".format(dns_server)]
# Set the local router as nameserver that will allow
# the admin node to access the Mirantis custom repositories.
old_resolv_conf = self.env.modify_resolv_conf(new_resolv_conf)
if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
# Ubuntu
master_tools = ['dpkg', 'dpkg-devel']
self.install_tools(master_tools)
self.get_pkgs_list_ubuntu()
pkgs_local_path = ('{0}/pool/'
.format(self.local_mirror_ubuntu))
self.download_pkgs(pkgs_local_path)
self.update_yaml(self.ubuntu_yaml_versions)
self.regenerate_repo(self.ubuntu_script, self.local_mirror_ubuntu)
else:
# CentOS
master_tools = ['createrepo']
self.install_tools(master_tools)
self.get_pkgs_list_centos()
pkgs_local_path = '{0}/Packages/'.format(self.local_mirror_centos)
self.download_pkgs(pkgs_local_path)
self.update_yaml(self.centos_yaml_versions)
self.regenerate_repo(self.centos_script, self.local_mirror_centos)
# Restore original admin resolv.conf
self.env.modify_resolv_conf(old_resolv_conf, merge=False)
# Install tools on the master node
def install_tools(self, master_tools=()):
logger.info("Installing necessary tools for {0}"
.format(settings.OPENSTACK_RELEASE))
for master_tool in master_tools:
exit_code = self.env.admin_install_pkg(master_tool)
assert_equal(0, exit_code, 'Cannot install package {0} '
'on admin node.'.format(master_tool))
# Ubuntu: Creating list of packages from the additional mirror
def get_pkgs_list_ubuntu(self):
url = "{0}/{1}/Packages".format(self.custom_pkgs_mirror,
self.custom_pkgs_mirror_path)
logger.info("Retriving additional packages from the custom mirror:"
" {0}".format(url))
try:
pkgs_release = urllib2.urlopen(url).read()
except (urllib2.HTTPError, urllib2.URLError):
logger.error(traceback.format_exc())
url_gz = '{0}.gz'.format(url)
logger.info("Retriving additional packages from the custom mirror:"
" {0}".format(url_gz))
try:
pkgs_release_gz = urllib2.urlopen(url_gz).read()
except (urllib2.HTTPError, urllib2.URLError):
logger.error(traceback.format_exc())
raise
try:
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
pkgs_release = d.decompress(pkgs_release_gz)
except Exception:
logger.error('Ubuntu mirror error: Could not decompress {0}\n'
'{1}'.format(url_gz, traceback.format_exc()))
raise
packages = (pkg for pkg in pkgs_release.split("\n\n") if pkg)
for package in packages:
upkg = {pstr.split()[0].lower(): ''.join(pstr.split()[1:])
for pstr in package.split("\n") if pstr[0].strip()}
upkg_keys = ["package:", "version:", "filename:"]
assert_equal(True, all(x in upkg for x in upkg_keys),
'Missing one of the statements ["Package:", '
'"Version:", "Filename:"] in {0}'.format(url))
# TODO: add dependencies list to upkg
self.pkgs_list.append(upkg)
# Centos: Creating list of packages from the additional mirror
def get_pkgs_list_centos(self):
logger.info("Retriving additional packages from the custom mirror: {0}"
.format(self.custom_pkgs_mirror))
url = "{0}/repodata/repomd.xml".format(self.custom_pkgs_mirror)
try:
repomd_data = urllib2.urlopen(url).read()
except (urllib2.HTTPError, urllib2.URLError):
logger.error(traceback.format_exc())
raise
# Remove namespace attribute before parsing XML
repomd_data = re.sub(' xmlns="[^"]+"', '', repomd_data, count=1)
tree_repomd_data = ElementTree.fromstring(repomd_data)
lists_location = ''
for repomd in tree_repomd_data.findall('data'):
if repomd.get('type') == 'primary':
repomd_location = repomd.find('location')
lists_location = repomd_location.get('href')
assert_equal(True, lists_location != '', 'CentOS mirror error:'
' Could not parse {0}\nlists_location = "{1}"\n{2}'
.format(url, lists_location, traceback.format_exc()))
url = "{0}/{1}".format(self.custom_pkgs_mirror, lists_location)
try:
lists_data = urllib2.urlopen(url).read()
except (urllib2.HTTPError, urllib2.URLError):
logger.error(traceback.format_exc())
raise
if '.xml.gz' in lists_location:
try:
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
lists_data = d.decompress(lists_data)
except Exception:
logger.error('CentOS mirror error: Could not decompress {0}\n'
'{1}'.format(url, traceback.format_exc()))
raise
# Remove namespace attribute before parsing XML
lists_data = re.sub(' xmlns="[^"]+"', '', lists_data, count=1)
tree_lists_data = ElementTree.fromstring(lists_data)
for flist in tree_lists_data.findall('package'):
if flist.get('type') == 'rpm':
flist_arch = flist.find('arch').text
if flist_arch in self.centos_supported_archs:
flist_name = flist.find('name').text
flist_location = flist.find('location')
flist_file = flist_location.get('href')
flist_version = flist.find('version')
flist_ver = '{0}-{1}'.format(flist_version.get('ver'),
flist_version.get('rel'))
cpkg = {'package:': flist_name,
'version:': flist_ver,
'filename:': flist_file}
# TODO: add dependencies list to cpkg
self.pkgs_list.append(cpkg)
# Download packages (local_folder)
def download_pkgs(self, pkgs_local_path):
# Process the packages list:
total_pkgs = len(self.pkgs_list)
logger.info('Found {0} custom package(s)'.format(total_pkgs))
remote = self.env.get_admin_remote()
for npkg, pkg in enumerate(self.pkgs_list):
# TODO: Previous versions of the packages being updated must be removed
# to avoid unwanted package manager dependency resolution
# (when some package still depends on another package which
# is not going to be installed)
logger.info('({0}/{1}) Downloading package: {2}/{3}'
.format(npkg + 1, total_pkgs,
self.custom_pkgs_mirror,
pkg["filename:"]))
pkg_ext = pkg["filename:"].split('.')[-1]
if pkg_ext == 'deb':
path_suff = 'main/'
elif pkg_ext == 'udeb':
path_suff = 'debian-installer/'
else:
path_suff = ''
wget_cmd = "wget --no-verbose --directory-prefix {0} {1}/{2}"\
.format(pkgs_local_path + path_suff,
self.custom_pkgs_mirror,
pkg["filename:"])
wget_result = remote.execute(wget_cmd)
assert_equal(0, wget_result['exit_code'],
self.assert_msg(wget_cmd, wget_result['stderr']))
# Update yaml (path_to_yaml)
def update_yaml(self, yaml_versions):
# Update the corresponding .yaml with the new package version.
for pkg in self.pkgs_list:
remote = self.env.get_admin_remote()
result = remote.execute('grep -e "^{0}: " {1}'
''.format(pkg["package:"], yaml_versions))
if result['exit_code'] == 0:
sed_cmd = ('sed -i \'s/^{0}: .*/{0}: "{1}"/\' {2}'
.format(pkg["package:"],
pkg["version:"],
yaml_versions))
sed_result = remote.execute(sed_cmd)
assert_equal(0, sed_result['exit_code'],
self.assert_msg(sed_cmd, sed_result['stderr']))
else:
assert_equal(1, result['exit_code'], 'Error updating {0}\n{1}'
.format(yaml_versions, traceback.format_exc()))
echo_cmd = ('echo "{0}: \\"{1}\\"" >> {2}'
.format(pkg["package:"],
pkg["version:"],
yaml_versions))
echo_result = remote.execute(echo_cmd)
assert_equal(0, echo_result['exit_code'],
self.assert_msg(echo_cmd,
echo_result['stderr']))
# Upload the regenerate* script to the master node (script name)
def regenerate_repo(self, regenerate_script, local_mirror_path):
# Uploading scripts that prepare local repositories:
# 'regenerate_centos_repo' and 'regenerate_ubuntu_repo'
try:
remote = self.env.get_admin_remote()
remote.upload('{0}/{1}'.format(self.path_scripts,
regenerate_script),
self.remote_path_scripts)
remote.execute('chmod 755 {0}/{1}'.format(self.remote_path_scripts,
regenerate_script))
except Exception:
logger.error('Could not upload scripts for updating repositories.'
'\n{0}'.format(traceback.format_exc()))
raise
# Update the local repository using the previously uploaded script.
script_cmd = '{0}/{1} {2} {3}'.format(self.remote_path_scripts,
regenerate_script,
local_mirror_path,
self.ubuntu_release)
script_result = remote.execute(script_cmd)
assert_equal(0, script_result['exit_code'],
self.assert_msg(script_cmd, script_result['stderr']))
logger.info('Local "{0}" repository {1} has been updated successfuly.'
.format(settings.OPENSTACK_RELEASE, local_mirror_path))
def assert_msg(self, cmd, err):
return 'Executing \'{0}\' on the admin node has failed with: {1}'\
.format(cmd, err)
def check_puppet_logs(self):
logger.info("Check puppet logs for packages with unmet dependences.")
if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
err_deps = self.check_puppet_logs_ubuntu()
else:
err_deps = self.check_puppet_logs_centos()
for err_deps_key in err_deps.keys():
logger.info('Error: Package: {0} has unmet dependencies:'
.format(err_deps_key))
for dep in err_deps[err_deps_key]:
logger.info(' {0}'.format(dep.strip()))
logger.info("Check puppet logs completed.")
def check_puppet_logs_ubuntu(self):
""" Check puppet-agent.log files on all nodes for package
dependency errors during a cluster deployment (ubuntu)"""
remote = self.env.get_admin_remote()
err_start = 'The following packages have unmet dependencies:'
err_end = ('Unable to correct problems,'
' you have held broken packages.')
cmd = ('fgrep -h -e " Depends: " -e "{0}" -e "{1}" '
'/var/log/docker-logs/remote/node-*/'
'puppet*.log'.format(err_start, err_end))
result = remote.execute(cmd)['stdout']
err_deps = {}
err_deps_key = ''
err_deps_flag = False
# Forming a dictionary of package names
# with sets of required packages.
for res_str in result:
if err_deps_flag:
if err_end in res_str:
err_deps_flag = False
elif ": Depends:" in res_str:
str0, str1, str2 = res_str.partition(': Depends:')
err_deps_key = ''.join(str0.split()[-1:])
if err_deps_key not in err_deps:
err_deps[err_deps_key] = set()
if 'but it is not' in str2 or 'is to be installed' in str2:
err_deps[err_deps_key].add('Depends:{0}'
.format(str2))
elif 'Depends:' in res_str and err_deps_key:
str0, str1, str2 = res_str.partition('Depends:')
if 'but it is not' in str2 or 'is to be installed' in str2:
err_deps[err_deps_key].add(str1 + str2)
else:
err_deps_key = ''
elif err_start in res_str:
err_deps_flag = True
return err_deps
def check_puppet_logs_centos(self):
""" Check puppet-agent.log files on all nodes for package
dependency errors during a cluster deployment (centos)"""
remote = self.env.get_admin_remote()
cmd = ('fgrep -h -e "Error: Package: " -e " Requires: " /var/log/'
'docker-logs/remote/node-*/puppet*.log')
result = remote.execute(cmd)['stdout']
err_deps = {}
err_deps_key = ''
# Forming a dictionary of package names
# with sets of required packages.
for res_str in result:
if 'Error: Package:' in res_str:
err_deps_key = res_str.partition('Error: Package: ')[2]
if err_deps_key not in err_deps:
err_deps[err_deps_key] = set()
elif ' Requires: ' in res_str and err_deps_key:
str0, str1, str2 = res_str.partition(' Requires: ')
err_deps[err_deps_key].add(str1 + str2)
else:
err_deps_key = ''
return err_deps
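
The intended flow, as a sketch (env is assumed to be an EnvironmentModel instance and settings.CUSTOM_PKGS_MIRROR is assumed to be set):

    repo = CustomRepo(env)
    repo.prepare_repository()   # download packages, patch versions, rebuild repo
    # ... deploy the cluster under test ...
    repo.check_puppet_logs()    # report packages with unmet dependencies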

View File

@ -0,0 +1,59 @@
#!/bin/bash
# Based on the method described here:
# http://troubleshootingrange.blogspot.com/2012/09/hosting-simple-apt-repository-on-centos.html
set -e
ARCH=amd64
REPO_PATH=$1
REPONAME=$2
BINDIR=${REPO_PATH}/dists/${REPONAME}/main
binoverride=indices/override.${REPONAME}.main
binoverride_udeb=indices/override.${REPONAME}.main.debian-installer
extraoverride=indices/override.${REPONAME}.extra.main
release_header=`head -8 ${REPO_PATH}/dists/${REPONAME}/Release`
package_deb=${BINDIR}/binary-${ARCH}/Packages
package_udeb=${BINDIR}/debian-installer/binary-${ARCH}/Packages
cd ${REPO_PATH}
# Scan *.deb packages
dpkg-scanpackages -m --extra-override ${extraoverride} -a ${ARCH} pool/main ${binoverride} > ${package_deb}.tmp 2>/dev/null
gzip -9c ${package_deb}.tmp > ${package_deb}.gz.tmp
# Scan *.udeb packages
dpkg-scanpackages --udeb -m -a ${ARCH} pool/debian-installer ${binoverride_udeb} > ${package_udeb}.tmp 2>/dev/null
gzip -9c ${package_udeb}.tmp > ${package_udeb}.gz.tmp
# Replace original files with new ones
mv --backup -f ${package_deb}.tmp ${package_deb}
mv --backup -f ${package_deb}.gz.tmp ${package_deb}.gz
mv --backup -f ${package_udeb}.tmp ${package_udeb}
mv --backup -f ${package_udeb}.gz.tmp ${package_udeb}.gz
# Generate release file
cd ${REPO_PATH}/dists/${REPONAME}
echo "$release_header" > Release.tmp
# Generate hashes
c1=(MD5Sum: SHA1: SHA256: SHA512:)
c2=(md5 sha1 sha256 sha512)
i=0
while [ $i -lt ${#c1[*]} ]; do
echo ${c1[i]}
for hashme in `find main -type f \( -not -name "*~" -name "Package*" -o -name "Release*" \)`; do
ohash=`openssl dgst -${c2[$i]} ${hashme}`
chash="${ohash##* }"
size=`stat -c %s ${hashme}`
echo " ${chash} ${size} ${hashme}"
done
i=$(( $i + 1));
done >> Release.tmp
mv --backup -f Release.tmp Release

View File

@ -0,0 +1,120 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_equal
from random import randrange
from fuelweb_test import logwrap
from fuelweb_test import logger
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
class SecurityChecks(object):
def __init__(self, nailgun_client, environment):
self.client = nailgun_client
self.environment = environment
super(SecurityChecks, self).__init__()
@logwrap
def _listen_random_port(self, ip_address, protocol, tmp_file_path):
remote = self.environment.get_ssh_to_remote(ip_address)
# Install socat
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
cmd = '/usr/bin/apt-get install -y {pkg}'.format(pkg='socat')
else:
cmd = '/usr/bin/yum install -y {pkg}'.format(pkg='socat')
result = remote.execute(cmd)
if not result['exit_code'] == 0:
raise Exception('Could not install package: {0}\n{1}'.
format(result['stdout'], result['stderr']))
# Get all used ports
cmd = ('netstat -A inet -ln --{proto} | awk \'$4 ~ /^({ip}'
'|0\.0\.0\.0):[0-9]+/ {{split($4,port,":"); print '
'port[2]}}\'').format(ip=ip_address, proto=protocol)
used_ports = [int(p.strip()) for p in remote.execute(cmd)['stdout']]
# Get list of opened ports
cmd = ('iptables -t filter -S INPUT | sed -rn -e \'s/^.*\s\-p\s+'
'{proto}\s.*\-\-(dport|ports|dports)\s+([0-9,\,,:]+)\s.*'
'-j\s+ACCEPT.*$/\\2/p\' | sed -r \'s/,/\\n/g; s/:/ /g\' |'
' while read ports; do if [[ "$ports" =~ [[:digit:]]'
'[[:blank:]][[:digit:]] ]]; then seq $ports; else echo '
'"$ports";fi; done').format(proto=protocol)
allowed_ports = [int(p.strip()) for p in remote.execute(cmd)['stdout']]
test_port = randrange(10000)
while test_port in used_ports or test_port in allowed_ports:
test_port = randrange(10000)
# Create dump of iptables rules
cmd = 'iptables-save > {0}.dump'.format(tmp_file_path)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
'Dumping of iptables rules failed on {0}: {1}; {2}'.
format(ip_address, result['stdout'], result['stderr']))
# Start listening for connections on test_port
cmd = ('socat {proto}4-LISTEN:{port},bind={ip} {file} '
'&>/dev/null & pid=$! ; disown; sleep 1; kill -0 $pid').\
format(proto=protocol, ip=ip_address, file=tmp_file_path,
port=test_port)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
'Listening on {0}:{1}/{2} port failed: {3}'.
format(ip_address, test_port, protocol,
result['stderr']))
return test_port
@logwrap
def verify_firewall(self, cluster_id):
admin_remote = self.environment.get_admin_remote()
# Install NetCat
if not self.environment.admin_install_pkg('nc') == 0:
raise Exception('Cannot install package "nc".')
cluster_nodes = self.client.list_cluster_nodes(cluster_id)
tmp_file_path = '/var/tmp/iptables_check_file'
check_string = 'FirewallHole'
for node in cluster_nodes:
protocols_to_check = ['tcp', 'udp']
for protocol in protocols_to_check:
port = self._listen_random_port(ip_address=node['ip'],
protocol=protocol,
tmp_file_path=tmp_file_path)
nc_opts = ''
if protocol == 'udp':
nc_opts = '{} -u'.format(nc_opts)
cmd = 'echo {string} | nc {opts} {ip} {port}'.\
format(opts=nc_opts, string=check_string, ip=node['ip'],
port=port)
admin_remote.execute(cmd)
remote = self.environment.get_ssh_to_remote(node['ip'])
cmd = 'cat {0}; mv {0}{{,.old}}'.format(tmp_file_path)
result = remote.execute(cmd)
if ''.join(result['stdout']).strip() == check_string:
raise Exception(('Firewall vulnerability detected. '
'Unused port {0}/{1} can be accessed'
' on {2} (node-{3}) node. Check {4}.old'
' and {4}.dump files on the node for de'
'tails').format(port, protocol,
node['name'], node['id'],
tmp_file_path))
logger.info('Firewall test passed')
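
A sketch of wiring the check up (the client, environment and cluster id are assumptions):

    checks = SecurityChecks(env.fuel_web.client, env)
    checks.verify_firewall(cluster_id=1)  # raises if any unused port is reachable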

View File

@ -0,0 +1,76 @@
[DEFAULT]
debug=true
verbose=true
[identity]
disable_ssl_certificate_validation=true
uri=http://{{ management_vip }}:5000/v2.0/
uri_v3=http://{{ management_vip }}:5000/v3.0/
username={{username}}
password={{password}}
tenant_name={{tenant_name}}
admin_username={{admin_username}}
admin_tenant_name={{admin_tenant_name}}
admin_password={{admin_password}}
alt_username={{alt_username}}
alt_password={{alt_password}}
alt_tenant_name={{alt_tenant_name}}
[dashboard]
dashboard_url=http://{{ management_vip }}/dashboard/
login_url=http://{{ management_vip }}/dashboard/auth/login/
[network]
tenant_network_cidr={{ internal_cidr }}
tenant_network_mask_bits={{ internal_mask }}
public_network_id={{ public_network }}
public_router_id={{ public_router }}
[network-feature-enabled]
ipv6=false
[service_available]
{% if net_provider == 'neutron' %}
neutron=true
{% endif %}
[object-storage]
operator_role=admin
[compute]
image_ref={{image_ref}}
image_ssh_user=cirros
image_ssh_password=cubswin:)
{% if image_ref_alt %}
image_ref_alt={{image_ref_alt}}
{% else %}
image_ref_alt={{image_ref}}
{% endif %}
{% if net_provider == 'nova_network' %}
fixed_network_name=novanetwork_0
network_for_ssh=fixed
{% endif %}
[compute-feature-enabled]
api_v3=false
[cli]
# Don't provide a full path - the PATH variable will help us
cli_dir=
has_manage=false
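
The {{ ... }} placeholders are Jinja-style; a minimal sketch of rendering this template with the stock jinja2 package (the file name and all values are hypothetical):

    from jinja2 import Template

    with open('tempest.conf.template') as f:
        conf = Template(f.read()).render(
            management_vip='10.109.2.2', net_provider='neutron',
            username='tempest', password='secret', tenant_name='tempest',
            admin_username='admin', admin_tenant_name='admin',
            admin_password='admin', alt_username='tempest-alt',
            alt_password='secret', alt_tenant_name='tempest-alt',
            internal_cidr='192.168.111.0/24', internal_mask=24,
            public_network='<net-id>', public_router='<router-id>',
            image_ref='<image-id>', image_ref_alt='')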

View File

@ -0,0 +1,122 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import time
import traceback
from proboscis import asserts
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test import settings
@logwrap
def get_yaml_to_json(node_ssh, file):
cmd = ("python -c 'import sys, yaml, json; json.dump("
"yaml.load(sys.stdin),"
" sys.stdout)' < {0}").format(file)
res = node_ssh.execute(cmd)
err_res = ''.join(res['stderr'])
asserts.assert_equal(
res['exit_code'], 0,
'Command {0} execution failed '
'with message {1}'.format(cmd, err_res))
return res['stdout']
@logwrap
def nova_service_get_pid(node_ssh, nova_services=None):
pid_dict = {}
for el in nova_services:
cmd = "pgrep {0}".format(el)
pid_dict[el] = node_ssh.execute(cmd)['stdout']
logger.debug('current dict is {0}'.format(pid_dict))
return pid_dict
@logwrap
def check_if_service_restarted(node_ssh, services_list=None,
pattern='(re)?start', skip=0):
if services_list:
# from the log file {2}, scan all lines after line {0} with the
# pattern {1} to find restarted services, print their names to stdout
cmd = ("awk 'NR >= {0} && /{1}/ {{print $11}}' {2}"
.format(skip, pattern, '/var/log/puppet.log'))
res = ''.join(node_ssh.execute(cmd)['stdout'])
logger.debug('Next services were restarted {0}'.format(res))
for service in services_list:
asserts.assert_true(
service in res,
'Seems service {0} was not restarted: {1}'.format(service, res))
@logwrap
def pull_out_logs_via_ssh(admin_remote, name,
logs_dirs=('/var/log/', '/root/', '/etc/fuel/')):
def _compress_logs(_dirs, _archive_path):
cmd = 'tar --absolute-names --warning=no-file-changed -czf {t} {d}'.\
format(t=_archive_path, d=' '.join(_dirs))
result = admin_remote.execute(cmd)
if result['exit_code'] != 0:
logger.error("Compressing of logs on master node failed: {0}".
format(result))
return False
return True
archive_path = '/var/tmp/fail_{0}_diagnostic-logs_{1}.tgz'.format(
name, time.strftime("%Y_%m_%d__%H_%M_%S", time.gmtime()))
try:
if _compress_logs(logs_dirs, archive_path):
if not admin_remote.download(archive_path, settings.LOGS_DIR):
logger.error(("Downloading of archive with logs failed, file"
"wasn't saved on local host"))
except Exception:
logger.error(traceback.format_exc())
@logwrap
def store_astute_yaml(env):
func_name = get_test_method_name()
for node in env.nodes().slaves:
nailgun_node = env.fuel_web.get_nailgun_node_by_devops_node(node)
if node.driver.node_active(node) and nailgun_node['roles']:
try:
remote = env.get_ssh_to_remote_by_name(node.name)
filename = '{0}/{1}-{2}.yaml'.format(settings.LOGS_DIR,
func_name, node.name)
logger.info("Storing {0}".format(filename))
if not remote.download('/etc/astute.yaml', filename):
logger.error("Downloading 'astute.yaml' from the node "
"{0} failed.".format(node.name))
except Exception:
logger.error(traceback.format_exc())
@logwrap
def get_test_method_name():
# Find the name of the current test in the stack. It can be found
# right under the class name 'NoneType' (when proboscis
# runs the test method with unittest.FunctionTestCase)
stack = inspect.stack()
method = ''
for m in stack:
if 'self' in m[0].f_locals:
if m[0].f_locals['self'].__class__.__name__ == 'NoneType':
break
method = m[3]
return method
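
For example, get_yaml_to_json streams a node's YAML file through a one-line Python converter and returns the stdout lines; a sketch of consuming it (remote is an assumed SSHClient to a slave node):

    import json

    stdout_lines = get_yaml_to_json(remote, '/etc/astute.yaml')
    astute = json.loads(''.join(stdout_lines))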

View File

@ -0,0 +1,336 @@
<?xml version="1.0" encoding="UTF-8"?>
<jmeterTestPlan version="1.2" properties="2.3" jmeter="2.8.20130705">
<hashTree>
<TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
<stringProp name="TestPlan.comments"></stringProp>
<boolProp name="TestPlan.functional_mode">false</boolProp>
<boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
<elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
<collectionProp name="Arguments.arguments"/>
</elementProp>
<stringProp name="TestPlan.user_define_classpath"></stringProp>
</TestPlan>
<hashTree>
<ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Create_cluster_100_100" enabled="true">
<stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
<elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
<boolProp name="LoopController.continue_forever">false</boolProp>
<stringProp name="LoopController.loops">100</stringProp>
</elementProp>
<stringProp name="ThreadGroup.num_threads">1</stringProp>
<stringProp name="ThreadGroup.ramp_time">1</stringProp>
<longProp name="ThreadGroup.start_time">1411570374000</longProp>
<longProp name="ThreadGroup.end_time">1411570374000</longProp>
<boolProp name="ThreadGroup.scheduler">false</boolProp>
<stringProp name="ThreadGroup.duration"></stringProp>
<stringProp name="ThreadGroup.delay"></stringProp>
</ThreadGroup>
<hashTree>
<HTTPSamplerProxy guiclass="HttpTestSampleGui" testclass="HTTPSamplerProxy" testname="Login" enabled="true">
<elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
<collectionProp name="Arguments.arguments">
<elementProp name="" elementType="HTTPArgument">
<boolProp name="HTTPArgument.always_encode">false</boolProp>
<stringProp name="Argument.value">{&quot;auth&quot;:{&quot;passwordCredentials&quot;:{&quot;username&quot;:&quot;admin&quot;,&quot;password&quot;:&quot;admin&quot;},&quot;tenantName&quot;:&quot;admin&quot;}}</stringProp>
<stringProp name="Argument.metadata">=</stringProp>
<boolProp name="HTTPArgument.use_equals">true</boolProp>
<stringProp name="Argument.name"></stringProp>
</elementProp>
</collectionProp>
</elementProp>
<stringProp name="HTTPSampler.domain">10.108.0.2</stringProp>
<stringProp name="HTTPSampler.port">8000</stringProp>
<stringProp name="HTTPSampler.connect_timeout"></stringProp>
<stringProp name="HTTPSampler.response_timeout"></stringProp>
<stringProp name="HTTPSampler.protocol"></stringProp>
<stringProp name="HTTPSampler.contentEncoding"></stringProp>
<stringProp name="HTTPSampler.path">keystone/v2.0/tokens</stringProp>
<stringProp name="HTTPSampler.method">POST</stringProp>
<boolProp name="HTTPSampler.follow_redirects">true</boolProp>
<boolProp name="HTTPSampler.auto_redirects">false</boolProp>
<boolProp name="HTTPSampler.use_keepalive">true</boolProp>
<boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
<boolProp name="HTTPSampler.monitor">false</boolProp>
<stringProp name="HTTPSampler.embedded_url_re"></stringProp>
</HTTPSamplerProxy>
<hashTree>
<RegexExtractor guiclass="RegexExtractorGui" testclass="RegexExtractor" testname="Regular Expression Extractor" enabled="true">
<stringProp name="RegexExtractor.useHeaders">false</stringProp>
<stringProp name="RegexExtractor.refname">token</stringProp>
<stringProp name="RegexExtractor.regex">&quot;token&quot;\s*:\s*{[^{]*&quot;id&quot;\s*:\s*&quot;(\w+)</stringProp>
<stringProp name="RegexExtractor.template">$1$</stringProp>
<stringProp name="RegexExtractor.default">BLQAAA</stringProp>
<stringProp name="RegexExtractor.match_number"></stringProp>
<stringProp name="Scope.variable"></stringProp>
</RegexExtractor>
<hashTree/>
</hashTree>
<HTTPSamplerProxy guiclass="HttpTestSampleGui" testclass="HTTPSamplerProxy" testname="Create_cluster" enabled="true">
<elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
<collectionProp name="Arguments.arguments">
<elementProp name="" elementType="HTTPArgument">
<boolProp name="HTTPArgument.always_encode">false</boolProp>
<stringProp name="Argument.value">{&quot;nodes&quot;:[],&quot;tasks&quot;:[],&quot;name&quot;: &quot;cluster${__Random(1, 10000)}&quot;, &quot;release&quot;:1,&quot;mode&quot;:&quot;ha_compact&quot;,&quot;net_provider&quot;:&quot;nova_network&quot;}</stringProp>
<stringProp name="Argument.metadata">=</stringProp>
<boolProp name="HTTPArgument.use_equals">true</boolProp>
<stringProp name="Argument.name"></stringProp>
</elementProp>
</collectionProp>
</elementProp>
<stringProp name="HTTPSampler.domain">10.108.0.2</stringProp>
<stringProp name="HTTPSampler.port">8000</stringProp>
<stringProp name="HTTPSampler.connect_timeout"></stringProp>
<stringProp name="HTTPSampler.response_timeout"></stringProp>
<stringProp name="HTTPSampler.protocol"></stringProp>
<stringProp name="HTTPSampler.contentEncoding"></stringProp>
<stringProp name="HTTPSampler.path">http://10.108.0.2:8000/api/clusters</stringProp>
<stringProp name="HTTPSampler.method">POST</stringProp>
<boolProp name="HTTPSampler.follow_redirects">true</boolProp>
<boolProp name="HTTPSampler.auto_redirects">false</boolProp>
<boolProp name="HTTPSampler.use_keepalive">true</boolProp>
<boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
<boolProp name="HTTPSampler.monitor">false</boolProp>
<stringProp name="HTTPSampler.embedded_url_re"></stringProp>
</HTTPSamplerProxy>
<hashTree>
<CSVDataSet guiclass="TestBeanGUI" testclass="CSVDataSet" testname="CSV Data Set Config" enabled="true">
<stringProp name="delimiter"></stringProp>
<stringProp name="fileEncoding"></stringProp>
<stringProp name="filename">cluster|_name.cvs</stringProp>
<boolProp name="quotedData">false</boolProp>
<boolProp name="recycle">true</boolProp>
<stringProp name="shareMode">All threads</stringProp>
<boolProp name="stopThread">false</boolProp>
<stringProp name="variableNames">CLUSTER_NAME</stringProp>
</CSVDataSet>
<hashTree/>
<RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="name" enabled="true">
<stringProp name="maximumValue">100</stringProp>
<stringProp name="minimumValue">1</stringProp>
<stringProp name="outputFormat">name_00</stringProp>
<boolProp name="perThread">true</boolProp>
<stringProp name="randomSeed"></stringProp>
<stringProp name="variableName">CLUSTER_NAME</stringProp>
</RandomVariableConfig>
<hashTree/>
<HeaderManager guiclass="HeaderPanel" testclass="HeaderManager" testname="HTTP_Request_cluster_header" enabled="true">
<collectionProp name="HeaderManager.headers">
<elementProp name="" elementType="Header">
<stringProp name="Header.name">Content-Type</stringProp>
<stringProp name="Header.value">application/json</stringProp>
</elementProp>
<elementProp name="" elementType="Header">
<stringProp name="Header.name">X-Auth-Token</stringProp>
<stringProp name="Header.value">${token}</stringProp>
</elementProp>
<elementProp name="" elementType="Header">
<stringProp name="Header.name">Cookie</stringProp>
<stringProp name="Header.value">i18next=en-US</stringProp>
</elementProp>
</collectionProp>
</HeaderManager>
<hashTree/>
<RegexExtractor guiclass="RegexExtractorGui" testclass="RegexExtractor" testname="Regular Expression Extractor" enabled="true">
<stringProp name="RegexExtractor.useHeaders">false</stringProp>
<stringProp name="RegexExtractor.refname">cluster_id</stringProp>
<stringProp name="RegexExtractor.regex">&quot;id&quot;\s*:\s*(\d+)</stringProp>
<stringProp name="RegexExtractor.template">$1$</stringProp>
<stringProp name="RegexExtractor.default">cluster_id_did_not_get</stringProp>
<stringProp name="RegexExtractor.match_number"></stringProp>
</RegexExtractor>
<hashTree/>
</hashTree>
<ResultCollector guiclass="StatGraphVisualizer" testclass="ResultCollector" testname="Aggregate Graph" enabled="true">
<boolProp name="ResultCollector.error_logging">false</boolProp>
<objProp>
<name>saveConfig</name>
<value class="SampleSaveConfiguration">
<time>true</time>
<latency>true</latency>
<timestamp>true</timestamp>
<success>true</success>
<label>true</label>
<code>true</code>
<message>true</message>
<threadName>true</threadName>
<dataType>true</dataType>
<encoding>false</encoding>
<assertions>true</assertions>
<subresults>true</subresults>
<responseData>true</responseData>
<samplerData>false</samplerData>
<xml>true</xml>
<fieldNames>false</fieldNames>
<responseHeaders>true</responseHeaders>
<requestHeaders>false</requestHeaders>
<responseDataOnError>false</responseDataOnError>
<saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
<assertionsResultsToSave>0</assertionsResultsToSave>
<bytes>true</bytes>
</value>
</objProp>
<stringProp name="filename"></stringProp>
</ResultCollector>
<hashTree/>
<ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="true">
<boolProp name="ResultCollector.error_logging">false</boolProp>
<objProp>
<name>saveConfig</name>
<value class="SampleSaveConfiguration">
<time>true</time>
<latency>true</latency>
<timestamp>true</timestamp>
<success>true</success>
<label>true</label>
<code>true</code>
<message>true</message>
<threadName>true</threadName>
<dataType>true</dataType>
<encoding>false</encoding>
<assertions>true</assertions>
<subresults>true</subresults>
<responseData>false</responseData>
<samplerData>false</samplerData>
<xml>true</xml>
<fieldNames>false</fieldNames>
<responseHeaders>false</responseHeaders>
<requestHeaders>false</requestHeaders>
<responseDataOnError>false</responseDataOnError>
<saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
<assertionsResultsToSave>0</assertionsResultsToSave>
<bytes>true</bytes>
</value>
</objProp>
<stringProp name="filename"></stringProp>
</ResultCollector>
<hashTree/>
<HTTPSamplerProxy guiclass="HttpTestSampleGui" testclass="HTTPSamplerProxy" testname="HTTP_Delete" enabled="true">
<elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
<collectionProp name="Arguments.arguments"/>
</elementProp>
<stringProp name="HTTPSampler.domain">10.108.0.2</stringProp>
<stringProp name="HTTPSampler.port">8000</stringProp>
<stringProp name="HTTPSampler.connect_timeout"></stringProp>
<stringProp name="HTTPSampler.response_timeout"></stringProp>
<stringProp name="HTTPSampler.protocol"></stringProp>
<stringProp name="HTTPSampler.contentEncoding"></stringProp>
<stringProp name="HTTPSampler.path">http://10.108.0.2:8000/api/clusters/${cluster_id}</stringProp>
<stringProp name="HTTPSampler.method">DELETE</stringProp>
<boolProp name="HTTPSampler.follow_redirects">true</boolProp>
<boolProp name="HTTPSampler.auto_redirects">false</boolProp>
<boolProp name="HTTPSampler.use_keepalive">true</boolProp>
<boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
<boolProp name="HTTPSampler.monitor">false</boolProp>
<stringProp name="HTTPSampler.embedded_url_re"></stringProp>
</HTTPSamplerProxy>
<hashTree>
<HeaderManager guiclass="HeaderPanel" testclass="HeaderManager" testname="HTTP_Request_cluster_header" enabled="true">
<collectionProp name="HeaderManager.headers">
<elementProp name="" elementType="Header">
<stringProp name="Header.name">Content-Type</stringProp>
<stringProp name="Header.value">application/json</stringProp>
</elementProp>
<elementProp name="" elementType="Header">
<stringProp name="Header.name">X-Auth-Token</stringProp>
<stringProp name="Header.value">${token}</stringProp>
</elementProp>
<elementProp name="" elementType="Header">
<stringProp name="Header.name">Cookie</stringProp>
<stringProp name="Header.value">i18next=en-US</stringProp>
</elementProp>
</collectionProp>
</HeaderManager>
<hashTree/>
</hashTree>
<DebugSampler guiclass="TestBeanGUI" testclass="DebugSampler" testname="Debug Sampler" enabled="false">
<boolProp name="displayJMeterProperties">false</boolProp>
<boolProp name="displayJMeterVariables">true</boolProp>
<boolProp name="displaySystemProperties">false</boolProp>
</DebugSampler>
<hashTree/>
<ResultCollector guiclass="StatVisualizer" testclass="ResultCollector" testname="Aggregate Report" enabled="true">
<boolProp name="ResultCollector.error_logging">false</boolProp>
<objProp>
<name>saveConfig</name>
<value class="SampleSaveConfiguration">
<time>true</time>
<latency>true</latency>
<timestamp>true</timestamp>
<success>true</success>
<label>true</label>
<code>true</code>
<message>true</message>
<threadName>true</threadName>
<dataType>true</dataType>
<encoding>false</encoding>
<assertions>true</assertions>
<subresults>true</subresults>
<responseData>false</responseData>
<samplerData>false</samplerData>
<xml>true</xml>
<fieldNames>false</fieldNames>
<responseHeaders>false</responseHeaders>
<requestHeaders>false</requestHeaders>
<responseDataOnError>false</responseDataOnError>
<saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
<assertionsResultsToSave>0</assertionsResultsToSave>
<bytes>true</bytes>
</value>
</objProp>
<stringProp name="filename"></stringProp>
</ResultCollector>
<hashTree/>
</hashTree>
<HeaderManager guiclass="HeaderPanel" testclass="HeaderManager" testname="HTTP Header Manager" enabled="true">
<collectionProp name="HeaderManager.headers">
<elementProp name="" elementType="Header">
<stringProp name="Header.name">Content-Type</stringProp>
<stringProp name="Header.value">application/json</stringProp>
</elementProp>
</collectionProp>
</HeaderManager>
<hashTree/>
<!-- Yandex.Tank Addition -->
<ResultCollector guiclass="SimpleDataWriter" testclass="ResultCollector" testname="Yandex.Tank results writer"
enabled="true">
<boolProp name="ResultCollector.error_logging">false</boolProp>
<objProp>
<name>saveConfig</name>
<value class="SampleSaveConfiguration">
<time>true</time>
<latency>true</latency>
<timestamp>true</timestamp>
<success>true</success>
<label>true</label>
<code>true</code>
<message>false</message>
<threadName>false</threadName>
<dataType>false</dataType>
<encoding>false</encoding>
<assertions>false</assertions>
<subresults>false</subresults>
<responseData>false</responseData>
<samplerData>false</samplerData>
<xml>false</xml>
<fieldNames>true</fieldNames>
<responseHeaders>false</responseHeaders>
<requestHeaders>false</requestHeaders>
<responseDataOnError>false</responseDataOnError>
<saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
<assertionsResultsToSave>0</assertionsResultsToSave>
<bytes>true</bytes>
<threadCounts>true</threadCounts>
</value>
</objProp>
<stringProp name="filename">/home/tleontovich/.jenkins/jobs/jmeter_job/workspace/jmeter_glhRzL.jtl</stringProp>
<stringProp name="TestPlan.comments">Added automatically</stringProp>
</ResultCollector>
<hashTree/>
<Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="Yandex.Tank Variables" enabled="true">
<collectionProp name="Arguments.arguments">
</collectionProp>
</Arguments>
<!-- /Yandex.Tank Addition --> </hashTree>
</hashTree>
</jmeterTestPlan>

View File

@ -0,0 +1,21 @@
"""
Script for creating Puppet integration tests scripts using template engine.
"""
import argparse
from puppet_tests.pp_testgenerator import PuppetTestGenerator
parser = argparse.ArgumentParser()
parser.add_argument("tests", type=str, help="Directory to save tests")
parser.add_argument("modules", type=str, help="Path to Puppet modules")
parser.add_argument("-k", "--keep_tests",
action='store_true',
help="Keep previous test files",
default=False)
args = parser.parse_args()
generator = PuppetTestGenerator(args.tests, args.modules)
if not args.keep_tests:
generator.remove_all_tests()
generator.make_all_scripts()
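
A hypothetical invocation (the script and directory names are placeholders):

    python generate_puppet_tests.py /tmp/puppet_tests /etc/puppet/modules --keep_tests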

View File

@ -0,0 +1 @@
__author__ = 'nprikazchikov'

View File

@ -0,0 +1,566 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import yaml
from devops.helpers.helpers import _tcp_ping
from devops.helpers.helpers import _wait
from devops.helpers.helpers import SSHClient
from devops.helpers.helpers import wait
from devops.models import Environment
from ipaddr import IPNetwork
from keystoneclient import exceptions
from paramiko import Agent
from paramiko import RSAKey
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import revert_info
from fuelweb_test.helpers.decorators import retry
from fuelweb_test.helpers.decorators import upload_manifests
from fuelweb_test.helpers.eb_tables import Ebtables
from fuelweb_test.helpers.fuel_actions import NailgunActions
from fuelweb_test.helpers.fuel_actions import PostgresActions
from fuelweb_test.helpers import multiple_networks_hacks
from fuelweb_test.models.fuel_web_client import FuelWebClient
from fuelweb_test import settings
from fuelweb_test import logwrap
from fuelweb_test import logger
class EnvironmentModel(object):
def __init__(self):
self._virtual_environment = None
self._keys = None
self.fuel_web = FuelWebClient(self.get_admin_node_ip(), self)
@property
def nailgun_actions(self):
return NailgunActions(self.get_admin_remote())
@property
def postgres_actions(self):
return PostgresActions(self.get_admin_remote())
@property
def admin_node_ip(self):
return self.fuel_web.admin_node_ip
@logwrap
def add_syslog_server(self, cluster_id, port=5514):
self.fuel_web.add_syslog_server(
cluster_id, self.get_virtual_environment().router(), port)
def bootstrap_nodes(self, devops_nodes, timeout=600):
"""Lists registered nailgun nodes
Start vms and wait until they are registered on nailgun.
:rtype : List of registered nailgun nodes
"""
# self.dhcrelay_check()
for node in devops_nodes:
node.start()
# TODO(aglarendil): LP#1317213 temporary sleep
# remove after better fix is applied
time.sleep(2)
wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout)
for node in self.nailgun_nodes(devops_nodes):
self.sync_node_time(self.get_ssh_to_remote(node["ip"]))
return self.nailgun_nodes(devops_nodes)
@logwrap
def get_admin_remote(self, login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password']):
"""SSH to admin node
:rtype : SSHClient
"""
return self.get_virtual_environment().nodes().admin.remote(
self.get_virtual_environment().admin_net,
login=login,
password=password)
@logwrap
def get_admin_node_ip(self):
return str(
self.get_virtual_environment().nodes(
).admin.get_ip_address_by_network_name(
self.get_virtual_environment().admin_net))
@logwrap
def get_ebtables(self, cluster_id, devops_nodes):
return Ebtables(self.get_target_devs(devops_nodes),
self.fuel_web.client.get_cluster_vlans(cluster_id))
def get_keys(self, node, custom=None, build_images=None):
params = {
'ip': node.get_ip_address_by_network_name(
self.get_virtual_environment().admin_net),
'mask': self.get_net_mask(self.get_virtual_environment(
).admin_net),
'gw': self.get_virtual_environment().router(),
'hostname': '.'.join((self.get_virtual_environment().hostname,
self.get_virtual_environment().domain)),
'nat_interface': self.get_virtual_environment().nat_interface,
'dns1': settings.DNS,
'showmenu': 'yes' if custom else 'no',
'build_images': '1' if build_images else '0'
}
keys = (
"<Wait>\n"
"<Esc><Enter>\n"
"<Wait>\n"
"vmlinuz initrd=initrd.img ks=cdrom:/ks.cfg\n"
" ip=%(ip)s\n"
" netmask=%(mask)s\n"
" gw=%(gw)s\n"
" dns1=%(dns1)s\n"
" hostname=%(hostname)s\n"
" dhcp_interface=%(nat_interface)s\n"
" showmenu=%(showmenu)s\n"
" build_images=%(build_images)s\n"
" <Enter>\n"
) % params
return keys
@logwrap
def get_private_keys(self, force=False):
if force or self._keys is None:
self._keys = []
for key_string in ['/root/.ssh/id_rsa',
'/root/.ssh/bootstrap.rsa']:
with self.get_admin_remote().open(key_string) as f:
self._keys.append(RSAKey.from_private_key(f))
return self._keys
@logwrap
def get_ssh_to_remote(self, ip):
return SSHClient(ip,
username=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password'],
private_keys=self.get_private_keys())
@logwrap
def get_ssh_to_remote_by_key(self, ip, keyfile):
try:
with open(keyfile) as f:
keys = [RSAKey.from_private_key(f)]
return SSHClient(ip, private_keys=keys)
except IOError:
logger.warning('Loading of SSH key from file failed. Trying to use'
' SSH agent ...')
keys = Agent().get_keys()
return SSHClient(ip, private_keys=keys)
@logwrap
def get_ssh_to_remote_by_name(self, node_name):
return self.get_ssh_to_remote(
self.fuel_web.get_nailgun_node_by_devops_node(
self.get_virtual_environment().get_node(name=node_name))['ip']
)
def get_target_devs(self, devops_nodes):
return [
interface.target_dev for interface in [
val for var in map(lambda node: node.interfaces, devops_nodes)
for val in var]]
def get_virtual_environment(self):
if self._virtual_environment is None:
try:
self._virtual_environment = Environment.get(name=settings.ENV_NAME)
except Exception:
self._virtual_environment = Environment.describe_environment()
self._virtual_environment.define()
return self._virtual_environment
def _get_network(self, net_name):
return str(
IPNetwork(
self.get_virtual_environment().get_network(name=net_name).
ip_network))
def get_net_mask(self, net_name):
return str(
IPNetwork(
self.get_virtual_environment().get_network(
name=net_name).ip_network).netmask)
def make_snapshot(self, snapshot_name, description="", is_make=False):
if settings.MAKE_SNAPSHOT or is_make:
self.get_virtual_environment().suspend(verbose=False)
time.sleep(10)
self.get_virtual_environment().snapshot(snapshot_name, force=True)
revert_info(snapshot_name, description)
if settings.FUEL_STATS_CHECK:
self.get_virtual_environment().resume()
try:
self.get_virtual_environment().nodes().admin.await(
self.get_virtual_environment().admin_net, timeout=60)
except Exception:
logger.error('Admin node is unavailable via SSH after '
'environment resume ')
raise
def nailgun_nodes(self, devops_nodes):
return map(
lambda node: self.fuel_web.get_nailgun_node_by_devops_node(node),
devops_nodes
)
def revert_snapshot(self, name):
if self.get_virtual_environment().has_snapshot(name):
logger.info('Found snapshot with name %s' % name)
logger.info('Reverting snapshot ...')
self.get_virtual_environment().revert(name)
logger.info('Resuming environment ...')
self.get_virtual_environment().resume()
admin = self.get_virtual_environment().nodes().admin
try:
admin.await(
self.get_virtual_environment().admin_net, timeout=10 * 60,
by_port=8000)
except Exception as e:
logger.warning("From first time admin isn't reverted: "
"{0}".format(e))
admin.destroy()
logger.info('Admin node was destroyed. Wait 10 sec.')
time.sleep(10)
self.get_virtual_environment().start(
self.get_virtual_environment().nodes().admins)
logger.info('Admin node started second time.')
self.get_virtual_environment().nodes().admin.await(
self.get_virtual_environment().admin_net, timeout=10 * 60,
by_port=8000)
self.set_admin_ssh_password()
try:
_wait(self.fuel_web.client.get_releases,
expected=EnvironmentError, timeout=300)
except exceptions.Unauthorized:
self.set_admin_keystone_password()
self.fuel_web.get_nailgun_version()
self.sync_time_admin_node()
for node in self.get_virtual_environment().nodes(
).slaves:
if not node.driver.node_active(node):
continue
try:
logger.info("Sync time on revert for node %s" % node.name)
self.sync_node_time(
self.get_ssh_to_remote_by_name(node.name))
except Exception as e:
logger.warning(
'Exception caught while trying to sync time on {0}:'
' {1}'.format(node.name, e))
self.run_nailgun_agent(
self.get_ssh_to_remote_by_name(node.name))
return True
return False
def set_admin_ssh_password(self):
try:
remote = self.get_admin_remote(
login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password'])
self.execute_remote_cmd(remote, 'date')
logger.debug('Accessing admin node using SSH: SUCCESS')
except Exception:
logger.debug('Accessing admin node using SSH credentials:'
' FAIL, trying to change password from default')
remote = self.get_admin_remote(login='root', password='r00tme')
self.execute_remote_cmd(
remote, 'echo -e "{1}\\n{1}" | passwd {0}'
.format(settings.SSH_CREDENTIALS['login'],
settings.SSH_CREDENTIALS['password']))
logger.debug("Admin node password has changed.")
logger.info("Admin node login name: '{0}' , password: '{1}'".
format(settings.SSH_CREDENTIALS['login'],
settings.SSH_CREDENTIALS['password']))
def set_admin_keystone_password(self):
remote = self.get_admin_remote()
try:
self.fuel_web.client.get_releases()
except exceptions.Unauthorized:
self.execute_remote_cmd(
remote, 'fuel user --newpass {0} --change-password'
.format(settings.KEYSTONE_CREDS['password']))
logger.info(
'New Fuel UI (keystone) username: "{0}", password: "{1}"'
.format(settings.KEYSTONE_CREDS['username'],
settings.KEYSTONE_CREDS['password']))
def setup_environment(self, custom=settings.CUSTOM_ENV,
build_images=settings.BUILD_IMAGES):
# start admin node
admin = self.get_virtual_environment().nodes().admin
admin.disk_devices.get(device='cdrom').volume.upload(settings.ISO_PATH)
self.get_virtual_environment().start(self.get_virtual_environment(
).nodes().admins)
logger.info("Waiting for admin node to start up")
wait(lambda: admin.driver.node_active(admin), 60)
logger.info("Proceed with installation")
# update network parameters at boot screen
admin.send_keys(self.get_keys(admin, custom=custom,
build_images=build_images))
if custom:
self.setup_customisation()
# wait while installation complete
admin.await(self.get_virtual_environment().admin_net, timeout=10 * 60)
self.set_admin_ssh_password()
self.wait_bootstrap()
time.sleep(10)
self.set_admin_keystone_password()
self.sync_time_admin_node()
if settings.MULTIPLE_NETWORKS:
self.describe_second_admin_interface()
multiple_networks_hacks.configure_second_admin_cobbler(self)
multiple_networks_hacks.configure_second_dhcrelay(self)
self.nailgun_actions.set_collector_address(
settings.FUEL_STATS_HOST,
settings.FUEL_STATS_PORT,
settings.FUEL_STATS_SSL)
if settings.FUEL_STATS_ENABLED:
self.fuel_web.client.send_fuel_stats(enabled=True)
logger.info('Enabled sending of statistics to {0}:{1}'.format(
settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
))
@upload_manifests
def wait_for_provisioning(self):
_wait(lambda: _tcp_ping(
self.get_virtual_environment().nodes().admin
.get_ip_address_by_network_name(
self.get_virtual_environment().admin_net), 22), timeout=5 * 60)
def setup_customisation(self):
self.wait_for_provisioning()
try:
remote = self.get_admin_remote()
pid = remote.execute("pgrep 'fuelmenu'")['stdout'][0]
pid = pid.rstrip('\n')
remote.execute("kill -sigusr1 {0}".format(pid))
except Exception:
logger.error("Could not kill pid of fuelmenu")
raise
@retry(count=10, delay=60)
@logwrap
def sync_node_time(self, remote):
self.execute_remote_cmd(remote, 'hwclock -s')
self.execute_remote_cmd(remote, 'NTPD=$(find /etc/init.d/ -regex \''
'/etc/init.d/\(ntp.?\|ntp-dev\)\');'
'$NTPD stop && ntpd -dqg && $NTPD '
'start')
self.execute_remote_cmd(remote, 'hwclock -w')
remote_date = remote.execute('date')['stdout']
logger.info("Node time: %s" % remote_date)
@retry(count=10, delay=60)
@logwrap
def sync_time_admin_node(self):
logger.info("Sync time on revert for admin")
remote = self.get_admin_remote()
self.execute_remote_cmd(remote, 'hwclock -s')
# Sync time using ntpd
try:
# If public NTP servers aren't accessible ntpdate will fail and
# ntpd daemon shouldn't be restarted to avoid 'Server has gone
# too long without sync' error while syncing time from slaves
self.execute_remote_cmd(remote, "ntpdate -vu $(awk '/^server/ && "
"$2 !~ /127.*/ {print $2}' "
"/etc/ntp.conf)")
except AssertionError as e:
logger.warning('Error occurred while synchronizing time on master'
': {0}'.format(e))
raise
else:
self.execute_remote_cmd(remote, 'service ntpd stop && ntpd -dqg &&'
' service ntpd start')
self.execute_remote_cmd(remote, 'hwclock -w')
remote_date = remote.execute('date')['stdout']
logger.info("Master node time: {0}".format(remote_date))
def verify_network_configuration(self, node_name):
checkers.verify_network_configuration(
node=self.fuel_web.get_nailgun_node_by_name(node_name),
remote=self.get_ssh_to_remote_by_name(node_name)
)
def wait_bootstrap(self):
logger.info("Waiting while bootstrapping is in progress")
log_path = "/var/log/puppet/bootstrap_admin_node.log"
logger.info("Puppet timeout set in {0}".format(
float(settings.PUPPET_TIMEOUT)))
wait(
lambda: not
self.get_admin_remote().execute(
"grep 'Fuel node deployment' '%s'" % log_path
)['exit_code'],
timeout=(float(settings.PUPPET_TIMEOUT))
)
result = self.get_admin_remote().execute("grep 'Fuel node deployment "
"complete' '%s'" % log_path
)['exit_code']
if result != 0:
raise Exception('Fuel node deployment failed.')
def dhcrelay_check(self):
admin_remote = self.get_admin_remote()
out = admin_remote.execute("dhcpcheck discover "
"--ifaces eth0 "
"--repeat 3 "
"--timeout 10")['stdout']
assert_true(self.get_admin_node_ip() in "".join(out),
"dhcpcheck doesn't discover master ip")
def run_nailgun_agent(self, remote):
agent = remote.execute('/opt/nailgun/bin/agent')['exit_code']
logger.info("Nailgun agent run with exit_code: %s" % agent)
def get_fuel_settings(self, remote=None):
if not remote:
remote = self.get_admin_remote()
cmd = 'cat {cfg_file}'.format(cfg_file=settings.FUEL_SETTINGS_YAML)
result = remote.execute(cmd)
if result['exit_code'] == 0:
fuel_settings = yaml.load(''.join(result['stdout']))
else:
raise Exception('Can\'t read {cfg_file} file: {error}'.
format(cfg_file=settings.FUEL_SETTINGS_YAML,
error=result['stderr']))
return fuel_settings
def admin_install_pkg(self, pkg_name):
"""Install a package <pkg_name> on the admin node"""
admin_remote = self.get_admin_remote()
remote_status = admin_remote.execute("rpm -q {0}'".format(pkg_name))
if remote_status['exit_code'] == 0:
logger.info("Package '{0}' already installed.".format(pkg_name))
else:
logger.info("Installing package '{0}' ...".format(pkg_name))
remote_status = admin_remote.execute("yum -y install {0}"
.format(pkg_name))
logger.info("Installation of the package '{0}' has been"
" completed with exit code {1}"
.format(pkg_name, remote_status['exit_code']))
return remote_status['exit_code']
def admin_run_service(self, service_name):
"""Start a service <service_name> on the admin node"""
admin_remote = self.get_admin_remote()
admin_remote.execute("service {0} start".format(service_name))
remote_status = admin_remote.execute("service {0} status"
.format(service_name))
if any('running...' in status for status in remote_status['stdout']):
logger.info("Service '{0}' is running".format(service_name))
else:
logger.info("Service '{0}' failed to start"
" with exit code {1} :\n{2}"
.format(service_name,
remote_status['exit_code'],
remote_status['stdout']))
# Modifies a resolv.conf on the Fuel master node and returns
# its original content.
# * adds 'nameservers' at start of resolv.conf if merge=True
# * replaces resolv.conf with 'nameservers' if merge=False
def modify_resolv_conf(self, nameservers=None, merge=True):
nameservers = list(nameservers) if nameservers else []
remote = self.get_admin_remote()
resolv_conf = remote.execute('cat /etc/resolv.conf')
assert_equal(0, resolv_conf['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format('cat /etc/resolv.conf', resolv_conf['stderr']))
if merge:
nameservers.extend(resolv_conf['stdout'])
resolv_keys = ['search', 'domain', 'nameserver']
resolv_new = "".join('{0}\n'.format(ns) for ns in nameservers
if any(x in ns for x in resolv_keys))
logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
echo_result = remote.execute(echo_cmd)
assert_equal(0, echo_result['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format(echo_cmd, echo_result['stderr']))
return resolv_conf['stdout']
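# Usage sketch for modify_resolv_conf (the nameserver is hypothetical):
#   original = env.modify_resolv_conf(['nameserver 172.18.32.5'])
#   ... run steps that need the custom resolver ...
#   env.modify_resolv_conf(original, merge=False)  # restore the old file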
@logwrap
def execute_remote_cmd(self, remote, cmd, exit_code=0):
result = remote.execute(cmd)
assert_equal(result['exit_code'], exit_code,
'Failed to execute "{0}" on remote host: {1}'.
format(cmd, result))
return result['stdout']
@logwrap
def describe_second_admin_interface(self):
remote = self.get_admin_remote()
second_admin_network = self._get_network(self.get_virtual_environment(
).admin_net2).split('/')[0]
second_admin_netmask = self.get_net_mask(self.get_virtual_environment(
).admin_net2)
second_admin_if = settings.INTERFACES.get(self.get_virtual_environment(
).admin_net2)
second_admin_ip = str(self.get_virtual_environment().nodes(
).admin.get_ip_address_by_network_name(self.get_virtual_environment(
).admin_net2))
logger.info(('Parameters for second admin interface configuration: '
'Network - {0}, Netmask - {1}, Interface - {2}, '
'IP Address - {3}').format(second_admin_network,
second_admin_netmask,
second_admin_if,
second_admin_ip))
add_second_admin_ip = ('DEVICE={0}\\n'
'ONBOOT=yes\\n'
'NM_CONTROLLED=no\\n'
'USERCTL=no\\n'
'PEERDNS=no\\n'
'BOOTPROTO=static\\n'
'IPADDR={1}\\n'
'NETMASK={2}\\n').format(second_admin_if,
second_admin_ip,
second_admin_netmask)
cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
add_second_admin_ip, second_admin_if, second_admin_ip)
logger.debug('Trying to assign {0} IP to the {1} on master node...'.
format(second_admin_ip, second_admin_if))
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
'IP address on master node: {0}').format(result))
logger.debug('Done: {0}'.format(result['stdout']))
multiple_networks_hacks.configure_second_admin_firewall(
self,
second_admin_network,
second_admin_netmask)
@logwrap
def get_masternode_uuid(self):
return self.postgres_actions.run_query(
db='nailgun',
query="select master_node_uid from master_node_settings limit 1;")

File diff suppressed because it is too large

View File

@ -0,0 +1,372 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelweb_test import logwrap
from fuelweb_test import logger
from fuelweb_test.helpers.decorators import json_parse
from fuelweb_test.helpers.http import HTTPClient
from fuelweb_test.settings import KEYSTONE_CREDS
from fuelweb_test.settings import OPENSTACK_RELEASE
class NailgunClient(object):
def __init__(self, admin_node_ip, **kwargs):
url = "http://{0}:8000".format(admin_node_ip)
logger.info('Initiate Nailgun client with url %s', url)
self.keystone_url = "http://{0}:5000/v2.0".format(admin_node_ip)
self._client = HTTPClient(url=url, keystone_url=self.keystone_url,
credentials=KEYSTONE_CREDS,
**kwargs)
super(NailgunClient, self).__init__()
@property
def client(self):
return self._client
@logwrap
def get_root(self):
return self.client.get("/")
@json_parse
def list_nodes(self):
return self.client.get("/api/nodes/")
@json_parse
def list_cluster_nodes(self, cluster_id):
return self.client.get("/api/nodes/?cluster_id={}".format(cluster_id))
@logwrap
@json_parse
def get_networks(self, cluster_id):
net_provider = self.get_cluster(cluster_id)['net_provider']
return self.client.get(
"/api/clusters/{}/network_configuration/{}".format(
cluster_id, net_provider
)
)
@logwrap
@json_parse
def verify_networks(self, cluster_id):
net_provider = self.get_cluster(cluster_id)['net_provider']
return self.client.put(
"/api/clusters/{}/network_configuration/{}/verify/".format(
cluster_id, net_provider
),
data=self.get_networks(cluster_id)
)
@json_parse
def get_cluster_attributes(self, cluster_id):
return self.client.get(
"/api/clusters/{}/attributes/".format(cluster_id)
)
@logwrap
@json_parse
def update_cluster_attributes(self, cluster_id, attrs):
return self.client.put(
"/api/clusters/{}/attributes/".format(cluster_id),
attrs
)
@logwrap
@json_parse
def get_cluster(self, cluster_id):
return self.client.get(
"/api/clusters/{}".format(cluster_id)
)
@logwrap
@json_parse
def update_cluster(self, cluster_id, data):
return self.client.put(
"/api/clusters/{}/".format(cluster_id),
data
)
@logwrap
@json_parse
def delete_cluster(self, cluster_id):
return self.client.delete(
"/api/clusters/{}/".format(cluster_id)
)
@logwrap
@json_parse
def update_node(self, node_id, data):
return self.client.put(
"/api/nodes/{}/".format(node_id), data
)
@logwrap
@json_parse
def update_nodes(self, data):
return self.client.put(
"/api/nodes", data
)
@logwrap
@json_parse
def deploy_cluster_changes(self, cluster_id):
return self.client.put(
"/api/clusters/{}/changes/".format(cluster_id)
)
@logwrap
@json_parse
def get_task(self, task_id):
return self.client.get("/api/tasks/{}".format(task_id))
@logwrap
@json_parse
def get_tasks(self):
return self.client.get("/api/tasks")
@logwrap
@json_parse
def get_releases(self):
return self.client.get("/api/releases/")
@logwrap
@json_parse
def get_releases_details(self, release_id):
return self.client.get("/api/releases/{}".format(release_id))
@logwrap
@json_parse
def get_node_disks(self, node_id):
return self.client.get("/api/nodes/{}/disks".format(node_id))
@logwrap
@json_parse
def put_node_disks(self, node_id, data):
return self.client.put("/api/nodes/{}/disks".format(node_id), data)
@logwrap
def get_release_id(self, release_name=OPENSTACK_RELEASE):
for release in self.get_releases():
if release["name"].lower().find(release_name.lower()) != -1:
return release["id"]
@logwrap
@json_parse
def get_node_interfaces(self, node_id):
return self.client.get("/api/nodes/{}/interfaces".format(node_id))
@logwrap
@json_parse
def put_node_interfaces(self, data):
return self.client.put("/api/nodes/interfaces", data)
@logwrap
@json_parse
def list_clusters(self):
return self.client.get("/api/clusters/")
@logwrap
@json_parse
def create_cluster(self, data):
logger.info('Before post to nailgun')
return self.client.post(
"/api/clusters",
data=data)
@logwrap
@json_parse
def get_ostf_test_sets(self, cluster_id):
return self.client.get("/ostf/testsets/{}".format(cluster_id))
@logwrap
@json_parse
def get_ostf_tests(self, cluster_id):
return self.client.get("/ostf/tests/{}".format(cluster_id))
@logwrap
@json_parse
def get_ostf_test_run(self, cluster_id):
return self.client.get("/ostf/testruns/last/{}".format(cluster_id))
@logwrap
@json_parse
def ostf_run_tests(self, cluster_id, test_sets_list):
logger.info('Run OSTF tests at cluster #%s: %s',
cluster_id, test_sets_list)
data = []
for test_set in test_sets_list:
data.append(
{
'metadata': {'cluster_id': str(cluster_id), 'config': {}},
'testset': test_set
}
)
# get tests otherwise 500 error will be thrown
self.get_ostf_tests(cluster_id)
return self.client.post("/ostf/testruns", data)
@logwrap
@json_parse
def ostf_run_singe_test(self, cluster_id, test_sets_list, test_name):
# get tests otherwise 500 error will be thrown
self.get_ostf_tests(cluster_id)
logger.info('Get tests finish with success')
data = []
for test_set in test_sets_list:
data.append(
{
'metadata': {'cluster_id': str(cluster_id), 'config': {}},
'tests': [test_name],
'testset': test_set
}
)
return self.client.post("/ostf/testruns", data)
@logwrap
@json_parse
def update_network(self, cluster_id, networking_parameters=None,
networks=None):
nc = self.get_networks(cluster_id)
if networking_parameters is not None:
for k in networking_parameters:
nc["networking_parameters"][k] = networking_parameters[k]
if networks is not None:
nc["networks"] = networks
net_provider = self.get_cluster(cluster_id)['net_provider']
return self.client.put(
"/api/clusters/{}/network_configuration/{}".format(
cluster_id, net_provider
),
nc
)
@logwrap
def get_cluster_id(self, name):
for cluster in self.list_clusters():
if cluster["name"] == name:
logger.info('cluster name is %s' % name)
logger.info('cluster id is %s' % cluster["id"])
return cluster["id"]
@logwrap
def add_syslog_server(self, cluster_id, host, port):
# Here we updating cluster editable attributes
# In particular we set extra syslog server
attributes = self.get_cluster_attributes(cluster_id)
attributes["editable"]["syslog"]["syslog_server"]["value"] = host
attributes["editable"]["syslog"]["syslog_port"]["value"] = port
self.update_cluster_attributes(cluster_id, attributes)
@logwrap
def get_cluster_vlans(self, cluster_id):
cluster_vlans = []
nc = self.get_networks(cluster_id)['networking_parameters']
vlan_start = nc["fixed_networks_vlan_start"]
network_amount = int(nc["fixed_networks_amount"] - 1)
cluster_vlans.extend([vlan_start, vlan_start + network_amount])
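# e.g. vlan_start=1000 with fixed_networks_amount=8 gives [1000, 1007]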
return cluster_vlans
@logwrap
@json_parse
def get_notifications(self):
return self.client.get("/api/notifications")
@logwrap
@json_parse
def update_redhat_setup(self, data):
return self.client.post("/api/redhat/setup", data=data)
@logwrap
@json_parse
def generate_logs(self):
return self.client.put("/api/logs/package")
@logwrap
def provision_nodes(self, cluster_id):
return self.do_cluster_action(cluster_id)
@logwrap
def deploy_nodes(self, cluster_id):
return self.do_cluster_action(cluster_id, "deploy")
@logwrap
def stop_deployment(self, cluster_id):
return self.do_stop_reset_actions(cluster_id)
@logwrap
def reset_environment(self, cluster_id):
return self.do_stop_reset_actions(cluster_id, action="reset")
@logwrap
@json_parse
def do_cluster_action(self, cluster_id, action="provision"):
nailgun_nodes = self.list_cluster_nodes(cluster_id)
cluster_node_ids = map(lambda _node: str(_node['id']), nailgun_nodes)
return self.client.put(
"/api/clusters/{0}/{1}?nodes={2}".format(
cluster_id,
action,
','.join(cluster_node_ids))
)
@logwrap
@json_parse
def do_stop_reset_actions(self, cluster_id, action="stop_deployment"):
return self.client.put(
"/api/clusters/{0}/{1}/".format(str(cluster_id), action))
@logwrap
@json_parse
def get_api_version(self):
return self.client.get("/api/version")
@logwrap
@json_parse
def run_update(self, cluster_id):
return self.client.put(
"/api/clusters/{0}/update/".format(str(cluster_id)))
@logwrap
@json_parse
def create_nodegroup(self, cluster_id, group_name):
data = {"cluster_id": cluster_id, "name": group_name}
return self.client.post("/api/nodegroups/", data=data)
@logwrap
@json_parse
def get_nodegroups(self):
return self.client.get("/api/nodegroups/")
@logwrap
@json_parse
def assign_nodegroup(self, group_id, nodes):
return self.client.post("/api/nodegroups/{0}/".format(group_id),
data=nodes)
@logwrap
@json_parse
def update_settings(self, data=None):
return self.client.put("/api/settings", data=data)
@logwrap
def send_fuel_stats(self, enabled=False, user_email="test@localhost"):
settings = self.update_settings()
params = ('send_anonymous_statistic', 'send_user_info',
'user_choice_saved')
for p in params:
settings['settings']['statistics'][p]['value'] = enabled
if user_email:
settings['settings']['statistics']['email']['value'] = user_email
self.update_settings(data=settings)
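A minimal usage sketch (the admin node IP and cluster name are placeholders; each call returns parsed JSON thanks to the json_parse decorator):
client = NailgunClient('10.108.0.2')
cluster_id = client.get_cluster_id('test_cluster')
for node in client.list_cluster_nodes(cluster_id):
print(node['id'], node['status'])
task = client.deploy_nodes(cluster_id)  # PUT /api/clusters/<id>/deploy?nodes=...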

View File

@ -0,0 +1,55 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from fuelweb_test.models.environment import EnvironmentModel
from fuelweb_test import settings
class PuppetEnvironment(EnvironmentModel):
"""Create environment for puppet modules testing."""
def __init__(self, os_image=None):
"""Constructor for create environment."""
self.os_image = os_image or settings.OS_IMAGE
super(PuppetEnvironment, self).__init__()
self.environment = \
super(PuppetEnvironment, self).get_virtual_environment()
self.start_env()
@property
def env_name(self):
return os.environ.get('PPENV_NAME', 'pp-integration')
def start_env(self):
self.get_virtual_environment().start(self.nodes())
def execute_cmd(self, command, debug=True):
"""Execute command on node."""
return self.get_admin_remote().execute(command,
verbose=debug)['exit_code']
def await(self, timeout=1200):
wait(
lambda: tcp_ping(self.get_admin_node_ip(), 22), timeout=timeout)
if __name__ == "__main__":
env = PuppetEnvironment(
'/var/lib/libvirt/images/ubuntu-12.04.1-server-amd64-p2.qcow2')
env.await()
env.make_snapshot(snapshot_name="test1")
env.execute_cmd('apt-get install mc')

View File

@ -0,0 +1,130 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
OSTF_TEST_MAPPING = {
'Check data replication over mysql': 'fuel_health.tests.ha.test_'
'mysql_replication.'
'TestMysqlReplication.'
'test_mysql_replication',
'Check amount of tables in '
'databases is the same on each node': 'fuel_health.tests.ha.'
'test_mysql_replication.'
'TestMysqlReplication.'
'test_os_databases',
'Check mysql environment state': 'fuel_health.tests.ha.'
'test_mysql_replication.'
'TestMysqlReplication.'
'test_state_of_mysql_cluster',
'Check galera environment state': 'fuel_health.tests.ha.'
'test_mysql_replication.'
'TestMysqlReplication.'
'test_state_of_galera_cluster',
'Check RabbitMQ is available': 'fuel_health.tests.ha.'
'test_rabbit.RabbitSmokeTest.'
'test_001_rabbitmqctl_status',
'RabbitMQ availability': 'fuel_health.tests.ha.test_rabbit.'
'RabbitSmokeTest.'
'test_002_rabbitmqctl_status_ubuntu',
'List ceilometer availability': 'fuel_health.tests.sanity.'
'test_sanity_ceilometer.'
'CeilometerApiTests.test_list_meters',
'Request instance list': 'fuel_health.tests.sanity.test_sanity_compute.'
'SanityComputeTest.test_list_instances',
'Request image list': 'fuel_health.tests.sanity.test_sanity_compute.'
'SanityComputeTest.test_list_images',
'Request volume list': 'fuel_health.tests.sanity.test_sanity_compute.'
'SanityComputeTest.test_list_volumes',
'Request snapshot list': 'fuel_health.tests.sanity.test_sanity_compute.'
'SanityComputeTest.test_list_snapshots',
'Request flavor list': 'fuel_health.tests.sanity.test_sanity_compute.'
'SanityComputeTest.test_list_flavors',
'Request absolute limits list': 'fuel_health.tests.sanity.'
'test_sanity_compute.SanityComputeTest.'
'test_list_rate_limits',
'Request stack list': 'fuel_health.tests.sanity.test_sanity_heat.'
'SanityHeatTest.test_list_stacks',
'Request active services list': 'fuel_health.tests.sanity.'
'test_sanity_identity.'
'SanityIdentityTest.test_list_services',
'Request user list': 'fuel_health.tests.sanity.test_sanity_identity.'
'SanityIdentityTest.test_list_users',
'Check that required services are running': 'fuel_health.tests.sanity.'
'test_sanity_infrastructure.'
'SanityInfrastructureTest.'
'test_001_services_state',
'Check internet connectivity from a compute': 'fuel_health.tests.sanity.'
'test_sanity_infrastructure.'
'SanityInfrastructureTest.'
'test_002_internet_'
'connectivity_from_compute',
'Check DNS resolution on compute node': 'fuel_health.tests.sanity.'
'test_sanity_infrastructure.'
'SanityInfrastructureTest.'
'test_003_dns_resolution',
'Create and delete Murano environment': 'fuel_health.tests.sanity.'
'test_sanity_murano.'
'MuranoSanityTests.'
'test_create_and_delete_service',
'Request list of networks': 'fuel_health.tests.sanity.'
'test_sanity_networking.NetworksTest.'
'test_list_networks',
'Sahara tests to create/list/delete node'
' group and cluster templates': 'fuel_health.tests.sanity.'
'test_sanity_sahara.'
'SanitySaharaTests.test_sanity_sahara',
'Create instance flavor': 'fuel_health.tests.smoke.test_create_flavor.'
'FlavorsAdminTest.test_create_flavor',
'Create volume and attach it to instance': 'fuel_health.tests.smoke.'
'test_create_volume.'
'VolumesTest.'
'test_volume_create',
'Create keypair': 'fuel_health.tests.smoke.'
'test_nova_create_instance_with_connectivity.'
'TestNovaNetwork.test_001_create_keypairs',
'Create security group': 'fuel_health.tests.smoke.'
'test_nova_create_instance_with_connectivity.'
'TestNovaNetwork.'
'test_002_create_security_groups',
'Check network parameters': 'fuel_health.tests.smoke.'
'test_nova_create_instance_with_connectivity.'
'TestNovaNetwork.test_003_check_networks',
'Launch instance': 'fuel_health.tests.smoke.'
'test_nova_create_instance_with_connectivity.'
'TestNovaNetwork.test_004_create_servers',
'Check that VM is accessible '
'via floating IP address': 'fuel_health.tests.smoke.'
'test_nova_create_instance_with_connectivity.'
'TestNovaNetwork.'
'test_005_check_public_network_connectivity',
'Check network connectivity'
' from instance via floating IP': 'fuel_health.tests.smoke.'
'test_nova_create_instance_with_'
'connectivity.TestNovaNetwork.'
'test_008_check_public_instance_'
'connectivity_from_instance',
'Check network connectivity from '
'instance without floating IP': 'fuel_health.tests.smoke.test_nova_create_'
'instance_with_connectivity.'
'TestNovaNetwork.test_006_check_'
'internet_connectivity_instance_'
'without_floatingIP',
'Launch instance, create snapshot,'
' launch instance from snapshot': 'fuel_health.tests.smoke.'
'test_nova_image_actions.'
'TestImageAction.test_snapshot',
'Create user and authenticate with it to Horizon': 'fuel_health.tests.smoke.'
'test_user_create.'
'TestUserTenantRole.'
'test_create_user',
}
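The dictionary resolves human-readable OSTF test titles to dotted test paths. A lookup sketch (the reverse index is an illustration, not part of the module):
title = 'Check data replication over mysql'
print(OSTF_TEST_MAPPING[title])
# fuel_health.tests.ha.test_mysql_replication.TestMysqlReplication.test_mysql_replication
# Reverse index, handy when matching OSTF results back to titles:
TITLE_BY_PATH = dict((path, title) for title, path in OSTF_TEST_MAPPING.items())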

View File

@ -0,0 +1 @@
__author__ = 'aurlapova'

View File

@ -0,0 +1,132 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import jinja2
from puppet_module import PuppetModule
class PuppetTestGenerator:
"""Puppet Test Generator
This is main class. It finds all modules in the given directory and creates
tests for them.
You should give constructor following arguments:
- local_modules_path* Path to puppet modules which will be scanned for
test files
- tests_directory_path* Output directory where files will be written
- debug level
"""
def __init__(self, tests_directory_path, modules_path):
"""Constructor
Constructor
"""
if not os.path.isdir(modules_path):
logging.error('No such directory: ' + modules_path)
if not os.path.isdir(tests_directory_path):
logging.error('No such directory: ' + tests_directory_path)
self.modules_path = modules_path
self.tests_directory = tests_directory_path
self.default_template = 'puppet_module_test.py.tmpl'
self.test_file_prefix = 'TestPuppetModule'
self.modules = []
self.module_templates = {}
self.make_tests_dir = os.path.dirname(os.path.abspath(__file__))
self.template_directory = 'puppet_tests/templates'
if not os.path.isdir(self.template_directory):
logging.error("No such directory: " + self.template_directory)
self.template_loader = jinja2.FileSystemLoader(
searchpath=self.template_directory)
self.template_environment = jinja2.Environment(
loader=self.template_loader,
)
self.internal_modules_path = '/etc/puppet/modules'
self.internal_manifests_path = '/etc/puppet/manifests'
self.find_modules()
def find_modules(self):
"""Find modules in library path
Find all Puppet modules in module_library_path
and create array of PuppetModule objects
"""
logging.debug('Starting find modules in "%s"' % self.modules_path)
for module_dir in os.listdir(self.modules_path):
full_module_path = os.path.join(self.modules_path, module_dir)
full_tests_path = os.path.join(full_module_path, 'tests')
if not os.path.isdir(full_tests_path):
continue
logging.debug('Found Puppet module: "%s"' % full_module_path)
puppet_module = PuppetModule(full_module_path)
self.modules.append(puppet_module)
def compile_script(self, module):
"""Compile script template
Compile script template for given module and return it
"""
template_file = self.module_templates.get(module.name,
self.default_template)
template = self.template_environment.get_template(template_file)
general = {
'local_modules_path': self.modules_path,
'internal_modules_path': self.internal_modules_path,
'internal_manifests_path': self.internal_manifests_path,
'tests_directory_path': self.tests_directory
}
compiled_template = template.render(module=module, **general)
return compiled_template
def save_script(self, module):
"""Save compiled script
Saves compiled script to a file
"""
file_name = self.test_file_prefix + module.name.title() + '.py'
full_file_path = os.path.join(self.tests_directory, file_name)
script_content = self.compile_script(module)
with open(full_file_path, 'w') as script_file:
script_file.write(script_content)
def make_all_scripts(self):
"""Compile and save all scripts
Compile and save to tests_directory_path all the test scripts.
Main function.
"""
for module in self.modules:
logging.debug('Processing module: "%s"' % module.name)
self.save_script(module)
def remove_all_tests(self):
"""Remove all tests
Remove all tests from tests_directory_path
"""
file_list = os.listdir(self.tests_directory)
for test_file in file_list:
if not test_file.endswith('.py'):
continue
if not test_file.startswith('TestPuppetModule'):
continue
full_file_path = os.path.join(self.tests_directory, test_file)
logging.debug('Removing test file: "%s"' % full_file_path)
os.remove(full_file_path)

View File

@ -0,0 +1,122 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import sys
from puppet_test import PuppetTest
path = os.path.abspath(__file__)
path = os.path.dirname(path)
sys.path.insert(0, path)
class PuppetModule:
"""This class represents Puppet module."""
def __init__(self, local_module_path):
"""You should give this constructor the full path to the module."""
self.local_module_path = local_module_path
self.module_name = os.path.basename(self.local_module_path)
self.__tests = []
self.__dependencies = []
self.comment_regexp = re.compile(r'^\s*#')
self.dependency_regexp = \
re.compile(r'^\s*dependency\s*[\'\"]*([^\'\"]+)[\'\"]*')
self.find_tests()
self.find_dependencies()
def find_dependencies(self):
"""Get dependencies of this module from Modulefile if present."""
module_file = 'Modulefile'
dependencies = []
module_file_path = os.path.join(self.local_module_path, module_file)
if not os.path.isfile(module_file_path):
self.__dependencies = dependencies
return False
with open(module_file_path, 'r') as opened_file:
for line in opened_file:
if re.match(self.comment_regexp, line):
# skip commented line
continue
match = re.match(self.dependency_regexp, line)
if match:
# found dependency line
dependency_name = match.group(1).split('/')[-1]
dependencies.append(dependency_name)
self.__dependencies = dependencies
return True
def find_tests(self):
"""Find all tests.
Find all tests in this module and fill tests array
with PuppetTest objects.
"""
current_path = os.path.abspath(os.curdir)
try:
os.chdir(self.local_module_path)
except OSError as error:
logging.error("Cannot change directory to %s: %s" %
(self.local_module_path, error.message))
else:
for root, dirs, files in os.walk('tests'):
for test_file in files:
if not test_file.endswith('.pp'):
continue
test_file_path = os.path.join(root, test_file)
puppet_test = PuppetTest(test_file_path)
self.__tests.append(puppet_test)
finally:
# try to restore original folder on exit
try:
os.chdir(current_path)
except OSError as error:
logging.error("Cannot change directory to %s: %s" %
(current_path, error.message))
@property
def tests(self):
"""Property returns list of tests."""
return self.__tests
@property
def name(self):
"""Property returns module name."""
return self.module_name
@property
def path(self):
"""Property returns path to this module."""
return self.local_module_path
@property
def dependencies(self):
"""Property returns list of module dependencies."""
return self.__dependencies
def __repr__(self):
"""String representation of PuppetModule."""
tests_string = ''
if len(self.tests) > 0:
tests = [repr(test) for test in self.tests]
tests_string += ", ".join(tests)
tpl = "PuppetModule(name=%s, path=%s, tests=[%s]" \
% (self.name, self.path, tests_string)
return tpl
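A short inspection sketch for this class (the module path is hypothetical):
module = PuppetModule('/etc/puppet/modules/ntp')
print(module.name)          # basename of the module directory
print(module.dependencies)  # parsed from Modulefile, [] when it is absent
for test in module.tests:   # PuppetTest objects found under tests/
print(test)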

View File

@ -0,0 +1,81 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glob import glob
import os
import stat
class PuppetTest:
"""This class represents single test of the Puppet module."""
def __init__(self, test_file_path):
"""You should give this constructor path to test file."""
self.test_file_path = test_file_path
self.tests_path = os.path.dirname(self.test_file_path)
self.test_file_name = os.path.basename(self.test_file_path)
self.test_name = self.test_file_name.replace('.pp', '')
self.find_verify_file()
def find_verify_file(self):
"""Get verify script for this test if there is one."""
pattern = os.path.join(self.tests_path, self.test_name) + '*'
verify_files = glob(pattern)
verify_files = [os.path.basename(verify_file)
for verify_file in verify_files
if not verify_file.endswith('.pp')]
if verify_files:
self.__verify_file = verify_files[0]
self.make_verify_executable()
else:
self.__verify_file = None
def make_verify_executable(self):
"""Set executable bit for a file."""
file_path = os.path.join(self.tests_path, self.__verify_file)
if not os.path.isfile(file_path):
return False
file_stat = os.stat(file_path)
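# Add the execute bit for user, group and others (equivalent to chmod a+x)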
os.chmod(
file_path,
file_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
return True
@property
def path(self):
"""Return path to test.
Property returns path to this test relative to module and excluding
file name
"""
return self.tests_path
@property
def file(self):
"""Property returns this tests' file name."""
return self.test_file_name
@property
def name(self):
"""Property returns name of this test."""
return self.test_name
@property
def verify_file(self):
"""Property returns verify file name."""
return self.__verify_file
def __repr__(self):
"""String representation of PuppetTest."""
return "PuppetTest(name=%s, path=%s, file=%s)" % \
(self.name, self.path, self.file)

View File

@ -0,0 +1,22 @@
# modules_path="{{ modules_path }}"
# local_modules_path="{{ local_modules_path }}"
# internal_modules_path="{{ internal_modules_path }}"
# internal_manifests_path="{{ internal_manifests_path }}"
# tests_directory_path="{{ tests_directory_path }}"
# Module
# module.name="{{ module.name }}"
# module.path="{{ module.path }}"
# module.tests="{{ module.tests }}"
# module.dependencies="{{ module.dependencies }}"
# Module "{{ module.name }}" has {{ module.tests|count }} tests:
{% for test in module.tests -%}
# Test {{ loop.index }}:
# test.name="{{ test.name }}"
# test.path="{{ test.path }}"
# test.file="{{ test.file }}"
{% endfor -%}

View File

@ -0,0 +1,53 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelweb_test.helpers.decorators import debug
from fuelweb_test.helpers.decorators import upload_manifests
from fuelweb_test.models.pp_environment import PuppetEnvironment
import logging
import unittest
logger = logging.getLogger('integration')
logwrap = debug(logger)
class TestPuppetModule{{ module.name|title }}(unittest.TestCase): # flake8: noqa
@upload_manifests
def setUp(self):
self.env = PuppetEnvironment()
self.env.await()
self.puppet_apply = "puppet apply " \
"--verbose " \
"--detailed-exitcodes " \
"--modulepath='{{ internal_modules_path }}'"
if not self.env.get_virtual_environment().has_snapshot("before_test"):
self.env.make_snapshot(snapshot_name="before_test")
{% for test in module.tests %} # flake8: noqa
def test_{{ test.name|title }}(self): # flake8: noqa
manifest = \
"{{ internal_modules_path }}/{{ module.name }}/{{ test.path }}/{{test.file }}" # flake8: noqa
result = self.env.execute_cmd("%s '%s'" % (self.puppet_apply, manifest)) # flake8: noqa
self.assertIn(result, [0, 2])
{% endfor %} # flake8: noqa
def tearDown(self): # flake8: noqa
self.env.revert_snapshot("before_test")
if __name__ == '__main__':
unittest.main()
{# Enable this to get a debug list with all template values
{% include 'debug_template.txt' %}
#}

View File

@ -0,0 +1,42 @@
# Class: acpid
#
# Sample for usage acpid
#
#
class acpid($status = true) {
if ($::osfamily == 'Debian') {
$package = 'acpid'
$service = 'acpid'
}
elsif ($::osfamily == 'RedHat') {
$package = 'acpid'
$service = 'acpid'
}
else {
fail("Module ${module_name} is not supported on ${::operatingsystem}!")
}
if ($status) {
$ensure = 'running'
$enable = true
}
else {
$ensure = 'stopped'
$enable = false
}
package { $package :
ensure => installed,
}
service { $service :
ensure => $ensure,
enable => $enable,
hasrestart => true,
hasstatus => true,
}
Package[$package] -> Service[$service]
}

View File

@ -0,0 +1,4 @@
# acpid_off
class { 'acpid' :
status => false,
}

View File

@ -0,0 +1,9 @@
#!/bin/sh
PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
ps axuwww | grep -v grep | grep -q " acpid "
if [ $? -gt 0 ]; then
exit 0
else
exit 1
fi

View File

@ -0,0 +1,4 @@
# acpid_on
class { 'acpid' :
status => true,
}

View File

@ -0,0 +1,5 @@
#!/bin/sh
PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
ps axuwww | grep -v grep | grep -q " acpid "
exit $?

View File

@ -0,0 +1,2 @@
--colour
--format documentation

View File

@ -0,0 +1,9 @@
require 'rake'
require 'rspec/core/rake_task'
task :default => :spec
RSpec::Core::RakeTask.new(:spec) do |t|
t.pattern = 'spec/*/*_spec.rb'
end

View File

@ -0,0 +1,13 @@
# = Class: motd
#
# Create file /etc/motd.
#
class motd {
file { '/etc/motd' :
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => 'Hello!',
}
}

View File

@ -0,0 +1,17 @@
require 'spec_helper'
describe 'motd' do
it do
should contain_file('/etc/motd').with({
'ensure' => 'present',
'owner' => 'root',
'group' => 'root',
'mode' => '0644',
})
end
it do
should contain_file('/etc/motd').with_content('Hello!')
end
end

View File

@ -0,0 +1 @@
../../../../manifests

View File

@ -0,0 +1,8 @@
require 'rspec-puppet'
fixture_path = File.expand_path(File.join(__FILE__, '..', 'fixtures'))
RSpec.configure do |c|
c.module_path = File.join(fixture_path, 'modules')
c.manifest_dir = File.join(fixture_path, 'manifests')
end

View File

@ -0,0 +1,2 @@
# motd test
include motd

View File

@ -0,0 +1,57 @@
# For more information about this file, see the man pages
# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
driftfile /var/lib/ntp/drift
# Permit time synchronization with our time source, but do not
# permit the source to query or modify the service on this system.
restrict default kod nomodify notrap nopeer noquery
restrict -6 default kod nomodify notrap nopeer noquery
# Permit all access over the loopback interface. This could
# be tightened as well, but to do so would effect some of
# the administrative functions.
restrict 127.0.0.1
restrict -6 ::1
# Hosts on local network are less restricted.
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server 0.centos.pool.ntp.org
server 1.centos.pool.ntp.org
server 2.centos.pool.ntp.org
#broadcast 192.168.1.255 autokey # broadcast server
#broadcastclient # broadcast client
#broadcast 224.0.1.1 autokey # multicast server
#multicastclient 224.0.1.1 # multicast client
#manycastserver 239.255.254.254 # manycast server
#manycastclient 239.255.254.254 autokey # manycast client
# Undisciplined Local Clock. This is a fake driver intended for backup
# and when no outside source of synchronized time is available.
#server 127.127.1.0 # local clock
#fudge 127.127.1.0 stratum 10
# Enable public key cryptography.
#crypto
includefile /etc/ntp/crypto/pw
# Key file containing the keys and key identifiers used when operating
# with symmetric key cryptography.
keys /etc/ntp/keys
# Specify the key identifiers which are trusted.
#trustedkey 4 8 42
# Specify the key identifier to use with the ntpdc utility.
#requestkey 8
# Specify the key identifier to use with the ntpq utility.
#controlkey 8
# Enable writing of statistics records.
#statistics clockstats cryptostats loopstats peerstats

View File

@ -0,0 +1,55 @@
# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
driftfile /var/lib/ntp/ntp.drift
# Enable this if you want statistics to be logged.
#statsdir /var/log/ntpstats/
statistics loopstats peerstats clockstats
filegen loopstats file loopstats type day enable
filegen peerstats file peerstats type day enable
filegen clockstats file clockstats type day enable
# Specify one or more NTP servers.
# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
# more information.
server 0.ubuntu.pool.ntp.org
server 1.ubuntu.pool.ntp.org
server 2.ubuntu.pool.ntp.org
server 3.ubuntu.pool.ntp.org
# Use Ubuntu's ntp server as a fallback.
server ntp.ubuntu.com
# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
# might also be helpful.
#
# Note that "restrict" applies to both servers and clients, so a configuration
# that might be intended to block requests from certain clients could also end
# up blocking replies from your own upstream servers.
# By default, exchange time with everybody, but don't allow configuration.
restrict -4 default kod notrap nomodify nopeer noquery
restrict -6 default kod notrap nomodify nopeer noquery
# Local users may interrogate the ntp server more closely.
restrict 127.0.0.1
restrict ::1
# Clients from this (example!) subnet have unlimited access, but only if
# cryptographically authenticated.
#restrict 192.168.123.0 mask 255.255.255.0 notrust
# If you want to provide time to your local subnet, change the next line.
# (Again, the address is an example only.)
#broadcast 192.168.123.255
# If you want to listen to time broadcasts on your local subnet, de-comment the
# next lines. Please do this only if you trust everybody on the network!
#disable auth
#broadcastclient

View File

@ -0,0 +1,43 @@
# = Class: ntp
#
# Sample of usage ntp,
# create config and start service ntp.
#
class ntp {
if $::osfamily == 'RedHat' {
$package = 'ntp'
$service = 'ntpd'
$config = '/etc/ntp.conf'
$conf_from = 'centos-ntp.conf'
} elsif $::osfamily == 'Debian' {
$package = 'ntp'
$service = 'ntp'
$config = '/etc/ntp.conf'
$conf_from = 'ubuntu-ntp.conf'
}
else {
fail("Module ${module_name} is not supported on ${::operatingsystem}!")
}
package { $package :
ensure => installed,
}
file { $config :
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
source => "puppet:///modules/ntp/${conf_from}",
}
service { $service :
ensure => 'running',
enable => true,
hasrestart => true,
hasstatus => true,
}
Package[$package] -> File[$config] ~> Service[$service]
}

View File

@ -0,0 +1,2 @@
# ntp class test
include ntp

View File

@ -0,0 +1 @@
*.py

View File

@ -0,0 +1,14 @@
nose==1.2.1
git+git://github.com/stackforge/fuel-devops.git@2.9.0
anyjson==0.3.1
paramiko
proboscis==1.2.6.0
ipaddr
junitxml>=0.7.0
python-glanceclient>=0.9.0
python-keystoneclient>=0.3.2
python-novaclient>=2.15.0
python-cinderclient>=1.0.5
python-neutronclient>=2.0
Jinja2
AllPairs==2.0.1

33
fuelweb_test/run_tests.py Normal file
View File

@ -0,0 +1,33 @@
def run_tests():
from proboscis import TestProgram # noqa
from tests import test_admin_node # noqa
from tests import test_ceph # noqa
from tests import test_environment_action # noqa
from tests import test_ha # noqa
from tests import test_neutron # noqa
from tests import test_pullrequest # noqa
from tests import test_services # noqa
from tests import test_ha_one_controller # noqa
from tests import test_vcenter # noqa
from tests.tests_strength import test_failover # noqa
from tests.tests_strength import test_master_node_failover # noqa
from tests.tests_strength import test_ostf_repeatable_tests # noqa
from tests.tests_strength import test_restart # noqa
from tests.tests_strength import test_huge_environments # noqa
from tests.tests_strength import test_image_based # noqa
from tests import test_bonding # noqa
from tests.tests_strength import test_neutron # noqa
from tests import test_zabbix # noqa
from tests import test_upgrade # noqa
from tests.plugins.plugin_example import test_fuel_plugin_example # noqa
from tests.plugins.plugin_glusterfs import test_plugin_glusterfs # noqa
from tests.plugins.plugin_lbaas import test_plugin_lbaas # noqa
from tests import test_multiple_networks # noqa
# Run Proboscis and exit.
TestProgram().run_and_exit()
if __name__ == '__main__':
run_tests()
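A sketch of how this entry point is usually launched (the group name is illustrative; proboscis's TestProgram selects test cases by --group):
#   export ENV_NAME=fuel_system_test ISO_PATH=/path/to/fuel.iso
#   python fuelweb_test/run_tests.py --group=test_admin_node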

363
fuelweb_test/settings.py Normal file
View File

@ -0,0 +1,363 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
# Default timezone for clear logging
TIME_ZONE = 'UTC'
ENV_NAME = os.environ.get("ENV_NAME", "fuel_system_test")
ISO_PATH = os.environ.get('ISO_PATH')
DNS = os.environ.get('DNS', '8.8.8.8')
OPENSTACK_RELEASE_CENTOS = 'centos'
OPENSTACK_RELEASE_UBUNTU = 'ubuntu'
OPENSTACK_RELEASE_REDHAT = 'rhos 3.0 for rhel 6.4'
OPENSTACK_RELEASE = os.environ.get(
'OPENSTACK_RELEASE', OPENSTACK_RELEASE_CENTOS).lower()
REDHAT_LICENSE_TYPE = os.environ.get('REDHAT_LICENSE_TYPE')
REDHAT_USERNAME = os.environ.get('REDHAT_USERNAME')
REDHAT_PASSWORD = os.environ.get('REDHAT_PASSWORD')
REDHAT_SATELLITE_HOST = os.environ.get('REDHAT_SATELLITE_HOST')
REDHAT_ACTIVATION_KEY = os.environ.get('REDHAT_ACTIVATION_KEY')
DEPLOYMENT_MODE_SIMPLE = "multinode"
DEPLOYMENT_MODE_HA = "ha_compact"
DEPLOYMENT_MODE = os.environ.get("DEPLOYMENT_MODE", DEPLOYMENT_MODE_HA)
ADMIN_NODE_SETUP_TIMEOUT = os.environ.get("ADMIN_NODE_SETUP_TIMEOUT", 30)
PUPPET_TIMEOUT = os.environ.get("PUPPET_TIMEOUT", 6000)
HARDWARE = {
"admin_node_memory": os.environ.get("ADMIN_NODE_MEMORY", 2048),
"admin_node_cpu": os.environ.get("ADMIN_NODE_CPU", 2),
"slave_node_cpu": os.environ.get("SLAVE_NODE_CPU", 1),
}
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
slave_mem_default = 2560
else:
slave_mem_default = 2048
HARDWARE["slave_node_memory"] = int(
os.environ.get("SLAVE_NODE_MEMORY", slave_mem_default))
NODE_VOLUME_SIZE = int(os.environ.get('NODE_VOLUME_SIZE', 50))
NODES_COUNT = os.environ.get('NODES_COUNT', 10)
MULTIPLE_NETWORKS = os.environ.get('MULTIPLE_NETWORKS', False) == 'true'
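# NOTE: boolean-style settings here compare the raw environment string with
# the exact value 'true'; any other value (or an unset variable) is False.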
if MULTIPLE_NETWORKS:
NODEGROUPS = (
{
'name': 'default',
'pools': ['admin', 'public', 'management', 'private',
'storage']
},
{
'name': 'group-custom-1',
'pools': ['admin2', 'public2', 'management2', 'private2',
'storage2']
}
)
FORWARD_DEFAULT = os.environ.get('FORWARD_DEFAULT', 'route')
ADMIN_FORWARD = os.environ.get('ADMIN_FORWARD', 'nat')
PUBLIC_FORWARD = os.environ.get('PUBLIC_FORWARD', 'nat')
else:
NODEGROUPS = {}
FORWARD_DEFAULT = os.environ.get('FORWARD_DEFAULT', None)
ADMIN_FORWARD = os.environ.get('ADMIN_FORWARD', FORWARD_DEFAULT or 'nat')
PUBLIC_FORWARD = os.environ.get('PUBLIC_FORWARD', FORWARD_DEFAULT or 'nat')
MGMT_FORWARD = os.environ.get('MGMT_FORWARD', FORWARD_DEFAULT)
PRIVATE_FORWARD = os.environ.get('PRIVATE_FORWARD', FORWARD_DEFAULT)
STORAGE_FORWARD = os.environ.get('STORAGE_FORWARD', FORWARD_DEFAULT)
DEFAULT_INTERFACE_ORDER = 'admin,public,management,private,storage'
INTERFACE_ORDER = os.environ.get('INTERFACE_ORDER',
DEFAULT_INTERFACE_ORDER).split(',')
FORWARDING = {
'admin': ADMIN_FORWARD,
'public': PUBLIC_FORWARD,
'management': MGMT_FORWARD,
'private': PRIVATE_FORWARD,
'storage': STORAGE_FORWARD,
}
DHCP = {
'admin': False,
'public': False,
'management': False,
'private': False,
'storage': False,
}
INTERFACES = {
'admin': 'eth0',
'public': 'eth1',
'management': 'eth2',
'private': 'eth3',
'storage': 'eth4',
}
# May be one of virtio, e1000, pcnet, rtl8139
INTERFACE_MODEL = os.environ.get('INTERFACE_MODEL', 'virtio')
POOL_DEFAULT = os.environ.get('POOL_DEFAULT', '10.108.0.0/16:24')
POOL_ADMIN = os.environ.get('POOL_ADMIN', POOL_DEFAULT)
POOL_PUBLIC = os.environ.get('POOL_PUBLIC', POOL_DEFAULT)
POOL_MANAGEMENT = os.environ.get('POOL_MANAGEMENT', POOL_DEFAULT)
POOL_PRIVATE = os.environ.get('POOL_PRIVATE', POOL_DEFAULT)
POOL_STORAGE = os.environ.get('POOL_STORAGE', POOL_DEFAULT)
DEFAULT_POOLS = {
'admin': POOL_ADMIN,
'public': POOL_PUBLIC,
'management': POOL_MANAGEMENT,
'private': POOL_PRIVATE,
'storage': POOL_STORAGE,
}
POOLS = {
'admin': os.environ.get(
'PUBLIC_POOL',
DEFAULT_POOLS.get('admin')).split(':'),
'public': os.environ.get(
'PUBLIC_POOL',
DEFAULT_POOLS.get('public')).split(':'),
'management': os.environ.get(
'PRIVATE_POOL',
DEFAULT_POOLS.get('management')).split(':'),
'private': os.environ.get(
'INTERNAL_POOL',
DEFAULT_POOLS.get('private')).split(':'),
'storage': os.environ.get(
'NAT_POOL',
DEFAULT_POOLS.get('storage')).split(':'),
}
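# Each POOLS entry after split(':') is a ['<pool CIDR>', '<prefix>'] pair;
# the default '10.108.0.0/16:24' yields ['10.108.0.0/16', '24'], i.e. a /16
# address pool carved into /24 per-network subnets (the actual allocation is
# done by fuel-devops).
# NOTE: the override variable names above (PUBLIC_POOL, PRIVATE_POOL,
# INTERNAL_POOL, NAT_POOL) do not match the pool keys one-to-one; overriding
# the POOL_* variables used for DEFAULT_POOLS is the more predictable path.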
if MULTIPLE_NETWORKS:
FORWARDING['admin2'] = ADMIN_FORWARD
FORWARDING['public2'] = PUBLIC_FORWARD
FORWARDING['management2'] = MGMT_FORWARD
FORWARDING['private2'] = PRIVATE_FORWARD
FORWARDING['storage2'] = STORAGE_FORWARD
DHCP['admin2'] = False
DHCP['public2'] = False
DHCP['management2'] = False
DHCP['private2'] = False
DHCP['storage2'] = False
INTERFACES['admin2'] = 'eth5'
POOL_DEFAULT2 = os.environ.get('POOL_DEFAULT2', '10.108.0.0/16:24')
POOL_ADMIN2 = os.environ.get('POOL_ADMIN2', POOL_DEFAULT2)
POOL_PUBLIC2 = os.environ.get('POOL_PUBLIC2', POOL_DEFAULT2)
POOL_MANAGEMENT2 = os.environ.get('POOL_MANAGEMENT2', POOL_DEFAULT2)
POOL_PRIVATE2 = os.environ.get('POOL_PRIVATE2', POOL_DEFAULT2)
POOL_STORAGE2 = os.environ.get('POOL_STORAGE2', POOL_DEFAULT2)
CUSTOM_POOLS = {
'admin2': POOL_ADMIN2,
'public2': POOL_PUBLIC2,
'management2': POOL_MANAGEMENT2,
'private2': POOL_PRIVATE2,
'storage2': POOL_STORAGE2,
}
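# NOTE: all of the POOLS overrides below read the same PUBLIC_POOL2
# variable; to customize individual networks, set the POOL_*2 variables
# consumed by CUSTOM_POOLS above.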
POOLS['admin2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('admin2')).split(':')
POOLS['public2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('public2')).split(':')
POOLS['management2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('management2')).split(':')
POOLS['private2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('private2')).split(':')
POOLS['storage2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('storage2')).split(':')
CUSTOM_INTERFACE_ORDER = os.environ.get(
'CUSTOM_INTERFACE_ORDER',
'admin2,public2,management2,private2,storage2')
INTERFACE_ORDER.extend(CUSTOM_INTERFACE_ORDER.split(','))
BONDING = os.environ.get("BONDING", 'false') == 'true'
BONDING_INTERFACES = {
'admin': ['eth0'],
'public': ['eth1', 'eth2', 'eth3', 'eth4']
}
NETWORK_MANAGERS = {
'flat': 'FlatDHCPManager',
'vlan': 'VlanManager'
}
NEUTRON = 'neutron'
NEUTRON_SEGMENT = {
'gre': 'gre',
'vlan': 'vlan'
}
LOGS_DIR = os.environ.get('LOGS_DIR', os.getcwd())
USE_ALL_DISKS = os.environ.get('USE_ALL_DISKS', 'true') == 'true'
UPLOAD_MANIFESTS = os.environ.get('UPLOAD_MANIFESTS', 'false') == 'true'
SYNC_DEPL_TASKS = os.environ.get('SYNC_DEPL_TASKS', 'false') == 'true'
UPLOAD_MANIFESTS_PATH = os.environ.get(
'UPLOAD_MANIFESTS_PATH', '~/git/fuel/deployment/puppet/')
SITEPP_FOR_UPLOAD = os.environ.get(
'SITEPP_PATH', '/etc/puppet/modules/osnailyfacter/examples/site.pp')
UPLOAD_PATCHSET = os.environ.get('UPLOAD_PATCHSET', 'false') == 'true'
GERRIT_REFSPEC = os.environ.get('GERRIT_REFSPEC')
PATCH_PATH = os.environ.get(
'PATCH_PATH', '/tmp/fuel-ostf')
KVM_USE = os.environ.get('KVM_USE', 'false') == 'true'
VCENTER_USE = os.environ.get('VCENTER_USE', 'false') == 'true'
DEBUG_MODE = os.environ.get('DEBUG_MODE', 'true') == 'true'
# vCenter tests
VCENTER_IP = os.environ.get('VCENTER_IP')
VCENTER_USERNAME = os.environ.get('VCENTER_USERNAME')
VCENTER_PASSWORD = os.environ.get('VCENTER_PASSWORD')
VCENTER_CLUSTERS = os.environ.get('VCENTER_CLUSTERS')
# Cinder with VMDK backend settings
VC_HOST = os.environ.get('VCENTER_IP')
VC_USER = os.environ.get('VCENTER_USERNAME')
VC_PASSWORD = os.environ.get('VCENTER_PASSWORD')
VC_DATACENTER = os.environ.get('VC_DATACENTER')
VC_DATASTORE = os.environ.get('VC_DATASTORE')
VC_IMAGE_DIR = os.environ.get('VC_IMAGE_DIR')
IMAGES_VCENTER = os.environ.get('IMAGES_VCENTER')
# Services tests
SERVTEST_LOCAL_PATH = os.environ.get('SERVTEST_LOCAL_PATH', '/tmp')
SERVTEST_USERNAME = os.environ.get('SERVTEST_USERNAME', 'admin')
SERVTEST_PASSWORD = os.environ.get('SERVTEST_PASSWORD', SERVTEST_USERNAME)
SERVTEST_TENANT = os.environ.get('SERVTEST_TENANT', SERVTEST_USERNAME)
SERVTEST_SAHARA_VANILLA_2_IMAGE = ('sahara-juno-vanilla-'
'2.4.1-ubuntu-14.04.qcow2')
SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME = 'sahara-juno-vanilla-2.4.1-ubuntu-14.04'
SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5 = 'e32bef0d3bc4b2c906f5499e14f9b377'
SERVTEST_SAHARA_VANILLA_2_IMAGE_META = {'_sahara_tag_2.4.1': 'True',
'_sahara_tag_vanilla': 'True',
'_sahara_username': 'ubuntu'}
SERVTEST_MURANO_IMAGE = "ubuntu_14_04-murano-agent_stable_juno.qcow2"
SERVTEST_MURANO_IMAGE_MD5 = '9f562f3f577dc32698c11a99d3f15070'
SERVTEST_MURANO_IMAGE_NAME = 'murano'
SERVTEST_MURANO_IMAGE_META = {
'murano_image_info': '{"type": "linux", "title": "murano"}'}
DEFAULT_IMAGES_CENTOS = os.environ.get(
'DEFAULT_IMAGES_CENTOS',
'/var/lib/libvirt/images/centos6.4-base.qcow2')
DEFAULT_IMAGES_UBUNTU = os.environ.get(
'DEFAULT_IMAGES_UBUNTU',
'/var/lib/libvirt/images/ubuntu-12.04.1-server-amd64-p2.qcow2')
OS_IMAGE = os.environ.get('OS_IMAGE', DEFAULT_IMAGES_CENTOS)
OSTF_TEST_NAME = os.environ.get('OSTF_TEST_NAME',
'Check network connectivity'
' from instance via floating IP')
OSTF_TEST_RETRIES_COUNT = int(os.environ.get('OSTF_TEST_RETRIES_COUNT', 50))
# The variable below is only for test:
# fuelweb_test.tests.tests_strength.test_ostf_repeatable_tests
# :OstfRepeatableTests.run_ostf_n_times_against_custom_deployment
DEPLOYMENT_NAME = os.environ.get('DEPLOYMENT_NAME')
# Needed for ISO with docker
TIMEOUT = int(os.environ.get('TIMEOUT', 60))
ATTEMPTS = int(os.environ.get('ATTEMPTS', 5))
# Create snapshots as the last step in a test case
MAKE_SNAPSHOT = os.environ.get('MAKE_SNAPSHOT', 'false') == 'true'
NEUTRON_ENABLE = os.environ.get('NEUTRON_ENABLE', 'false') == 'true'
NEUTRON_SEGMENT_TYPE = os.environ.get('NEUTRON_SEGMENT_TYPE',
NEUTRON_SEGMENT["vlan"])
FUEL_SETTINGS_YAML = os.environ.get('FUEL_SETTINGS_YAML',
'/etc/fuel/astute.yaml')
# TarBall data for updates and upgrades
TARBALL_PATH = os.environ.get('TARBALL_PATH')
UPGRADE_FUEL_FROM = os.environ.get('UPGRADE_FUEL_FROM', '5.0')
UPGRADE_FUEL_TO = os.environ.get('UPGRADE_FUEL_TO', '6.0')
SNAPSHOT = os.environ.get('SNAPSHOT', '')
# For 5.1.1 the tarball contains two releases, so we must specify the one we need
RELEASE_VERSION = os.environ.get('RELEASE_VERSION', "2014.1.3-5.1.1")
# URL of the custom mirror with new OSCI packages which should be tested,
# for example:
# CentOS: http://osci-obs.vm.mirantis.net:82/centos-fuel-master-20921/centos/
# Ubuntu: http://osci-obs.vm.mirantis.net:82/ubuntu-fuel-master-20921/ubuntu/
CUSTOM_PKGS_MIRROR = os.environ.get('CUSTOM_PKGS_MIRROR', '')
# Location of local mirrors on master node.
LOCAL_MIRROR_UBUNTU = os.environ.get('LOCAL_MIRROR_UBUNTU',
'/var/www/nailgun/ubuntu/x86_64')
LOCAL_MIRROR_CENTOS = os.environ.get('LOCAL_MIRROR_CENTOS',
'/var/www/nailgun/centos/x86_64')
# Release name of local Ubuntu mirror on Fuel master node.
UBUNTU_RELEASE = os.environ.get('UBUNTU_RELEASE', 'precise')
UPDATE_TIMEOUT = os.environ.get('UPDATE_TIMEOUT', 3600)
IMAGE_PROVISIONING = os.environ.get('IMAGE_PROVISIONING', 'false') == 'true'
KEYSTONE_CREDS = {'username': os.environ.get('KEYSTONE_USERNAME', 'admin'),
'password': os.environ.get('KEYSTONE_PASSWORD', 'admin'),
'tenant_name': os.environ.get('KEYSTONE_TENANT', 'admin')}
SSH_CREDENTIALS = {
'login': os.environ.get('ENV_FUEL_LOGIN', 'root'),
'password': os.environ.get('ENV_FUEL_PASSWORD', 'r00tme')}
# Plugin path for plugins tests
GLUSTER_PLUGIN_PATH = os.environ.get('GLUSTER_PLUGIN_PATH')
GLUSTER_CLUSTER_ENDPOINT = os.environ.get('GLUSTER_CLUSTER_ENDPOINT')
EXAMPLE_PLUGIN_PATH = os.environ.get('EXAMPLE_PLUGIN_PATH')
LBAAS_PLUGIN_PATH = os.environ.get('LBAAS_PLUGIN_PATH')
FUEL_STATS_CHECK = os.environ.get('FUEL_STATS_CHECK', 'false') == 'true'
FUEL_STATS_ENABLED = os.environ.get('FUEL_STATS_ENABLED', 'true') == 'true'
FUEL_STATS_SSL = os.environ.get('FUEL_STATS_SSL', 'true') == 'true'
FUEL_STATS_HOST = os.environ.get('FUEL_STATS_HOST',
'172.18.2.169')
FUEL_STATS_PORT = os.environ.get('FUEL_STATS_PORT', '443')
CUSTOM_ENV = os.environ.get('CUSTOM_ENV', 'false') == 'true'
BUILD_IMAGES = os.environ.get('BUILD_IMAGES', 'false') == 'true'
STORE_ASTUTE_YAML = os.environ.get('STORE_ASTUTE_YAML', 'false') == 'true'

View File

@ -0,0 +1,13 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -0,0 +1,157 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.models.environment import EnvironmentModel
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_REDHAT
class TestBasic(object):
"""Basic test case class for all system tests.
Initializes EnvironmentModel and FuelWebModel.
"""
def __init__(self):
self.env = EnvironmentModel()
self.fuel_web = self.env.fuel_web
def check_run(self, snapshot_name):
"""Checks if run of current test is required.
:param snapshot_name: Name of the snapshot the function should make
:type snapshot_name: str
:raises: SkipTest
"""
if snapshot_name:
if self.env.get_virtual_environment().has_snapshot(snapshot_name):
raise SkipTest()
@test
class SetupEnvironment(TestBasic):
@test(groups=["setup"])
def setup_master(self):
"""Create environment and set up master node
Snapshot: empty
"""
self.check_run("empty")
self.env.setup_environment()
self.env.make_snapshot("empty", is_make=True)
@test(depends_on=[setup_master])
def prepare_release(self):
"""Prepare master node
Scenario:
1. Revert snapshot "empty"
2. Download the release if needed and upload custom manifests
Snapshot: ready
"""
self.check_run("ready")
self.env.revert_snapshot("empty")
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
self.fuel_web.update_redhat_credentials()
self.fuel_web.assert_release_state(
OPENSTACK_RELEASE_REDHAT,
state='available'
)
self.fuel_web.get_nailgun_version()
self.env.make_snapshot("ready", is_make=True)
@test(depends_on=[prepare_release],
groups=["prepare_slaves_1"])
def prepare_slaves_1(self):
"""Bootstrap 1 slave nodes
Scenario:
1. Revert snapshot "ready"
2. Start 1 slave nodes
Snapshot: ready_with_1_slaves
"""
self.check_run("ready_with_1_slaves")
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(self.env.get_virtual_environment(
).nodes().slaves[:1])
self.env.make_snapshot("ready_with_1_slaves", is_make=True)
@test(depends_on=[prepare_release],
groups=["prepare_slaves_3"])
@log_snapshot_on_error
def prepare_slaves_3(self):
"""Bootstrap 3 slave nodes
Scenario:
1. Revert snapshot "ready"
2. Start 3 slave nodes
Snapshot: ready_with_3_slaves
"""
self.check_run("ready_with_3_slaves")
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(self.env.get_virtual_environment(
).nodes().slaves[:3])
self.env.make_snapshot("ready_with_3_slaves", is_make=True)
@test(depends_on=[prepare_release],
groups=["prepare_slaves_5"])
@log_snapshot_on_error
def prepare_slaves_5(self):
"""Bootstrap 5 slave nodes
Scenario:
1. Revert snapshot "ready"
2. Start 5 slave nodes
Snapshot: ready_with_5_slaves
"""
self.check_run("ready_with_5_slaves")
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(self.env.get_virtual_environment(
).nodes().slaves[:5])
self.env.make_snapshot("ready_with_5_slaves", is_make=True)
@test(depends_on=[prepare_release],
groups=["prepare_slaves_9"])
@log_snapshot_on_error
def prepare_slaves_9(self):
"""Bootstrap 9 slave nodes
Scenario:
1. Revert snapshot "ready"
2. Start 9 slave nodes
Snapshot: ready_with_9_slaves
"""
self.check_run("ready_with_9_slaves")
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(self.env.get_virtual_environment(
).nodes().slaves[:9])
self.env.make_snapshot("ready_with_9_slaves", is_make=True)

View File

@ -0,0 +1 @@
__author__ = 'tleontovich'

View File

@ -0,0 +1 @@
__author__ = 'tleontovich'

View File

@ -0,0 +1,313 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_equal
from proboscis import test
from fuelweb_test import logger
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import checkers
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import EXAMPLE_PLUGIN_PATH
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["plugins"])
class ExamplePlugin(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_ha_controller_neutron_example"])
@log_snapshot_on_error
def deploy_ha_one_controller_neutron_example(self):
"""Deploy cluster in ha mode with example plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller role
5. Add 2 nodes with compute role
6. Deploy the cluster
7. Run network verification
8. Check plugin health
9. Run OSTF
Duration 35m
Snapshot deploy_ha_one_controller_neutron_example
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(),
EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
}
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'fuel_plugin_example' in attr['editable']:
plugin_data = attr['editable']['fuel_plugin_example']['metadata']
plugin_data['enabled'] = True
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
# check if service ran on controller
logger.debug("Start to check service on node {0}".format('slave-01'))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0}'.format(res_pgrep['stderr']))
assert_equal(1, len(res_pgrep['stdout']),
'Failed with error {0}'.format(res_pgrep['stderr']))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd_curl)
assert_equal(0, res_curl['exit_code'],
'Failed with error {0}'.format(res_curl['stderr']))
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_nova_example_ha"])
@log_snapshot_on_error
def deploy_nova_example_ha(self):
"""Deploy cluster in ha mode with example plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 3 nodes with controller role
5. Add 1 node with compute role
6. Add 1 node with cinder role
7. Deploy the cluster
8. Run network verification
9. Check plugin health
10. Run OSTF
Duration 70m
Snapshot deploy_nova_example_ha
"""
self.env.revert_snapshot("ready_with_5_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'fuel_plugin_example' in attr['editable']:
plugin_data = attr['editable']['fuel_plugin_example']['metadata']
plugin_data['enabled'] = True
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
for node in ('slave-01', 'slave-02', 'slave-03'):
logger.debug("Start to check service on node {0}".format(node))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
node).execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_pgrep['stderr'], node))
assert_equal(1, len(res_pgrep['stdout']),
'Failed with error {0} on the '
'node {1}'.format(res_pgrep['stderr'], node))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
node).execute(cmd_curl)
assert_equal(0, res_curl['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_curl['stderr'], node))
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_nova_example_ha")
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_neutron_example_ha_add_node"])
@log_snapshot_on_error
def deploy_neutron_example_ha_add_node(self):
"""Deploy and scale cluster in ha mode with example plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller role
5. Add 1 node with compute role
6. Add 1 node with cinder role
7. Deploy the cluster
8. Run network verification
9. Check plugin health
10. Add 2 nodes with controller role
11. Deploy cluster
12. Check plugin health
13. Run OSTF
Duration 150m
Snapshot deploy_neutron_example_ha_add_node
"""
self.env.revert_snapshot("ready_with_5_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": 'gre',
}
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'fuel_plugin_example' in attr['editable']:
plugin_data = attr['editable']['fuel_plugin_example']['metadata']
plugin_data['enabled'] = True
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
# check if service ran on controller
logger.debug("Start to check service on node {0}".format('slave-01'))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0}'.format(res_pgrep['stderr']))
assert_equal(1, len(res_pgrep['stdout']),
'Failed with error {0}'.format(res_pgrep['stderr']))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd_curl)
assert_equal(0, res_curl['exit_code'],
'Failed with error {0}'.format(res_curl['stderr']))
self.fuel_web.update_nodes(
cluster_id,
{
'slave-04': ['controller'],
'slave-05': ['controller'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ('slave-01', 'slave-04', 'slave-05'):
logger.debug("Start to check service on node {0}".format(node))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
node).execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_pgrep['stderr'], node))
assert_equal(1, len(res_pgrep['stdout']),
'Failed with error {0} on the '
'node {1}'.format(res_pgrep['stderr'], node))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
node).execute(cmd_curl)
assert_equal(0, res_curl['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_curl['stderr'], node))
# add verification here
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_example_ha_add_node")

View File

@ -0,0 +1 @@
__author__ = 'tleontovich'

View File

@ -0,0 +1,226 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import checkers
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import GLUSTER_CLUSTER_ENDPOINT
from fuelweb_test.settings import GLUSTER_PLUGIN_PATH
from fuelweb_test.settings import NEUTRON_ENABLE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["plugins"])
class GlusterfsPlugin(TestBasic):
@classmethod
def check_glusterfs_conf(cls, remote, path, gfs_endpoint):
cmd = ' cat {0}'.format(path)
result = remote.execute(cmd)
assert_equal(result['exit_code'],
0,
'Command {0} execution failed with non-zero exit code. '
'Actual result {1} stderr {2}'.format(
cmd, result['exit_code'], result['stderr']))
assert_true(gfs_endpoint in ''.join(result['stdout']),
'Can not find gfs endpoint in gfs configs')
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_ha_one_controller_glusterfs"])
@log_snapshot_on_error
def deploy_ha_one_controller_glusterfs_simple(self):
"""Deploy cluster in ha mode with glusterfs plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller and cinder roles
5. Add 1 node with compute role
6. Add 1 node with cinder role
7. Deploy the cluster
8. Run network verification
9. Check plugin health
10. Run OSTF
Duration 35m
Snapshot deploy_ha_one_controller_glusterfs
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
settings = None
if NEUTRON_ENABLE:
settings = {
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'external_glusterfs' in attr['editable']:
plugin_enabled = attr['editable']['external_glusterfs']['metadata']
plugin_enabled['enabled'] = True
plugin_data = attr['editable']['external_glusterfs']['endpoint']
plugin_data['value'] = GLUSTER_CLUSTER_ENDPOINT
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'cinder'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ('slave-01', 'slave-03'):
self.check_glusterfs_conf(
remote=self.env.get_ssh_to_remote_by_name(node),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_glusterfs_ha"])
@log_snapshot_on_error
def deploy_glusterfs_ha(self):
"""Deploy cluster in ha mode with glusterfs plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller and cinder roles
5. Add 1 node with compute role
6. Add 1 node with cinder role
7. Deploy the cluster
8. Run network verification
9. Check plugin health
10. Run OSTF
11. Add 2 cinder + controller nodes
12. Re-deploy cluster
13. Check plugin health
14. Run OSTF
Duration 50m
Snapshot deploy_glusterfs_ha
"""
self.env.revert_snapshot("ready_with_5_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
settings = None
if NEUTRON_ENABLE:
settings = {
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'external_glusterfs' in attr['editable']:
plugin_enabled = attr['editable']['external_glusterfs']['metadata']
plugin_enabled['enabled'] = True
plugin_data = attr['editable']['external_glusterfs']['endpoint']
plugin_data['value'] = GLUSTER_CLUSTER_ENDPOINT
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.check_glusterfs_conf(
remote=self.env.get_ssh_to_remote_by_name('slave-03'),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-04': ['controller', 'cinder'],
'slave-05': ['controller', 'cinder'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ('slave-03', 'slave-04', 'slave-05'):
self.check_glusterfs_conf(
remote=self.env.get_ssh_to_remote_by_name(node),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_glusterfs_ha")

View File

@ -0,0 +1 @@
__author__ = 'tleontovich'

View File

@ -0,0 +1,267 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import traceback
from devops.helpers.helpers import wait
from proboscis import asserts
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers import checkers
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE_SIMPLE
from fuelweb_test.settings import LBAAS_PLUGIN_PATH
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(enabled=False, groups=["plugins"])
class LbaasPlugin(TestBasic):
@classmethod
def check_neutron_agents_statuses(cls, os_conn):
agents_list = os_conn.list_agents()
for a in agents_list['agents']:
asserts.assert_equal(
a['alive'], True,
'Neutron agent {0} is not alive'.format(a['binary']))
asserts.assert_true(
a['admin_state_up'],
"Admin state is down for agent {0}".format(a['binary']))
lb_agent = [a for a in agents_list["agents"]
if a['binary'] == 'neutron-lbaas-agent']
logger.debug("LbaaS agent list is {0}".format(lb_agent))
asserts.assert_equal(
len(lb_agent), 1,
'There is no LbaaS agent in the neutron agent list output')
@classmethod
def check_lbass_work(cls, os_conn):
# create pool
pool = os_conn.create_pool(pool_name='lbaas_pool')
logger.debug('pool is {0}'.format(pool))
# create vip
vip = os_conn.create_vip(name='lbaas_vip',
protocol='HTTP',
port=80,
pool=pool)
logger.debug('vip is {0}'.format(vip))
# get list of vips
lb_vip_list = os_conn.get_vips()
logger.debug(
'Initial state of vip is {0}'.format(
os_conn.get_vip(lb_vip_list['vips'][0]['id'])))
# wait for active status
try:
wait(lambda: os_conn.get_vip(
lb_vip_list['vips'][0]['id'])['vip']['status'] == 'ACTIVE',
timeout=120 * 60)
except Exception:
logger.error(traceback.format_exc())
vip_state = os_conn.get_vip(
lb_vip_list['vips'][0]['id'])['vip']['status']
asserts.assert_equal(
'ACTIVE', vip_state,
"Vip is not active, current state is {0}".format(vip_state))
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_neutron_lbaas_simple"])
@log_snapshot_on_error
def deploy_neutron_lbaas_simple(self):
"""Deploy cluster in simple mode with LbaaS plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller role
5. Add 2 nodes with compute role
6. Deploy the cluster
7. Run network verification
8. Check health of lbaas agent on the node
9. Create pool and vip
10. Run OSTF
Duration 35m
Snapshot deploy_neutron_vlan_lbaas_simple
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE,
}
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'lbaas' in attr['editable']:
logger.debug('we have lbaas element')
plugin_data = attr['editable']['lbaas']['metadata']
plugin_data['enabled'] = True
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
asserts.assert_equal(str(cluster['net_provider']), 'neutron')
self.fuel_web.verify_network(cluster_id)
controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
os_conn = os_actions.OpenStackActions(controller['ip'])
self.check_neutron_agents_statuses(os_conn)
self.check_lbass_work(os_conn)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple")
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_neutron_lbaas_simple_reset_ready"])
@log_snapshot_on_error
def deploy_neutron_lbaas_simple_reset_ready(self):
"""Deploy and re-deploy cluster in simple mode with LbaaS plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller role
5. Add 1 node with compute role
6. Deploy the cluster
7. Run network verification
8. Check health of lbaas agent on the node
9. Create pool and vip
10. Reset cluster
11. Add 1 compute
12. Re-deploy cluster
13. Check health of lbaas agent on the node
14. Create pool and vip
15. Run OSTF
Duration 65m
Snapshot deploy_neutron_lbaas_simple_reset_ready
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE,
}
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'lbaas' in attr['editable']:
logger.debug('we have lbaas element')
plugin_data = attr['editable']['lbaas']['metadata']
plugin_data['enabled'] = True
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
asserts.assert_equal(str(cluster['net_provider']), 'neutron')
self.fuel_web.verify_network(cluster_id)
controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
os_conn = os_actions.OpenStackActions(controller['ip'])
self.check_neutron_agents_statuses(os_conn)
self.check_lbass_work(os_conn)
self.fuel_web.stop_reset_env_wait(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:2])
self.fuel_web.update_nodes(
cluster_id,
{
'slave-03': ['compute'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.check_neutron_agents_statuses(os_conn)
self.check_lbass_work(os_conn)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_lbaas_simple_reset_ready")

View File

@ -0,0 +1,134 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.helpers.helpers import http
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from proboscis import SkipTest
from proboscis import test
import xmlrpclib
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_CENTOS
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test import logger
@test(groups=["thread_1"])
class TestAdminNode(TestBasic):
@test(depends_on=[SetupEnvironment.setup_master],
groups=["test_cobbler_alive"])
def test_cobbler_alive(self):
"""Test current installation has correctly setup cobbler
API and cobbler HTTP server are alive
Scenario:
1. Revert snapshot "empty"
2. test cobbler API and HTTP server through send http request
Duration 1m
"""
if OPENSTACK_RELEASE_CENTOS not in OPENSTACK_RELEASE:
raise SkipTest()
self.env.revert_snapshot("empty")
wait(
lambda: http(host=self.env.get_admin_node_ip(), url='/cobbler_api',
waited_code=501),
timeout=60
)
server = xmlrpclib.Server(
'http://%s/cobbler_api' % self.env.get_admin_node_ip())
config = self.env.get_fuel_settings()
username = config['cobbler']['user']
password = config['cobbler']['password']
# raises an error if something isn't right
server.login(username, password)
@test(depends_on=[SetupEnvironment.setup_master],
groups=["test_astuted_alive"])
@log_snapshot_on_error
def test_astuted_alive(self):
"""Test astute master and worker processes are alive on master node
Scenario:
1. Revert snapshot "empty"
2. Search for master and child processes
Duration 1m
"""
if OPENSTACK_RELEASE_CENTOS not in OPENSTACK_RELEASE:
raise SkipTest()
self.env.revert_snapshot("empty")
ps_output = self.env.get_admin_remote().execute('ps ax')['stdout']
astute_master = filter(lambda x: 'astute master' in x, ps_output)
logger.info("Found astute processes: %s" % astute_master)
assert_equal(len(astute_master), 1)
astute_workers = filter(lambda x: 'astute worker' in x, ps_output)
logger.info(
"Found %d astute worker processes: %s" %
(len(astute_workers), astute_workers))
assert_equal(True, len(astute_workers) > 1)
@test(groups=["known_issues"])
class TestAdminNodeBackupRestore(TestBasic):
@test(depends_on=[SetupEnvironment.setup_master],
groups=["backup_restore_master_base"])
@log_snapshot_on_error
def backup_restore_master_base(self):
"""Backup/restore master node
Scenario:
1. Revert snapshot "empty"
2. Backup master
3. Check backup
4. Restore master
5. Check restore
Duration 30m
"""
self.env.revert_snapshot("empty")
self.fuel_web.backup_master(self.env.get_admin_remote())
checkers.backup_check(self.env.get_admin_remote())
self.fuel_web.restore_master(self.env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
checkers.restore_check_sum(self.env.get_admin_remote())
checkers.iptables_check(self.env.get_admin_remote())
@test(groups=["setup_master_custom"])
class TestAdminNodeCustomManifests(TestBasic):
@test(groups=["setup_master_custom_manifests"])
@log_snapshot_on_error
def setup_with_custom_manifests(self):
"""Setup master node with custom manifests
Scenario:
1. Start installation of master
2. Enter "fuelmenu"
3. Upload custom manifests
4. Kill "fuelmenu" pid
Duration 20m
"""
self.env.setup_environment(custom=True, build_images=True)

View File

@ -0,0 +1,391 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_equal
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_REDHAT
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["bonding_ha_one_controller", "bonding"])
class BondingHAOneController(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_bonding_active_backup"])
@log_snapshot_on_error
def deploy_bonding_active_backup(self):
"""Deploy cluster in ha mode with one controller bonding
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Setup bonding for all interfaces
5. Deploy the cluster
6. Run network verification
7. Run OSTF
Duration 30m
Snapshot deploy_bonding_active_backup
"""
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_3_slaves")
segment_type = 'gre'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
}
)
self.fuel_web.update_nodes(
cluster_id, {
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
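# Bond descriptor consumed by update_node_networks(): aggregate eth1-eth4
# into an OVS bond in active-backup mode; 'assigned_networks' stays empty
# here and is filled in from the 'interfaces' mapping below.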
raw_data = {
'mac': None,
'mode': 'active-backup',
'name': 'ovs-bond0',
'slaves': [
{'name': 'eth4'},
{'name': 'eth3'},
{'name': 'eth2'},
{'name': 'eth1'}
],
'state': None,
'type': 'bond',
'assigned_networks': []
}
interfaces = {
'eth0': ['fuelweb_admin'],
'ovs-bond0': [
'public',
'management',
'storage'
]
}
net_params = self.fuel_web.client.get_networks(cluster_id)
nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.fuel_web.update_node_networks(
node['id'], interfaces_dict=interfaces,
raw_data=raw_data
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
assert_equal(str(net_params["networking_parameters"]
['segmentation_type']), segment_type)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_bonding_active_backup")
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_bonding_balance_slb"])
@log_snapshot_on_error
def deploy_bonding_balance_slb(self):
"""Deploy cluster in ha mode with 1 controller and bonding
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Setup bonding for all interfaces
5. Deploy the cluster
6. Run network verification
7. Run OSTF
Duration 30m
Snapshot deploy_bonding_balance_slb
"""
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_3_slaves")
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
}
)
self.fuel_web.update_nodes(
cluster_id, {
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
raw_data = {
'mac': None,
'mode': 'balance-slb',
'name': 'ovs-bond0',
'slaves': [
{'name': 'eth4'},
{'name': 'eth3'},
{'name': 'eth2'},
{'name': 'eth1'}
],
'state': None,
'type': 'bond',
'assigned_networks': []
}
interfaces = {
'eth0': ['fuelweb_admin'],
'ovs-bond0': [
'public',
'management',
'storage',
'private'
]
}
net_params = self.fuel_web.client.get_networks(cluster_id)
nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.fuel_web.update_node_networks(
node['id'], interfaces_dict=interfaces,
raw_data=raw_data
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
assert_equal(str(net_params["networking_parameters"]
['segmentation_type']), segment_type)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_bonding_balance_slb")
@test(groups=["bonding_ha", "bonding"])
class BondingHA(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_bonding_ha_active_backup"])
@log_snapshot_on_error
def deploy_bonding_ha_active_backup(self):
"""Deploy cluster in HA mode with bonding (active backup)
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Setup bonding for all interfaces
5. Deploy the cluster
6. Run network verification
7. Run OSTF
Duration 70m
Snapshot deploy_bonding_ha_active_backup
"""
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_5_slaves")
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
}
)
self.fuel_web.update_nodes(
cluster_id, {
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
raw_data = {
'mac': None,
'mode': 'active-backup',
'name': 'ovs-bond0',
'slaves': [
{'name': 'eth4'},
{'name': 'eth3'},
{'name': 'eth2'},
{'name': 'eth1'}
],
'state': None,
'type': 'bond',
'assigned_networks': []
}
interfaces = {
'eth0': ['fuelweb_admin'],
'ovs-bond0': [
'public',
'management',
'storage',
'private'
]
}
net_params = self.fuel_web.client.get_networks(cluster_id)
nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.fuel_web.update_node_networks(
node['id'], interfaces_dict=interfaces,
raw_data=raw_data
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
assert_equal(str(net_params["networking_parameters"]
['segmentation_type']), segment_type)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_bonding_ha_active_backup")
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_bonding_ha_balance_slb"])
@log_snapshot_on_error
def deploy_bonding_ha_balance_slb(self):
"""Deploy cluster in HA mode with bonding (balance SLB)
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Setup bonding for all interfaces
5. Deploy the cluster
6. Run network verification
7. Run OSTF
Duration 70m
Snapshot deploy_bonding_ha_balance_slb
"""
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_5_slaves")
segment_type = 'gre'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
}
)
self.fuel_web.update_nodes(
cluster_id, {
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
raw_data = {
'mac': None,
'mode': 'balance-slb',
'name': 'ovs-bond0',
'slaves': [
{'name': 'eth4'},
{'name': 'eth3'},
{'name': 'eth2'},
{'name': 'eth1'}
],
'state': None,
'type': 'bond',
'assigned_networks': []
}
interfaces = {
'eth0': ['fuelweb_admin'],
'ovs-bond0': [
'public',
'management',
'storage'
]
}
net_params = self.fuel_web.client.get_networks(cluster_id)
nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.fuel_web.update_node_networks(
node['id'], interfaces_dict=interfaces,
raw_data=raw_data
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
assert_equal(str(net_params["networking_parameters"]
['segmentation_type']), segment_type)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_bonding_ha_balance_slb")

View File

@ -0,0 +1,227 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Launchers for Tempest scenarios
To launch these Fuel-tests, you should specify several properties in global
environment.
Tempest should be configured with your cluster. You can do it manually and set
path to existing configuration file in TEMPEST_CONFIG_FILE. Automatic
configuration is also presented and required cluster name
(variable: CLUSTER_NAME) and name of environment (variable: PARENT_ENV_NAME),
wherein the cluster has been created.
Another important variable is name of snapshot (variable: SNAPSHOT) which
Tempest will verify.
Optional properties:
TEMPEST_PATH - path to Tempest (default: './tempest')
TEMPEST_XML_LOG_FILE - path to file which will store results of
verification in JUnit XML format
(default: './logs/$EXEC_NUMBER_tempest.xml')
Cheat:
TEMPEST_GOD_MODE - if you specify this variable, fuel-tests will be
marked as failed (will raise exception) only when xml log file is
missed(don't matter Tempest scenarios are finished successfully or
some of them are crashed).
"""
import errno
import os
import subprocess as sp
import tempfile
from xml.etree import ElementTree
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.models import nailgun_client as nc
from fuelweb_test.tests import base_test_case
from fuelweb_test.helpers import conf_tempest
def _prepare_and_run(*testr_args):
""" Prepare and run Tempest scenarios via testr.
Required variables in environment: CLUSTER_NAME, PARENT_ENV_NAME,
TEMPEST_PATH, TEMPEST_CONFIG_FILE, EXECUTOR_NUMBER,
TEMPEST_XML_LOG_FILE, TEMPEST_GOD_MODE
"""
# Preparation
cluster = os.environ.get("CLUSTER_NAME")
env_name = os.environ.get("PARENT_ENV_NAME")
tempest_path = os.environ.get("TEMPEST_PATH", "./tempest")
tempest_conf = os.environ.get("TEMPEST_CONFIG_FILE")
exec_number = os.environ.get("EXECUTOR_NUMBER")
xml_logfile = os.environ.get("TEMPEST_XML_LOG_FILE",
"./logs/%s_tempest.xml" % exec_number)
god_mode = os.environ.get("TEMPEST_GOD_MODE", False)
# Check whether Tempest can be configured
if not tempest_conf and (not env_name and not cluster):
raise ValueError(
"You should specify a Tempest configuration file, or environment "
"and cluster names to generate the configuration file.")
# Prepare directory for log file
try:
os.makedirs(os.path.dirname(xml_logfile))
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(os.path.dirname(xml_logfile)):
raise
else:
raise
if not tempest_conf:
tempest_conf = tempfile.NamedTemporaryFile().name
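# NOTE: only a unique file name is borrowed here; the NamedTemporaryFile
# object itself is discarded (and its file removed) right away.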
# Get nailgun node ip address
netdump = sp.Popen(["virsh", "net-dumpxml", "%s_admin" % env_name],
stdout=sp.PIPE).communicate()[0]
try:
network = ElementTree.fromstring(netdump).find('ip')
node_ip = "%s2" % network.attrib['address'][:-1]
except (AttributeError, KeyError):
raise ValueError(
"Nailgun node ip address can not be obtained using the "
"specified name of environment('%s')" % env_name)
cluster_id = nc.NailgunClient(node_ip).get_cluster_id(cluster)
if not cluster_id:
raise ValueError(
"Cluster id can not be obtained by using specified envname"
"('%(env_name)s') and discovered nailgun node ip address"
"('%(ip_address)s')." % {"env_name": env_name,
"ip_address": node_ip})
# Generate config file
conf = conf_tempest.TempestConfigState(
node_ip, cluster_id, tempest_conf)
conf.configure()
conf.copy_config()
# Tempest needs modified environment
tempest_env = os.environ.copy()
tempest_env["TEMPEST_CONFIG_DIR"] = tempest_path
tempest_env["TEMPEST_CONFIG"] = os.path.basename(tempest_conf)
tempest_env["OS_TEST_PATH"] = os.path.join(
tempest_path, "tempest/test_discover")
# Run Tempest
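# testr runs the scenarios in parallel and emits a subunit stream, which
# is piped into subunit2junitxml below to produce the JUnit XML report.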
tempest_cmd = ["testr", "run", "--parallel", "--subunit"]
tempest_cmd.extend(testr_args)
to_xml_cmd = ['subunit2junitxml', '--output-to', xml_logfile]
try:
tempest_process = sp.Popen(tempest_cmd, cwd=tempest_path,
env=tempest_env, stdout=sp.PIPE)
sp.check_call(to_xml_cmd, stdin=tempest_process.stdout,
cwd=tempest_path)
except sp.CalledProcessError:
if god_mode and not os.path.exists(xml_logfile):
raise RuntimeError(
"An error occurred during the execution of Tempest. "
"Please see log files for detailed information.")
elif not god_mode:
raise RuntimeError(
"Tempest tests are finished with errors. Please see xml "
"file with results for detailed information.")
@test(groups=["tempest"])
class TestByTempest(base_test_case.TestBasic):
def revert_snapshot(self):
""" Prepare snapshot specified in environment"""
success = self.env.revert_snapshot(os.environ.get("SNAPSHOT"))
if not success:
raise SkipTest()
@test(groups=["tempest_set"])
@log_snapshot_on_error
def tempest_set(self):
"""Prepare cluster and launch Tempest tests from TEMPEST_TEST_SET
Scenario:
1. Revert the cluster (snapshot) which Tempest will test
2. Prepare Tempest
2.1 Discover the nailgun node ip and cluster id (if the Tempest
configuration file is not provided)
2.2 Modify the environment
3. Validate the cluster with a set of Tempest tests
Specific test variable:
TEMPEST_TEST_SET - name of the Tempest test set which will be
launched. Allowed names:
- full (used by default)
- smoke
- baremetal
- compute
- data_processing
- identity
- image
- network
- object_storage
- orchestration
- telemetry
- volume
"""
self.revert_snapshot()
# Parse Tempest set name
tempest_set = os.environ.get("TEMPEST_TEST_SET", "")
if tempest_set and tempest_set not in ['full', 'smoke']:
tempest_set = "tempest.api.%s" % tempest_set
elif tempest_set != "smoke":
tempest_set = ""
_prepare_and_run(tempest_set)
@test(groups=["tempest_list"])
@log_snapshot_on_error
def tempest_list(self):
"""Prepare cluster and launch Tempest tests from TEMPEST_TESTS_LIST
Scenario:
1. Revert the cluster (snapshot) which Tempest will test
2. Prepare Tempest
2.1 Discover the nailgun node ip and cluster id (if the Tempest
configuration file is not provided)
2.2 Modify the environment
3. Validate the cluster with a list of Tempest tests
Specific test variable:
TEMPEST_TESTS_LIST - path to file with names of Tempests-tests
(structure of file: each name on a separate line)
"""
self.revert_snapshot()
file_with_tests = os.environ.get("TEMPEST_TESTS_LIST")
if not os.path.exists(file_with_tests):
raise ValueError(
"File %s does not exist. Please specify a correct path to the "
"file that contains the list of tests." % file_with_tests)
_prepare_and_run("list-tests", file_with_tests)

View File

@ -0,0 +1,608 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import proboscis
import time
from proboscis.asserts import assert_true, assert_false
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test import ostf_test_mapping as map_ostf
from fuelweb_test import settings
from fuelweb_test.settings import NEUTRON_ENABLE
from fuelweb_test import logger
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_1", "ceph"])
class CephCompact(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ceph_ha_one_controller_compact",
"ha_one_controller_nova_ceph"])
@log_snapshot_on_error
def ceph_ha_one_controller_compact(self):
"""Deploy ceph in HA mode with 1 controller
Scenario:
1. Create cluster
2. Add 1 node with controller and ceph OSD roles
3. Add 2 node with compute and ceph OSD roles
4. Deploy the cluster
5. Check ceph status
Duration 35m
Snapshot ceph_ha_one_controller_compact
"""
self.env.revert_snapshot("ready_with_3_slaves")
data = {
'volumes_ceph': True,
'images_ceph': True,
'volumes_lvm': False,
'tenant': 'ceph1',
'user': 'ceph1',
'password': 'ceph1'
}
if NEUTRON_ENABLE:
data["net_provider"] = 'neutron'
data["net_segment_type"] = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'ceph-osd'],
'slave-02': ['compute', 'ceph-osd'],
'slave-03': ['compute', 'ceph-osd']
}
)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
# Run ostf
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("ceph_ha_one_controller_compact")
@test(groups=["thread_3", "ceph"])
class CephCompactWithCinder(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_release],
groups=["ceph_ha_one_controller_with_cinder"])
@log_snapshot_on_error
def ceph_ha_one_controller_with_cinder(self):
"""Deploy ceph with cinder in ha mode with 1 controller
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 2 nodes with cinder and ceph OSD roles
5. Deploy the cluster
6. Check ceph status
7. Check partitions on controller node
Duration 40m
Snapshot ceph_ha_one_controller_with_cinder
"""
try:
self.check_run('ceph_ha_one_controller_with_cinder')
except SkipTest:
return
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[:4])
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'volumes_ceph': False,
'images_ceph': True,
'volumes_lvm': True,
'tenant': 'ceph2',
'user': 'ceph2',
'password': 'ceph2'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder', 'ceph-osd'],
'slave-04': ['cinder', 'ceph-osd']
}
)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
disks = self.fuel_web.client.get_node_disks(
self.fuel_web.get_nailgun_node_by_name('slave-01')['id'])
logger.info("Current disk partitions are: \n{d}".format(d=disks))
logger.info("Check unallocated space")
# We expect failure here only for release 5.0 due to bug
# https://bugs.launchpad.net/fuel/+bug/1306625; this check should
# become assert_true once the bug is fixed in the next release.
assert_false(
checkers.check_unallocated_space(disks, contr_img_ceph=True),
"Check unallocated space on controller")
# Run ostf
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("ceph_ha_one_controller_with_cinder",
is_make=True)
@test(groups=["thread_3", "ceph", "image_based"])
class CephHA(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_release],
groups=["ceph_ha", "ha_nova_ceph", "ha_neutron_ceph", "bvt_2"])
@log_snapshot_on_error
def ceph_ha(self):
"""Deploy ceph with cinder in HA mode
Scenario:
1. Create cluster
2. Add 3 nodes with controller and ceph OSD roles
3. Add 1 node with ceph OSD roles
4. Add 2 nodes with compute and ceph OSD roles
5. Deploy the cluster
6. Check ceph status
Duration 90m
Snapshot ceph_ha
"""
try:
self.check_run('ceph_ha')
except SkipTest:
return
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[:6])
csettings = {}
if settings.NEUTRON_ENABLE:
csettings = {
"net_provider": 'neutron',
"net_segment_type": "vlan"
}
csettings.update(
{
'volumes_ceph': True,
'images_ceph': True,
'volumes_lvm': False,
'tenant': 'cephHA',
'user': 'cephHA',
'password': 'cephHA'
}
)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=csettings
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'ceph-osd'],
'slave-02': ['controller', 'ceph-osd'],
'slave-03': ['controller', 'ceph-osd'],
'slave-04': ['compute', 'ceph-osd'],
'slave-05': ['compute', 'ceph-osd'],
'slave-06': ['ceph-osd']
}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
# Run ostf
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("ceph_ha", is_make=True)
@test(groups=["thread_4", "ceph"])
class CephRadosGW(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["ceph_rados_gw"])
@log_snapshot_on_error
def ceph_rados_gw(self):
"""Deploy ceph ha with 1 controller with RadosGW for objects
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 3 nodes with ceph-osd role
5. Deploy the cluster
6. Check ceph status
7. Run OSTF tests
8. Check the radosgw daemon is started
Duration 40m
Snapshot ceph_rados_gw
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'volumes_lvm': False,
'volumes_ceph': True,
'images_ceph': True,
'objects_ceph': True,
'tenant': 'rados',
'user': 'rados',
'password': 'rados'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['ceph-osd'],
'slave-04': ['ceph-osd'],
'slave-05': ['ceph-osd']
}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
try:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Create volume and attach it to instance'))
except AssertionError:
logger.debug("Test failed from first probe,"
" we sleep 60 second try one more time "
"and if it fails again - test will fails ")
time.sleep(60)
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Create volume and attach it to instance'))
# Run ostf
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['smoke', 'sanity', 'platform_tests'])
# Check the radosgw daemon is started
remote = self.fuel_web.get_ssh_for_node('slave-01')
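# Three lines are expected in the output: the radosgw process itself,
# the shell wrapper and the grep process (an assumption about how
# check_call executes the piped command).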
radosgw_started = lambda: len(remote.check_call(
'ps aux | grep "/usr/bin/radosgw -n '
'client.radosgw.gateway"')['stdout']) == 3
assert_true(radosgw_started(), 'radosgw daemon started')
self.env.make_snapshot("ceph_rados_gw")
@test(groups=["thread_1", "ceph_migration"])
class VmBackedWithCephMigrationBasic(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ceph_migration"])
@log_snapshot_on_error
def migrate_vm_backed_with_ceph(self):
"""Check VM backed with ceph migration in ha mode with 1 controller
Scenario:
1. Create cluster
2. Add 1 node with controller and ceph OSD roles
3. Add 2 node with compute and ceph OSD roles
4. Deploy the cluster
5. Check ceph status
6. Run OSTF
7. Create a new VM, assign floating ip
8. Migrate VM
9. Check cluster and server state after migration
10. Terminate VM
Duration 35m
Snapshot vm_backed_with_ceph_live_migration
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise proboscis.SkipTest()
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'volumes_ceph': True,
'images_ceph': True,
'ephemeral_ceph': True,
'volumes_lvm': False
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'ceph-osd'],
'slave-02': ['compute', 'ceph-osd'],
'slave-03': ['compute', 'ceph-osd']
}
)
creds = ("cirros", "test")
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id)
def _check():
# Run volume test several times with hope that it pass
test_path = map_ostf.OSTF_TEST_MAPPING.get(
'Create volume and attach it to instance')
logger.debug('Start to run test {0}'.format(test_path))
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=test_path)
try:
_check()
except AssertionError as err:
logger.debug(err)
logger.debug("Test failed on the first probe; sleeping 60 seconds "
"and trying once more. If it fails again, the test "
"will fail.")
time.sleep(60)
_check()
# Run ostf
self.fuel_web.run_ostf(cluster_id)
# Create new server
os = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
logger.info("Create new server")
srv = os.create_server_for_migration(
scenario='./fuelweb_test/helpers/instance_initial_scenario')
logger.info("Srv is currently in status: %s" % srv.status)
logger.info("Assigning floating ip to server")
floating_ip = os.assign_floating_ip(srv)
srv_host = os.get_srv_host_name(srv)
logger.info("Server is on host %s" % srv_host)
time.sleep(100)
md5before = os.get_md5sum(
"/home/test_file",
self.env.get_ssh_to_remote_by_name("slave-01"),
floating_ip.ip, creds)
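# The checksum is taken on the VM itself: the controller (slave-01)
# acts as an SSH jump host to reach the instance's floating IP.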
logger.info("Get available computes")
avail_hosts = os.get_hosts_for_migr(srv_host)
logger.info("Migrating server")
new_srv = os.migrate_server(srv, avail_hosts[0], timeout=200)
logger.info("Check cluster and server state after migration")
md5after = os.get_md5sum(
"/home/test_file",
self.env.get_ssh_to_remote_by_name("slave-01"),
floating_ip.ip, creds)
assert_true(
md5after == md5before,
"MD5 checksums do not match. "
"Before migration: {bef}; "
"after migration: {aft}".format(bef=md5before, aft=md5after))
res = os.execute_through_host(
self.env.get_ssh_to_remote_by_name("slave-01"),
floating_ip.ip, "ping -q -c3 -w10 %s | grep 'received' |"
" grep -v '0 packets received'", creds)
logger.info("Ping 8.8.8.8 result on vm is: %s" % res)
logger.info("Check Ceph health is ok after migration")
self.fuel_web.check_ceph_status(cluster_id)
logger.info("Server is now on host %s" %
os.get_srv_host_name(new_srv))
logger.info("Terminate migrated server")
os.delete_instance(new_srv)
assert_true(os.verify_srv_deleted(new_srv),
"Verify server was deleted")
# Create new server
logger.info("Create new server")
srv = os.create_server_for_migration(
scenario='./fuelweb_test/helpers/instance_initial_scenario')
logger.info("Srv is currently in status: %s" % srv.status)
logger.info("Assigning floating ip to server")
floating_ip = os.assign_floating_ip(srv)
srv_host = os.get_srv_host_name(srv)
logger.info("Server is on host %s" % srv_host)
logger.info("Create volume")
vol = os.create_volume()
logger.info("Attach volume to server")
os.attach_volume(vol, srv)
time.sleep(100)
logger.info("Create filesystem and mount volume")
os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
floating_ip.ip, 'sudo sh /home/mount_volume.sh', creds)
os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
floating_ip.ip, 'sudo touch /mnt/file-on-volume', creds)
logger.info("Get available computes")
avail_hosts = os.get_hosts_for_migr(srv_host)
logger.info("Migrating server")
new_srv = os.migrate_server(srv, avail_hosts[0], timeout=120)
logger.info("Check cluster and server state after migration")
logger.info("Mount volume after migration")
out = os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
floating_ip.ip, 'sudo mount /dev/vdb /mnt', creds)
logger.info("out of mounting volume is: %s" % out)
assert_true("file-on-volume" in os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
floating_ip.ip, "sudo ls /mnt", creds),
"File is abscent in /mnt")
logger.info("Check Ceph health is ok after migration")
self.fuel_web.check_ceph_status(cluster_id)
logger.info("Server is now on host %s" %
os.get_srv_host_name(new_srv))
logger.info("Terminate migrated server")
os.delete_instance(new_srv)
assert_true(os.verify_srv_deleted(new_srv),
"Verify server was deleted")
self.env.make_snapshot(
"vm_backed_with_ceph_live_migration")
@test(groups=["thread_1", "ceph_partitions"])
class CheckCephPartitionsAfterReboot(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ceph_partitions"])
@log_snapshot_on_error
def check_ceph_partitions_after_reboot(self):
"""Check that Ceph OSD partitions are remounted after reboot
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute and Ceph OSD roles
4. Add 1 node with Ceph OSD role
5. Deploy the cluster
6. Check Ceph status
7. Read current partitions
8. Warm-reboot Ceph nodes
9. Read partitions again
10. Check Ceph health
11. Cold-reboot Ceph nodes
12. Read partitions again
13. Check Ceph health
Duration 40m
Snapshot check_ceph_partitions_after_reboot
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise proboscis.SkipTest()
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'volumes_ceph': True,
'images_ceph': True,
'ephemeral_ceph': True,
'volumes_lvm': False
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute', 'ceph-osd'],
'slave-03': ['ceph-osd']
}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ["slave-02", "slave-03"]:
logger.info("Get partitions for {node}".format(node=node))
before_reboot_partitions = [checkers.get_ceph_partitions(
self.env.get_ssh_to_remote_by_name(node),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
logger.info("Warm-restart nodes")
self.fuel_web.warm_restart_nodes(
[self.fuel_web.environment.get_virtual_environment().
get_node(name=node)])
logger.info("Get partitions for {node} once again".format(
node=node
))
after_reboot_partitions = [checkers.get_ceph_partitions(
self.env.get_ssh_to_remote_by_name(node),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
if before_reboot_partitions != after_reboot_partitions:
logger.info("Partitions don`t match")
logger.info("Before reboot: %s" % before_reboot_partitions)
logger.info("After reboot: %s" % after_reboot_partitions)
raise Exception()
logger.info("Check Ceph health is ok after reboot")
self.fuel_web.check_ceph_status(cluster_id)
logger.info("Cold-restart nodes")
self.fuel_web.cold_restart_nodes(
[self.fuel_web.environment.get_virtual_environment().
get_node(name=node)])
after_reboot_partitions = [checkers.get_ceph_partitions(
self.env.get_ssh_to_remote_by_name(node),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
if before_reboot_partitions != after_reboot_partitions:
logger.info("Partitions don`t match")
logger.info("Before reboot: %s" % before_reboot_partitions)
logger.info("After reboot: %s" % after_reboot_partitions)
raise Exception()
logger.info("Check Ceph health is ok after reboot")
self.fuel_web.check_ceph_status(cluster_id)

View File

@ -0,0 +1,283 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
from proboscis import asserts
from proboscis import test
from fuelweb_test.helpers.decorators import check_fuel_statistics
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import os_actions
from fuelweb_test import settings as hlp_data
from fuelweb_test import logger
from fuelweb_test.tests import base_test_case
@test(groups=["cluster_actions"])
class EnvironmentAction(base_test_case.TestBasic):
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
groups=["smoke", "deploy_flat_stop_reset_on_deploying",
"image_based"])
@log_snapshot_on_error
@check_fuel_statistics
def deploy_flat_stop_on_deploying(self):
"""Stop reset cluster in HA mode with flat nova-network
Scenario:
1. Create cluster in HA mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Run provisioning task
5. Run deployment task
6. Stop deployment
7. Add 1 node with cinder role
8. Re-deploy cluster
9. Run OSTF
Duration 50m
Snapshot: deploy_flat_stop_reset_on_deploying
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE,
settings={
'tenant': 'stop_deploy',
'user': 'stop_deploy',
'password': 'stop_deploy'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.provisioning_cluster_wait(cluster_id)
self.fuel_web.deploy_task_wait(cluster_id=cluster_id, progress=10)
self.fuel_web.stop_deployment_wait(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:2])
self.fuel_web.update_nodes(
cluster_id,
{
'slave-03': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
asserts.assert_equal(
3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_flat_stop_reset_on_deploying")
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
groups=["smoke", "deploy_flat_stop_reset_on_provisioning"])
@log_snapshot_on_error
def deploy_flat_stop_reset_on_provisioning(self):
"""Stop provisioning cluster in HA mode with flat nova-network
Scenario:
1. Create cluster in HA mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Run provisioning task
5. Stop provisioning
6. Reset settings
7. Add 1 node with cinder role
8. Re-deploy cluster
9. Run OSTF
Duration 40m
Snapshot: deploy_flat_stop_reset_on_provisioning
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.provisioning_cluster_wait(
cluster_id=cluster_id, progress=20)
try:
self.fuel_web.stop_deployment_wait(cluster_id)
except Exception:
logger.debug(traceback.format_exc())
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:2])
self.fuel_web.update_nodes(
cluster_id,
{
'slave-03': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
asserts.assert_equal(
3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_flat_stop_reset_on_provisioning")
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
groups=["smoke", "deploy_reset_on_ready"])
@log_snapshot_on_error
@check_fuel_statistics
def deploy_reset_on_ready(self):
"""Stop reset cluster in HA mode with 1 controller
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy cluster
5. Reset settings
6. Update net
7. Re-deploy cluster
8. Verify network
9. Run OSTF
Duration 40m
Snapshot: deploy_reset_on_ready
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.stop_reset_env_wait(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:2])
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=8, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_reset_on_ready")
@test(groups=["cluster_actions"])
class EnvironmentActionOnHA(base_test_case.TestBasic):
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5],
groups=["smoke", "deploy_stop_reset_on_ha"])
@log_snapshot_on_error
def deploy_stop_reset_on_ha(self):
"""Stop reset cluster in ha mode
Scenario:
1. Create cluster
2. Add 3 node with controller role
3. Deploy cluster
4. Stop deployment
5. Reset settings
6. Add 2 nodes with compute role
7. Re-deploy cluster
8. Run OSTF
Duration 60m
Snapshot: deploy_stop_reset_on_ha
"""
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE_HA
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller']
}
)
self.fuel_web.deploy_cluster_wait_progress(cluster_id, progress=10)
self.fuel_web.stop_deployment_wait(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:3])
self.fuel_web.update_nodes(
cluster_id,
{
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=1, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
self.env.make_snapshot("deploy_stop_reset_on_ha")

View File

@ -0,0 +1,367 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis import test
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import os_actions
from fuelweb_test.settings import DEPLOYMENT_MODE_HA
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_3", "ha", "bvt_1"])
class TestHaVLAN(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_ha_vlan", "ha_nova_vlan"])
@log_snapshot_on_error
def deploy_ha_vlan(self):
"""Deploy cluster in HA mode with VLAN Manager
Scenario:
1. Create cluster
2. Add 3 nodes with controller roles
3. Add 2 nodes with compute roles
4. Set up cluster to use Network VLAN manager with 8 networks
5. Deploy the cluster
6. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
7. Run network verification
8. Run OSTF
9. Create snapshot
Duration 70m
Snapshot deploy_ha_vlan
"""
self.env.revert_snapshot("ready_with_5_slaves")
data = {
'tenant': 'novaHAVlan',
'user': 'novaHAVlan',
'password': 'novaHAVlan'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=8, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
self.env.make_snapshot("deploy_ha_vlan")
@test(groups=["thread_4", "ha"])
class TestHaFlat(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_ha_flat", "ha_nova_flat"])
@log_snapshot_on_error
def deploy_ha_flat(self):
"""Deploy cluster in HA mode with flat nova-network
Scenario:
1. Create cluster
2. Add 3 nodes with controller roles
3. Add 2 nodes with compute roles
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Run verify networks
7. Run OSTF
8. Make snapshot
Duration 70m
Snapshot deploy_ha_flat
"""
self.env.revert_snapshot("ready_with_5_slaves")
data = {
'tenant': 'novaHaFlat',
'user': 'novaHaFlat',
'password': 'novaHaFlat'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=1, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.security.verify_firewall(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
self.env.make_snapshot("deploy_ha_flat")
@test(groups=["thread_4", "ha", "image_based"])
class TestHaFlatAddCompute(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["ha_flat_add_compute"])
@log_snapshot_on_error
def ha_flat_add_compute(self):
"""Add compute node to cluster in HA mode with flat nova-network
Scenario:
1. Create cluster
2. Add 3 nodes with controller roles
3. Add 2 nodes with compute roles
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Add 1 node with compute role
7. Deploy the cluster
8. Run network verification
9. Run OSTF
Duration 80m
Snapshot ha_flat_add_compute
"""
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=1, timeout=300)
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[5:6])
self.fuel_web.update_nodes(
cluster_id, {'slave-06': ['compute']}, True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
self.env.make_snapshot("ha_flat_add_compute")
@test(groups=["thread_4", "ha"])
class TestHaFlatScalability(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["ha_flat_scalability", "ha_nova_flat_scalability"])
@log_snapshot_on_error
def ha_flat_scalability(self):
"""Check HA mode on scalability
Scenario:
1. Create cluster
2. Add 1 controller node
3. Deploy the cluster
4. Add 2 controller nodes
5. Deploy changes
6. Run network verification
7. Add 2 controller nodes
8. Deploy changes
9. Run network verification
10. Run OSTF
Duration 110m
Snapshot ha_flat_scalability
"""
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.update_nodes(
cluster_id, {'slave-02': ['controller'],
'slave-03': ['controller']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
for devops_node in self.env.get_virtual_environment(
).nodes().slaves[:3]:
self.fuel_web.assert_pacemaker(
devops_node.name,
self.env.get_virtual_environment(
).nodes().slaves[:3], [])
self.fuel_web.update_nodes(
cluster_id, {'slave-04': ['controller'],
'slave-05': ['controller']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
for devops_node in self.env.get_virtual_environment(
).nodes().slaves[:5]:
self.fuel_web.assert_pacemaker(
devops_node.name,
self.env.get_virtual_environment(
).nodes().slaves[:5], [])
ret = self.fuel_web.get_pacemaker_status(devops_node.name)
assert_true(
re.search('vip__management\s+\(ocf::fuel:ns_IPaddr2\):'
'\s+Started node', ret), 'vip management started')
assert_true(
re.search('vip__public\s+\(ocf::fuel:ns_IPaddr2\):'
'\s+Started node', ret), 'vip public started')
self.fuel_web.security.verify_firewall(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'sanity'])
self.env.make_snapshot("ha_flat_scalability")
@test(groups=["known_issues", "ha"])
class BackupRestoreHa(TestBasic):
@test(depends_on=[TestHaFlat.deploy_ha_flat],
groups=["known_issues", "backup_restore_ha_flat"])
@log_snapshot_on_error
def backup_restore_ha_flat(self):
"""Backup/restore master node with cluster in ha mode
Scenario:
1. Revert snapshot "deploy_ha_flat"
2. Backup master
3. Check backup
4. Run OSTF
5. Add 1 node with compute role
6. Restore master
7. Check restore
8. Run OSTF
Duration 50m
"""
self.env.revert_snapshot("deploy_ha_flat")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
'novaHaFlat', 'novaHaFlat', 'novaHaFlat')
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=1, timeout=300)
self.fuel_web.backup_master(self.env.get_admin_remote())
checkers.backup_check(self.env.get_admin_remote())
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[5:6])
self.fuel_web.update_nodes(
cluster_id, {'slave-06': ['compute']}, True, False
)
assert_equal(
6, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.restore_master(self.env.get_admin_remote())
checkers.restore_check_sum(self.env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
checkers.iptables_check(self.env.get_admin_remote())
assert_equal(
5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[5:6])
self.fuel_web.update_nodes(
cluster_id, {'slave-06': ['compute']}, True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
self.env.make_snapshot("backup_restore_ha_flat")

View File

@ -0,0 +1,960 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis import test
from fuelweb_test.helpers import checkers
from devops.helpers.helpers import tcp_ping
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers.eb_tables import Ebtables
from fuelweb_test.helpers import os_actions
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NODE_VOLUME_SIZE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test import logger
@test(groups=["thread_2"])
class OneNodeDeploy(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_release],
groups=["deploy_one_node"])
@log_snapshot_on_error
def deploy_one_node(self):
"""Deploy cluster with controller node only
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Deploy the cluster
4. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
Duration 20m
"""
self.env.revert_snapshot("ready")
self.fuel_web.client.get_root()
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[:1])
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
logger.info('cluster is %s' % str(cluster_id))
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller']}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=4, networks_count=1, timeout=300)
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['sanity'],
test_name=('fuel_health.tests.sanity.test_sanity_identity'
'.SanityIdentityTest.test_list_users'))
@test(groups=["thread_2"])
class HAOneControllerFlat(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["smoke", "deploy_ha_one_controller_flat",
"ha_one_controller_nova_flat", "image_based", "smoke_nova"])
@log_snapshot_on_error
def deploy_ha_one_controller_flat(self):
"""Deploy cluster in HA mode with flat nova-network
Scenario:
1. Create cluster in HA mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Verify networks
7. Verify network configuration on controller
8. Run OSTF
Duration 30m
Snapshot: deploy_ha_one_controller_flat
"""
self.env.revert_snapshot("ready_with_3_slaves")
data = {
'tenant': 'novaSimpleFlat',
'user': 'novaSimpleFlat',
'password': 'novaSimpleFlat'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.update_internal_network(cluster_id, '10.1.0.0/24')
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
self.fuel_web.verify_network(cluster_id)
self.env.verify_network_configuration("slave-01")
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_one_controller_flat", is_make=True)
@test(depends_on=[deploy_ha_one_controller_flat],
groups=["ha_one_controller_flat_create_instance"])
@log_snapshot_on_error
def ha_one_controller_flat_create_instance(self):
"""Create instance with file injection
Scenario:
1. Revert "ha one controller flat" environment
2. Create instance with file injection
3. Assert instance was created
4. Assert file is on instance
Duration 20m
"""
self.env.revert_snapshot("deploy_ha_one_controller_flat")
data = {
'tenant': 'novaSimpleFlat',
'user': 'novaSimpleFlat',
'password': 'novaSimpleFlat'
}
cluster_id = self.fuel_web.get_last_created_cluster()
os = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
remote = self.env.get_ssh_to_remote_by_name('slave-01')
remote.execute("echo 'Hello World' > /root/test.txt")
server_files = {"/root/test.txt": 'Hello World'}
instance = os.create_server_for_migration(file=server_files)
floating_ip = os.assign_floating_ip(instance)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
res = os.execute_through_host(
remote,
floating_ip.ip, "sudo cat /root/test.txt")
assert_true(res == 'Hello World', 'file content is {0}'.format(res))
@test(depends_on=[deploy_ha_one_controller_flat],
groups=["ha_one_controller_flat_node_deletion"])
@log_snapshot_on_error
def ha_one_controller_flat_node_deletion(self):
"""Remove compute from cluster in ha mode with flat nova-network
Scenario:
1. Revert "deploy_ha_one_controller_flat" environment
2. Remove compute node
3. Deploy changes
4. Verify node returns to unallocated pool
Duration 8m
"""
self.env.revert_snapshot("deploy_ha_one_controller_flat")
cluster_id = self.fuel_web.get_last_created_cluster()
nailgun_nodes = self.fuel_web.update_nodes(
cluster_id, {'slave-02': ['compute']}, False, True)
task = self.fuel_web.deploy_cluster(cluster_id)
self.fuel_web.assert_task_success(task)
nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
assert_true(
len(nodes) == 1, "Verify 1 node has pending deletion status"
)
wait(
lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=10 * 60
)
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ha_one_controller_flat_blocked_vlan"])
@log_snapshot_on_error
def ha_one_controller_flat_blocked_vlan(self):
"""Verify network verification with blocked VLANs
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Block first VLAN
7. Run Verify network and assert it fails
8. Restore first VLAN
Duration 20m
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
ebtables = self.env.get_ebtables(
cluster_id, self.env.get_virtual_environment(
).nodes().slaves[:2])
ebtables.restore_vlans()
try:
ebtables.block_first_vlan()
self.fuel_web.verify_network(cluster_id, success=False)
finally:
ebtables.restore_first_vlan()
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ha_one_controller_flat_add_compute"])
@log_snapshot_on_error
def ha_one_controller_flat_add_compute(self):
"""Add compute node to cluster in ha mode
Scenario:
1. Create cluster in HA mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Add 1 node with role compute
7. Deploy changes
8. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
9. Verify services list on compute nodes
10. Run OSTF
Duration 40m
Snapshot: ha_one_controller_flat_add_compute
"""
self.env.revert_snapshot("ready_with_3_slaves")
data = {
'tenant': 'flatAddCompute',
'user': 'flatAddCompute',
'password': 'flatAddCompute'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.update_nodes(
cluster_id, {'slave-03': ['compute']}, True, False)
self.fuel_web.deploy_cluster_wait(cluster_id)
assert_equal(
3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=8, networks_count=1, timeout=300)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("ha_one_controller_flat_add_compute")
@test(groups=["thread_2"])
class HAOneControllerVlan(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_ha_one_controller_vlan",
"ha_one_controller_nova_vlan"])
@log_snapshot_on_error
def deploy_ha_one_controller_vlan(self):
"""Deploy cluster in ha mode with nova-network VLAN Manager
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Set up cluster to use Network VLAN manager with 8 networks
5. Deploy the cluster
6. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
7. Run network verification
8. Run OSTF
Duration 30m
Snapshot: deploy_ha_one_controller_vlan
"""
self.env.revert_snapshot("ready_with_3_slaves")
data = {
'tenant': 'novaSimpleVlan',
'user': 'novaSimpleVlan',
'password': 'novaSimpleVlan'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=8, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_one_controller_vlan")
@test(groups=["thread_2", "multirole"])
class MultiroleControllerCinder(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_multirole_controller_cinder"])
@log_snapshot_on_error
def deploy_multirole_controller_cinder(self):
"""Deploy cluster in HA mode with multi-role controller and cinder
Scenario:
1. Create cluster in HA mode with 1 controller
2. Add 1 node with controller and cinder roles
3. Add 1 node with compute role
4. Deploy the cluster
5. Run network verification
6. Run OSTF
Duration 30m
Snapshot: deploy_multirole_controller_cinder
"""
self.env.revert_snapshot("ready_with_3_slaves")
data = {
'tenant': 'multirolecinder',
'user': 'multirolecinder',
'password': 'multirolecinder'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'cinder'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("deploy_multirole_controller_cinder")
@test(groups=["thread_2", "multirole"])
class MultiroleComputeCinder(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_multirole_compute_cinder"])
@log_snapshot_on_error
def deploy_multirole_compute_cinder(self):
"""Deploy cluster in HA mode with multi-role compute and cinder
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 2 node with compute and cinder roles
4. Deploy the cluster
5. Run network verification
6. Run OSTF
Duration 30m
Snapshot: deploy_multirole_compute_cinder
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute', 'cinder'],
'slave-03': ['compute', 'cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("deploy_multirole_compute_cinder")
@test(groups=["thread_2"])
class FloatingIPs(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_floating_ips"])
@log_snapshot_on_error
def deploy_floating_ips(self):
"""Deploy cluster with non-default 3 floating IPs ranges
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute and cinder roles
4. Update floating IP ranges. Use 3 ranges
5. Deploy the cluster
6. Verify available floating IP list
7. Run OSTF
Duration 30m
Snapshot: deploy_floating_ips
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
'tenant': 'floatingip',
'user': 'floatingip',
'password': 'floatingip'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
networking_parameters = {
"floating_ranges": self.fuel_web.get_floating_ranges()[0]}
self.fuel_web.client.update_network(
cluster_id,
networking_parameters=networking_parameters
)
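# get_floating_ranges() is assumed to return a pair: [0] the ranges to
# configure and [1] the concrete IPs expected to be allocated from them.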
self.fuel_web.deploy_cluster_wait(cluster_id)
# assert ips
expected_ips = self.fuel_web.get_floating_ranges()[1]
self.fuel_web.assert_cluster_floating_list('slave-02', expected_ips)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_floating_ips")
@test(groups=["ha_one_controller"])
class HAOneControllerCinder(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_ha_one_controller_cinder",
"ha_one_controller_nova_cinder"])
@log_snapshot_on_error
def deploy_ha_one_controller_cinder(self):
"""Deploy cluster in HA mode with cinder
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 1 node with cinder role
5. Deploy the cluster
6. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
7. Run OSTF
Duration 30m
Snapshot: deploy_ha_one_controller_cinder
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
self.fuel_web.verify_network(cluster_id)
self.env.verify_network_configuration("slave-01")
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_one_controller_cinder")
@test(groups=["thread_1"])
class NodeMultipleInterfaces(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_node_multiple_interfaces"])
@log_snapshot_on_error
def deploy_node_multiple_interfaces(self):
"""Deploy cluster with networks allocated on different interfaces
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 1 node with cinder role
5. Split networks on existing physical interfaces
6. Deploy the cluster
7. Verify network configuration on each deployed node
8. Run network verification
Duration 25m
Snapshot: deploy_node_multiple_interfaces
"""
self.env.revert_snapshot("ready_with_3_slaves")
interfaces_dict = {
'eth1': ['public'],
'eth2': ['storage'],
'eth3': ['fixed'],
'eth4': ['management'],
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
)
nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.fuel_web.update_node_networks(node['id'], interfaces_dict)
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ['slave-01', 'slave-02', 'slave-03']:
self.env.verify_network_configuration(node)
self.fuel_web.verify_network(cluster_id)
self.env.make_snapshot("deploy_node_multiple_interfaces")
@test(groups=["thread_1"])
class NodeDiskSizes(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["check_nodes_notifications"])
@log_snapshot_on_error
def check_nodes_notifications(self):
"""Verify nailgun notifications for discovered nodes
Scenario:
1. Revert snapshot "ready_with_3_slaves"
2. Verify hard drive sizes for discovered nodes in /api/nodes
3. Verify hard drive sizes for discovered nodes in notifications
Duration 5m
"""
self.env.revert_snapshot("ready_with_3_slaves")
# assert /api/nodes
disk_size = NODE_VOLUME_SIZE * 1024 ** 3
nailgun_nodes = self.fuel_web.client.list_nodes()
for node in nailgun_nodes:
for disk in node['meta']['disks']:
assert_equal(disk['size'], disk_size, 'Disk size')
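# Build the human-readable total size string that appears in the
# "discovered" notification: three identical disks, bytes converted
# to decimal TB.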
hdd_size = "{} TB HDD".format(float(disk_size * 3 / (10 ** 9)) / 1000)
notifications = self.fuel_web.client.get_notifications()
for node in nailgun_nodes:
# assert /api/notifications
for notification in notifications:
discover = notification['topic'] == 'discover'
current_node = notification['node_id'] == node['id']
if current_node and discover and \
"discovered" in notification['message']:
assert_true(hdd_size in notification['message'])
# assert disks
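# get_node_disks reports sizes in MB; roughly 500 MB appears to be
# reserved (an assumption inferred from the expected value below).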
disks = self.fuel_web.client.get_node_disks(node['id'])
for disk in disks:
assert_equal(disk['size'],
NODE_VOLUME_SIZE * 1024 - 500, 'Disk size')
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["check_nodes_disks"])
@log_snapshot_on_error
def check_nodes_disks(self):
"""Verify hard drive sizes for deployed nodes
Scenario:
1. Create cluster in Ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 1 node with cinder role
5. Deploy the cluster
6. Verify hard drive sizes for deployed nodes
Duration 30m
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.security.verify_firewall(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
nodes_dict = {
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
# assert node disks after deployment
for node_name in nodes_dict:
str_block_devices = self.fuel_web.get_cluster_block_devices(
node_name)
logger.debug("Block device:\n{}".format(str_block_devices))
expected_regexp = re.compile(
"vda\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(NODE_VOLUME_SIZE))
assert_true(
expected_regexp.search(str_block_devices),
"Unable to find vda block device for {}G in: {}".format(
NODE_VOLUME_SIZE, str_block_devices
))
expected_regexp = re.compile(
"vdb\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(NODE_VOLUME_SIZE))
assert_true(
expected_regexp.search(str_block_devices),
"Unable to find vdb block device for {}G in: {}".format(
NODE_VOLUME_SIZE, str_block_devices
))
expected_regexp = re.compile(
"vdc\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(NODE_VOLUME_SIZE))
assert_true(
expected_regexp.search(str_block_devices),
"Unable to find vdc block device for {}G in: {}".format(
NODE_VOLUME_SIZE, str_block_devices
))
@test(groups=["thread_1"])
class MultinicBootstrap(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_release],
groups=["multinic_bootstrap_booting"])
@log_snapshot_on_error
def multinic_bootstrap_booting(self):
"""Verify slaves booting with blocked mac address
Scenario:
1. Revert snapshot "ready"
2. Block traffic for first slave node (by mac)
3. Restore mac addresses and boot first slave
4. Verify slave mac addresses is equal to unblocked
Duration 2m
"""
self.env.revert_snapshot("ready")
slave = self.env.get_virtual_environment().nodes().slaves[0]
mac_addresses = [interface.mac_address for interface in
slave.interfaces.filter(network__name='internal')]
try:
for mac in mac_addresses:
Ebtables.block_mac(mac)
for mac in mac_addresses:
Ebtables.restore_mac(mac)
slave.destroy(verbose=False)
self.env.get_virtual_environment(
).nodes().admins[0].revert("ready")
nailgun_slave = self.env.bootstrap_nodes([slave])[0]
assert_equal(mac.upper(), nailgun_slave['mac'].upper())
Ebtables.block_mac(mac)
finally:
for mac in mac_addresses:
Ebtables.restore_mac(mac)
@test(groups=["thread_2", "test"])
class DeleteEnvironment(TestBasic):
@test(depends_on=[HAOneControllerFlat.deploy_ha_one_controller_flat],
groups=["delete_environment"])
@log_snapshot_on_error
def delete_environment(self):
"""Delete existing environment
and verify nodes return to the unallocated state
Scenario:
1. Revert "deploy_ha_one_controller" environment
2. Delete environment
3. Verify nodes return to unallocated pool
Duration 15m
"""
self.env.revert_snapshot("deploy_ha_one_controller_flat")
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.client.delete_cluster(cluster_id)
nailgun_nodes = self.fuel_web.client.list_nodes()
nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
assert_true(
len(nodes) == 2, "Verify 2 nodes have pending deletion status"
)
wait(
lambda:
self.fuel_web.is_node_discovered(nodes[0]) and
self.fuel_web.is_node_discovered(nodes[1]),
timeout=10 * 60,
interval=15
)
@test(groups=["thread_1"])
class UntaggedNetworksNegative(TestBasic):
@test(
depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["untagged_networks_negative"],
enabled=False)
@log_snapshot_on_error
def untagged_networks_negative(self):
"""Verify network verification fails with untagged network on eth0
Scenario:
1. Create cluster in ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Split networks on existing physical interfaces
5. Remove VLAN tagging from networks which are on eth0
6. Run network verification (assert it fails)
7. Start cluster deployment (assert it fails)
Duration 30m
"""
self.env.revert_snapshot("ready_with_3_slaves")
vlan_turn_off = {'vlan_start': None}
interfaces = {
'eth0': ["fixed"],
'eth1': ["public"],
'eth2': ["management", "storage"],
'eth3': []
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
nets = self.fuel_web.client.get_networks(cluster_id)['networks']
nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.fuel_web.update_node_networks(node['id'], interfaces)
# select networks that will be untagged:
for net in nets:
net.update(vlan_turn_off)
# stop using VLANs:
self.fuel_web.client.update_network(cluster_id, networks=nets)
# run network check:
self.fuel_web.verify_network(cluster_id, success=False)
# deploy cluster:
task = self.fuel_web.deploy_cluster(cluster_id)
self.fuel_web.assert_task_failed(task)
@test(groups=["known_issues"])
class BackupRestoreHAOneController(TestBasic):
@test(depends_on=[HAOneControllerFlat.deploy_ha_one_controller_flat],
groups=["ha_one_controller_backup_restore"])
@log_snapshot_on_error
def ha_one_controller_backup_restore(self):
"""Backup/restore master node with cluster in ha mode
Scenario:
1. Revert snapshot "deploy_ha_one_controller_flat"
2. Backup master
3. Check backup
4. Run OSTF
5. Add 1 node with compute role
6. Restore master
7. Check restore
8. Run OSTF
Duration 35m
"""
self.env.revert_snapshot("deploy_ha_one_controller_flat")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
'novaSimpleFlat', 'novaSimpleFlat', 'novaSimpleFlat')
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.backup_master(self.env.get_admin_remote())
checkers.backup_check(self.env.get_admin_remote())
self.fuel_web.update_nodes(
cluster_id, {'slave-03': ['compute']}, True, False)
assert_equal(
3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.restore_master(self.env.get_admin_remote())
checkers.restore_check_sum(self.env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
checkers.iptables_check(self.env.get_admin_remote())
assert_equal(
2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.update_nodes(
cluster_id, {'slave-03': ['compute']}, True, False)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("ha_one_controller_backup_restore")

View File

@ -0,0 +1,173 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from proboscis import test
from proboscis.asserts import assert_equal
from fuelweb_test.helpers.decorators import check_fuel_statistics
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.settings import DEPLOYMENT_MODE_HA
from fuelweb_test.settings import MULTIPLE_NETWORKS
from fuelweb_test.settings import NODEGROUPS
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.tests.base_test_case import SetupEnvironment
@test(groups=["multiple_cluster_networks", "thread_7"])
class TestMultipleClusterNets(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["multiple_cluster_networks", "multiple_cluster_net_setup"])
@log_snapshot_on_error
def multiple_cluster_net_setup(self):
"""Check master node deployment and configuration with 2 sets of nets
Scenario:
1. Revert snapshot with 5 slaves
2. Check that slaves got IPs via DHCP from both admin/pxe networks
3. Make environment snapshot
Duration 6m
Snapshot multiple_cluster_net_setup
"""
if not MULTIPLE_NETWORKS:
raise SkipTest()
self.env.revert_snapshot("ready_with_5_slaves")
# Get network parts of IP addresses with /24 netmask
networks = ['.'.join(self.env._get_network(n).split('.')[0:-1]) for n
in [self.env.admin_net, self.env.admin_net2]]
nodes_addresses = ['.'.join(node['ip'].split('.')[0:-1]) for node in
self.fuel_web.client.list_nodes()]
assert_equal(set(networks), set(nodes_addresses),
"Only one admin network is used for discovering slaves:"
" '{0}'".format(set(nodes_addresses)))
self.env.make_snapshot("multiple_cluster_net_setup", is_make=True)
@test(depends_on=[multiple_cluster_net_setup],
groups=["multiple_cluster_networks",
"multiple_cluster_net_neutron_gre_ha", "thread_7"])
@log_snapshot_on_error
@check_fuel_statistics
def deploy_neutron_gre_ha_nodegroups(self):
"""Deploy HA environment with NeutronGRE and 2 nodegroups
Scenario:
1. Revert snapshot with 2 networks sets for slaves
2. Create cluster (HA) with Neutron GRE
3. Add 3 controller nodes from default nodegroup
4. Add 2 compute nodes from custom nodegroup
5. Deploy cluster
6. Run health checks (OSTF)
Duration 110m
Snapshot deploy_neutron_gre_ha_nodegroups
"""
if not MULTIPLE_NETWORKS:
raise SkipTest()
self.env.revert_snapshot("multiple_cluster_net_setup")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA,
settings={
"net_provider": 'neutron',
"net_segment_type": 'gre',
'tenant': 'haGre',
'user': 'haGre',
'password': 'haGre'
}
)
nodegroup1 = NODEGROUPS[0]['name']
nodegroup2 = NODEGROUPS[1]['name']
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': [['controller'], nodegroup1],
'slave-05': [['controller'], nodegroup1],
'slave-03': [['controller'], nodegroup1],
'slave-02': [['compute', 'cinder'], nodegroup2],
'slave-04': [['compute', 'cinder'], nodegroup2],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_gre_ha_nodegroups")
@test(depends_on=[multiple_cluster_net_setup],
groups=["multiple_cluster_networks",
"multiple_cluster_net_ceph_ha", "thread_7"])
@log_snapshot_on_error
def deploy_ceph_ha_nodegroups(self):
"""Deploy HA environment with NeutronGRE, Ceph and 2 nodegroups
Scenario:
1. Revert snapshot with 2 networks sets for slaves
2. Create cluster (HA) with Neutron GRE and Ceph
3. Add 3 controller + ceph nodes from default nodegroup
4. Add 2 compute + ceph nodes from custom nodegroup
5. Deploy cluster
6. Run health checks (OSTF)
Duration 110m
Snapshot deploy_ceph_ha_nodegroups
"""
if not MULTIPLE_NETWORKS:
raise SkipTest()
self.env.revert_snapshot("multiple_cluster_net_setup")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA,
settings={
'volumes_ceph': True,
'images_ceph': True,
'volumes_lvm': False,
"net_provider": 'neutron',
"net_segment_type": 'gre',
'tenant': 'haGreCeph',
'user': 'haGreCeph',
'password': 'haGreCeph'
}
)
nodegroup1 = NODEGROUPS[0]['name']
nodegroup2 = NODEGROUPS[1]['name']
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': [['controller', 'ceph-osd'], nodegroup1],
'slave-05': [['controller', 'ceph-osd'], nodegroup1],
'slave-03': [['controller', 'ceph-osd'], nodegroup1],
'slave-02': [['compute', 'ceph-osd'], nodegroup2],
'slave-04': [['compute', 'ceph-osd'], nodegroup2],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_gre_ha_nodegroups")

View File

@ -0,0 +1,387 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_equal
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_1", "neutron", "smoke_neutron"])
class NeutronGre(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_neutron_gre", "ha_one_controller_neutron_gre"])
@log_snapshot_on_error
def deploy_neutron_gre(self):
"""Deploy cluster in ha mode with 1 controller and Neutron GRE
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 2 nodes with compute role
4. Deploy the cluster
5. Run network verification
6. Run OSTF
Duration 35m
Snapshot deploy_neutron_gre
"""
self.env.revert_snapshot("ready_with_3_slaves")
segment_type = 'gre'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
'tenant': 'simpleGre',
'user': 'simpleGre',
'password': 'simpleGre'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute', 'cinder'],
'slave-03': ['compute', 'cinder']
}
)
self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/26',
'192.168.196.1')
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
# assert_equal(str(cluster['net_segment_type']), segment_type)
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_gre")
@test(groups=["thread_1", "neutron"])
class NeutronVlan(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_neutron_vlan", "ha_one_controller_neutron_vlan"])
@log_snapshot_on_error
def deploy_neutron_vlan(self):
"""Deploy cluster in ha mode with 1 controller and Neutron VLAN
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 2 nodes with compute role
4. Deploy the cluster
5. Run network verification
6. Run OSTF
Duration 35m
Snapshot deploy_neutron_vlan
"""
self.env.revert_snapshot("ready_with_3_slaves")
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
'tenant': 'simpleVlan',
'user': 'simpleVlan',
'password': 'simpleVlan'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
# assert_equal(str(cluster['net_segment_type']), segment_type)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_vlan")
@test(groups=["neutron", "ha", "ha_neutron", "image_based"])
class NeutronGreHa(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_neutron_gre_ha", "ha_neutron_gre"])
@log_snapshot_on_error
def deploy_neutron_gre_ha(self):
"""Deploy cluster in HA mode with Neutron GRE
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Deploy the cluster
5. Run network verification
6. Run OSTF
Duration 80m
Snapshot deploy_neutron_gre_ha
"""
self.env.revert_snapshot("ready_with_5_slaves")
segment_type = 'gre'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
'tenant': 'haGre',
'user': 'haGre',
'password': 'haGre'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_gre_ha")
@test(groups=["thread_6", "neutron", "ha", "ha_neutron"])
class NeutronGreHaPublicNetwork(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_neutron_gre_ha_public_network"])
@log_snapshot_on_error
def deploy_neutron_gre_ha_with_public_network(self):
"""Deploy cluster in HA mode with Neutron GRE and public network
assigned to all nodes
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Enable assign public networks to all nodes option
5. Deploy the cluster
6. Check that public network was assigned to all nodes
7. Run network verification
8. Run OSTF
Duration 80m
Snapshot deploy_neutron_gre_ha_public_network
"""
self.env.revert_snapshot("ready_with_5_slaves")
segment_type = 'gre'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
'tenant': 'haGre',
'user': 'haGre',
'password': 'haGre',
'assign_to_all_nodes': True
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
self.fuel_web.verify_network(cluster_id)
self.fuel_web.security.verify_firewall(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_gre_ha_public_network")
@test(groups=["neutron", "ha", "ha_neutron"])
class NeutronVlanHa(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_neutron_vlan_ha", "ha_neutron_vlan"])
@log_snapshot_on_error
def deploy_neutron_vlan_ha(self):
"""Deploy cluster in HA mode with Neutron VLAN
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Deploy the cluster
5. Run network verification
6. Run OSTF
Duration 80m
Snapshot deploy_neutron_vlan_ha
"""
self.env.revert_snapshot("ready_with_5_slaves")
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/22',
'192.168.196.1')
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
# assert_equal(str(cluster['net_segment_type']), segment_type)
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_vlan_ha")
@test(groups=["thread_6", "neutron", "ha", "ha_neutron"])
class NeutronVlanHaPublicNetwork(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_neutron_vlan_ha_public_network"])
@log_snapshot_on_error
def deploy_neutron_vlan_ha_with_public_network(self):
"""Deploy cluster in HA mode with Neutron VLAN and public network
assigned to all nodes
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Enable assign public networks to all nodes option
5. Deploy the cluster
6. Check that public network was assigned to all nodes
7. Run network verification
8. Run OSTF
Duration 80m
Snapshot deploy_neutron_vlan_ha_public_network
"""
self.env.revert_snapshot("ready_with_5_slaves")
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type,
'assign_to_all_nodes': True
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/22',
'192.168.196.1')
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
# assert_equal(str(cluster['net_segment_type']), segment_type)
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
self.fuel_web.verify_network(cluster_id)
self.fuel_web.security.verify_firewall(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_vlan_ha_public_network")

View File

@ -0,0 +1,73 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.settings import DEPLOYMENT_MODE, NEUTRON_ENABLE
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_REDHAT
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["test_pullrequest"])
class TestPullRequest(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_pr_ha"])
@log_snapshot_on_error
def deploy_pr_ha(self):
"""Deploy one-controller cluster in HA mode with Neutron GRE
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Validate cluster network
Snapshot: deploy_pr_ha
"""
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_3_slaves")
settings = None
if NEUTRON_ENABLE:
settings = {
"net_provider": 'neutron',
"net_segment_type": "gre"
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id, is_feature=True)
self.fuel_web.run_ostf(
cluster_id=self.fuel_web.get_last_created_cluster()
)

View File

@ -0,0 +1,994 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
from devops.helpers.helpers import wait
from proboscis import asserts
from proboscis import SkipTest
from proboscis import test
from proboscis.asserts import assert_equal
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.common import Common
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import os_actions
from fuelweb_test import settings
from fuelweb_test import logger as LOGGER
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["services", "services.sahara", "services_ha_one_controller"])
class SaharaHAOneController(TestBasic):
"""Sahara ha with 1 controller tests.
Running these tests without KVM is not recommended.
Put the Sahara image in place before starting.
"""
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_sahara_ha_one_controller_gre"])
@log_snapshot_on_error
def deploy_sahara_ha_one_controller_gre(self):
"""Deploy cluster in ha mode with 1 controller Sahara and Neutron GRE
Scenario:
1. Create a Fuel cluster. Set the option for Sahara installation
2. Add 1 node with "controller" role
3. Add 1 node with "compute" role
4. Deploy the Fuel cluster
5. Verify Sahara service on controller
6. Run all sanity and smoke tests
7. Register Vanilla2 image for Sahara
8. Run platform Vanilla2 test for Sahara
Duration 65m
Snapshot: deploy_sahara_ha_one_controller_gre
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
LOGGER.debug('Check MD5 sum of Vanilla2 image')
check_image = checkers.check_image(
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5,
settings.SERVTEST_LOCAL_PATH)
asserts.assert_true(check_image)
self.env.revert_snapshot("ready_with_3_slaves")
LOGGER.debug('Create Fuel cluster for Sahara tests')
data = {
'sahara': True,
'net_provider': 'neutron',
'net_segment_type': 'gre',
'tenant': 'saharaSimple',
'user': 'saharaSimple',
'password': 'saharaSimple'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=5, networks_count=2, timeout=300)
LOGGER.debug('Verify Sahara service on controller')
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='sahara-all')
LOGGER.debug('Run all sanity and smoke tests')
path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.'
test_names = ['VanillaTwoTemplatesTest.test_vanilla_two_templates',
'HDPTwoTemplatesTest.test_hdp_two_templates']
self.fuel_web.run_ostf(
cluster_id=self.fuel_web.get_last_created_cluster(),
tests_must_be_passed=[path_to_tests + test_name
for test_name in test_names]
)
LOGGER.debug('Import Vanilla2 image for Sahara')
common_func = Common(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
common_func.image_import(
settings.SERVTEST_LOCAL_PATH,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_META)
path_to_tests = 'fuel_health.tests.platform_tests.test_sahara.'
test_names = ['VanillaTwoClusterTest.test_vanilla_two_cluster']
for test_name in test_names:
LOGGER.debug('Run platform test {0} for Sahara'.format(test_name))
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=path_to_tests + test_name, timeout=60 * 200)
self.env.make_snapshot("deploy_sahara_ha_one_controller_gre")
@test(groups=["services", "services.sahara", "services_ha"])
class SaharaHA(TestBasic):
"""Sahara HA tests.
Running these tests without KVM is not recommended.
Put the Sahara image in place before starting.
"""
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_sahara_ha_gre"])
@log_snapshot_on_error
def deploy_sahara_ha_gre(self):
"""Deploy cluster in HA mode with Sahara and Neutron GRE
Scenario:
1. Create a Fuel cluster. Set the option for Sahara installation
2. Add 3 nodes with "controller" role
3. Add 1 node with "compute" role
4. Deploy the Fuel cluster
5. Verify Sahara service on all controllers
6. Run all sanity and smoke tests
7. Register Vanilla2 image for Sahara
8. Run platform Vanilla2 test for Sahara
Duration 130m
Snapshot: deploy_sahara_ha_gre
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
LOGGER.debug('Check MD5 sum of Vanilla2 image')
check_image = checkers.check_image(
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5,
settings.SERVTEST_LOCAL_PATH)
asserts.assert_true(check_image)
self.env.revert_snapshot("ready_with_5_slaves")
LOGGER.debug('Create Fuel cluster for Sahara tests')
data = {
'sahara': True,
'net_provider': 'neutron',
'net_segment_type': 'gre',
'tenant': 'saharaHA',
'user': 'saharaHA',
'password': 'saharaHA'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster_vip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
cluster_vip, data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=13, networks_count=2, timeout=300)
LOGGER.debug('Verify Sahara service on all controllers')
for slave in ["slave-01", "slave-02", "slave-03"]:
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
service_name='sahara-all')
LOGGER.debug('Run all sanity and smoke tests')
path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.'
test_names = ['VanillaTwoTemplatesTest.test_vanilla_two_templates',
'HDPTwoTemplatesTest.test_hdp_two_templates']
self.fuel_web.run_ostf(
cluster_id=self.fuel_web.get_last_created_cluster(),
tests_must_be_passed=[path_to_tests + test_name
for test_name in test_names]
)
LOGGER.debug('Import Vanilla2 image for Sahara')
common_func = Common(cluster_vip,
data['user'], data['password'], data['tenant'])
common_func.image_import(
settings.SERVTEST_LOCAL_PATH,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_META)
path_to_tests = 'fuel_health.tests.platform_tests.test_sahara.'
test_names = ['VanillaTwoClusterTest.test_vanilla_two_cluster']
for test_name in test_names:
LOGGER.debug('Run platform test {0} for Sahara'.format(test_name))
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=path_to_tests + test_name, timeout=60 * 200)
self.env.make_snapshot("deploy_sahara_ha_gre")
@test(groups=["services", "services.murano", "services_ha_one_controller"])
class MuranoHAOneController(TestBasic):
"""Murano HA with 1 controller tests.
Running these tests without KVM is not recommended.
Put the Murano image in place before starting.
Murano OSTF platform tests will fail without an Internet connection.
"""
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_murano_ha_one_controller_gre"])
@log_snapshot_on_error
def deploy_murano_ha_one_controller_gre(self):
"""Deploy cluster in HA mode with Murano and Neutron GRE
Scenario:
1. Create cluster. Set install Murano option
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Verify Murano services
6. Run OSTF
7. Register Murano image
8. Run OSTF Murano platform tests
Duration 40m
Snapshot: deploy_murano_ha_one_controller_gre
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_3_slaves")
LOGGER.debug('Check MD5 of image')
check_image = checkers.check_image(
settings.SERVTEST_MURANO_IMAGE,
settings.SERVTEST_MURANO_IMAGE_MD5,
settings.SERVTEST_LOCAL_PATH)
asserts.assert_true(check_image, "Image verification failed")
data = {
'murano': True,
'net_provider': 'neutron',
'net_segment_type': 'gre',
'tenant': 'muranoSimple',
'user': 'muranoSimple',
'password': 'muranoSimple'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=5, networks_count=2, timeout=300)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='murano-api')
common_func = Common(self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'],
data['tenant'])
LOGGER.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
cluster_id=self.fuel_web.get_last_created_cluster(),
test_sets=['sanity'],
test_name=('fuel_health.tests.sanity.test_sanity_murano.'
'MuranoSanityTests.test_create_and_delete_service')
)
LOGGER.debug('Import Murano image')
common_func.image_import(
settings.SERVTEST_LOCAL_PATH,
settings.SERVTEST_MURANO_IMAGE,
settings.SERVTEST_MURANO_IMAGE_NAME,
settings.SERVTEST_MURANO_IMAGE_META)
LOGGER.debug('Boot instance with Murano image')
image_name = settings.SERVTEST_MURANO_IMAGE_NAME
srv = common_func.create_instance(flavor_name='test_murano_flavor',
ram=2048, vcpus=1, disk=20,
server_name='murano_instance',
image_name=image_name,
neutron_network=True)
wait(lambda: common_func.get_instance_detail(srv).status == 'ACTIVE',
timeout=60 * 60)
common_func.delete_instance(srv)
LOGGER.debug('Run OSTF platform tests')
test_class_main = ('fuel_health.tests.platform_tests'
'.test_murano_linux.MuranoDeployLinuxServicesTests')
tests_names = ['test_deploy_apache_service', ]
test_classes = []
for test_name in tests_names:
test_classes.append('{0}.{1}'.format(test_class_main,
test_name))
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 36)
self.env.make_snapshot("deploy_murano_ha_one_controller_gre")
@test(groups=["services", "services.murano", "services_ha"])
class MuranoHA(TestBasic):
"""Murano HA tests.
Running these tests without KVM is not recommended.
Put the Murano image in place before starting.
Murano OSTF platform tests will fail without an Internet connection.
"""
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_murano_ha_with_gre"])
@log_snapshot_on_error
def deploy_murano_ha_with_gre(self):
"""Deploy cluster in ha mode with Murano and Neutron GRE
Scenario:
1. Create cluster. Set install Murano option
2. Add 3 nodes with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Verify Murano services
6. Run OSTF
7. Register Murano image
8. Run OSTF Murano platform tests
Duration 100m
Snapshot: deploy_murano_ha_with_gre
"""
self.env.revert_snapshot("ready_with_5_slaves")
LOGGER.debug('Check MD5 of image')
check_image = checkers.check_image(
settings.SERVTEST_MURANO_IMAGE,
settings.SERVTEST_MURANO_IMAGE_MD5,
settings.SERVTEST_LOCAL_PATH)
asserts.assert_true(check_image, "Image verification failed")
data = {
'murano': True,
'net_provider': 'neutron',
'net_segment_type': 'gre',
'tenant': 'muranoHA',
'user': 'muranoHA',
'password': 'muranoHA'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster_vip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
cluster_vip, data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=13, networks_count=2, timeout=300)
for slave in ["slave-01", "slave-02", "slave-03"]:
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
service_name='murano-api')
common_func = Common(cluster_vip, data['user'], data['password'],
data['tenant'])
LOGGER.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
cluster_id=self.fuel_web.get_last_created_cluster(),
test_sets=['sanity'],
test_name=('fuel_health.tests.sanity.test_sanity_murano.'
'MuranoSanityTests.test_create_and_delete_service')
)
LOGGER.debug('Import Murano image')
common_func.image_import(
settings.SERVTEST_LOCAL_PATH,
settings.SERVTEST_MURANO_IMAGE,
settings.SERVTEST_MURANO_IMAGE_NAME,
settings.SERVTEST_MURANO_IMAGE_META)
LOGGER.debug('Boot instance with Murano image')
image_name = settings.SERVTEST_MURANO_IMAGE_NAME
srv = common_func.create_instance(flavor_name='test_murano_flavor',
ram=2048, vcpus=1, disk=20,
server_name='murano_instance',
image_name=image_name,
neutron_network=True)
wait(lambda: common_func.get_instance_detail(srv).status == 'ACTIVE',
timeout=60 * 60)
common_func.delete_instance(srv)
LOGGER.debug('Run OSTF platform tests')
test_class_main = ('fuel_health.tests.platform_tests'
'.test_murano_linux.MuranoDeployLinuxServicesTests')
tests_names = ['test_deploy_apache_service', ]
test_classes = []
for test_name in tests_names:
test_classes.append('{0}.{1}'.format(test_class_main,
test_name))
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 36)
self.env.make_snapshot("deploy_murano_ha_with_gre")
class CeilometerOSTFTestsRun(TestBasic):
def run_tests(self, cluster_id):
"""Method run smoke, sanity and platform Ceilometer tests."""
LOGGER.debug('Run sanity and smoke tests')
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['smoke', 'sanity'],
timeout=60 * 10
)
LOGGER.debug('Run platform OSTF Ceilometer tests')
test_class_main = ('fuel_health.tests.platform_tests.'
'test_ceilometer.'
'CeilometerApiPlatformTests')
tests_names = ['test_check_alarm_state',
'test_create_sample']
test_classes = []
for test_name in tests_names:
test_classes.append('{0}.{1}'.format(test_class_main,
test_name))
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 20)
@test(groups=["services", "services.ceilometer", "services_ha_one_controller"])
class CeilometerHAOneControllerMongo(CeilometerOSTFTestsRun):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_ceilometer_ha_one_controller_with_mongo"])
@log_snapshot_on_error
def deploy_ceilometer_ha_one_controller_with_mongo(self):
"""Deploy cluster in HA mode with Ceilometer
Scenario:
1. Create cluster. Set install Ceilometer option
2. Add 1 node with controller role
3. Add 1 node with compute and cinder roles
4. Add 1 node with mongo role
5. Deploy the cluster
6. Verify ceilometer api is running
7. Run OSTF
Duration 45m
Snapshot: deploy_ceilometer_ha_one_controller_with_mongo
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'ceilometer': True,
'tenant': 'ceilometerSimple',
'user': 'ceilometerSimple',
'password': 'ceilometerSimple'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute', 'cinder'],
'slave-03': ['mongo']
}
)
nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
disk_mb = 0
for node in nailgun_nodes:
if node.get('pending_roles') == ['mongo']:
disk_mb = self.fuel_web.get_node_disk_size(node.get('id'),
"vda")
LOGGER.debug('disk size is {0}'.format(disk_mb))
mongo_disk_mb = 11116
os_disk_mb = disk_mb - mongo_disk_mb
mongo_disk_gb = ("{0}G".format(round(mongo_disk_mb / 1024, 1)))
disk_part = {
"vda": {
"os": os_disk_mb,
"mongo": mongo_disk_mb
}
}
for node in nailgun_nodes:
if node.get('pending_roles') == ['mongo']:
self.fuel_web.update_node_disk(node.get('id'), disk_part)
self.fuel_web.deploy_cluster_wait(cluster_id)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='ceilometer-api')
partitions = checkers.get_mongo_partitions(
self.env.get_ssh_to_remote_by_name("slave-03"), "vda5")
assert_equal(partitions[0].rstrip(), mongo_disk_gb,
'Mongo size {0} before deployment is not equal'
' to size after {1}'.format(mongo_disk_gb, partitions))
self.run_tests(cluster_id)
self.env.make_snapshot(
"deploy_ceilometer_ha_one_controller_with_mongo")
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_ceilometer_ha_one_controller_multirole"])
@log_snapshot_on_error
def deploy_ceilometer_ha_one_controller_multirole(self):
"""Deploy cluster in ha multirole mode with Ceilometer
Scenario:
1. Create cluster. Set install Ceilometer option
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 1 node with cinder and mongo roles
5. Deploy the cluster
6. Verify ceilometer api is running
7. Run OSTF
Duration 35m
Snapshot: deploy_ceilometer_ha_one_controller_multirole
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'ceilometer': True
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder', 'mongo']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='ceilometer-api')
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_one_controller_mulirole")
@test(groups=["services", "services.ceilometer", "services_ha"])
class CeilometerHAMongo(CeilometerOSTFTestsRun):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_ceilometer_ha_with_mongo"])
@log_snapshot_on_error
def deploy_ceilometer_ha_with_mongo(self):
"""Deploy cluster in ha mode with Ceilometer
Scenario:
1. Create cluster. Set install Ceilometer option
2. Add 3 nodes with controller role
3. Add 1 node with compute role
4. Add 1 node with mongo role
5. Deploy the cluster
6. Verify ceilometer api is running
7. Run OSTF
Duration 65m
Snapshot: deploy_ceilometer_ha_with_mongo
"""
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'ceilometer': True,
'tenant': 'ceilometerHA',
'user': 'ceilometerHA',
'password': 'ceilometerHA'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['mongo']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='ceilometer-api')
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_with_mongo")
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_ceilometer_ha_multirole"])
@log_snapshot_on_error
def deploy_ceilometer_ha_multirole(self):
"""Deploy cluster in ha multirole mode with Ceilometer
Scenario:
1. Create cluster. Set install Ceilometer option
2. Add 3 nodes with controller and mongo roles
3. Add 1 node with compute role
4. Add 1 node with cinder role
5. Deploy the cluster
6. Verify ceilometer api is running
7. Run OSTF
Duration 80m
Snapshot: deploy_ceilometer_ha_multirole
"""
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'ceilometer': True
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'mongo'],
'slave-02': ['controller', 'mongo'],
'slave-03': ['controller', 'mongo'],
'slave-04': ['compute'],
'slave-05': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='ceilometer-api')
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_mulirole")
@test(groups=["services", "services.heat", "services_ha_one_controller"])
class HeatHAOneController(TestBasic):
"""Heat HA one controller test.
Running these tests without KVM is not recommended.
"""
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_heat_ha_one_controller_neutron"])
@log_snapshot_on_error
def deploy_heat_ha_one_controller_neutron(self):
"""Deploy Heat cluster in HA mode with Neutron GRE
Scenario:
1. Create cluster
2. Add 1 node with controller and mongo roles
3. Add 1 node with compute role
4. Set install Ceilometer option
5. Deploy the cluster
6. Verify Heat, Ceilometer services
7. Run OSTF platform tests
Duration 40m
Snapshot: deploy_heat_ha_one_controller_neutron
"""
self.env.revert_snapshot("ready_with_3_slaves")
data = {
'ceilometer': True,
'net_provider': 'neutron',
'net_segment_type': 'gre',
'tenant': 'heatSimple',
'user': 'heatSimple',
'password': 'heatSimple'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'mongo'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=5, networks_count=2, timeout=300)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='heat-api', count=3)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')
test_class_main = ('fuel_health.tests.platform_tests.'
'test_heat.'
'HeatSmokeTests')
tests_names = ['test_actions',
'test_autoscaling',
'test_rollback',
'test_update']
test_classes = []
for test_name in tests_names:
test_classes.append('{0}.{1}'.format(test_class_main,
test_name))
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 60)
self.env.make_snapshot("deploy_heat_ha_one_controller_neutron")
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_heat_ha_one_controller_nova"])
@log_snapshot_on_error
def deploy_heat_ha_one_controller_nova(self):
"""Deploy Heat cluster in ha mode with Nova Network
Scenario:
1. Create cluster
2. Add 1 node with controller and mongo roles
3. Add 1 node with compute role
4. Set Ceilometer install option
5. Deploy the cluster
6. Verify Heat, Ceilometer services
7. Run OSTF platform tests
Duration 40m
Snapshot: deploy_heat_ha_one_controller_nova
"""
self.env.revert_snapshot("ready_with_3_slaves")
data = {
'ceilometer': True,
'tenant': 'heatSimple',
'user': 'heatSimple',
'password': 'heatSimple'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'mongo'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='heat-api', count=3)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')
test_class_main = ('fuel_health.tests.platform_tests.'
'test_heat.'
'HeatSmokeTests')
tests_names = ['test_actions',
'test_autoscaling',
'test_rollback',
'test_update']
test_classes = []
for test_name in tests_names:
test_classes.append('{0}.{1}'.format(test_class_main,
test_name))
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 60)
self.env.make_snapshot("deploy_heat_ha_one_controller_nova")
@test(groups=["services", "services.heat", "services_ha"])
class HeatHA(TestBasic):
"""Heat HA test.
Running these tests without KVM is not recommended.
"""
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_heat_ha"])
@log_snapshot_on_error
def deploy_heat_ha(self):
"""Deploy Heat cluster in HA mode
Scenario:
1. Create cluster
2. Add 3 nodes with controller and mongo roles
3. Add 1 node with compute role
4. Set Ceilometer install option
5. Deploy the cluster
6. Verify Heat and Ceilometer services
7. Run OSTF platform tests
Duration 70m
Snapshot: deploy_heat_ha
"""
self.env.revert_snapshot("ready_with_5_slaves")
data = {
'ceilometer': True,
'net_provider': 'neutron',
'net_segment_type': 'gre',
'tenant': 'heatHA',
'user': 'heatHA',
'password': 'heatHA'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'mongo'],
'slave-02': ['controller', 'mongo'],
'slave-03': ['controller', 'mongo'],
'slave-04': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster_vip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
cluster_vip, data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=13, networks_count=2, timeout=300)
for slave in ["slave-01", "slave-02", "slave-03"]:
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
service_name='heat-api', count=3)
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')
test_class_main = ('fuel_health.tests.platform_tests.'
'test_heat.'
'HeatSmokeTests')
tests_names = ['test_actions',
'test_rollback']
test_classes = []
for test_name in tests_names:
test_classes.append('{0}.{1}'.format(test_class_main,
test_name))
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 60)
self.env.make_snapshot("deploy_heat_ha")

View File

@ -0,0 +1,483 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import SkipTest
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers.decorators import create_diagnostic_snapshot
from fuelweb_test.helpers import os_actions
from fuelweb_test import logger
from fuelweb_test import settings as hlp_data
from fuelweb_test.tests import base_test_case as base_test_data
@test(groups=["upgrade"])
class UpgradeFuelMaster(base_test_data.TestBasic):
@classmethod
def check_upgraded_kernel(cls, admin_remote, slave_remote):
# The archive contains several kernel versions; the regular
# expression picks the newest one, which is the one that
# actually gets installed.
cmd = r"find /var/upgrade/repos/*/ubuntu/ -type f -name" \
r" 'linux-image-*.deb' -printf '%f\n' | sed -rn " \
r"'s/^linux-image-([0-9, \.]+(\-[0-9]+)?)-.*/\1/p' |" \
r" sort -rV | " \
r"head -1"
expected_kernel = ''.join(admin_remote.execute(
cmd)['stdout']).rstrip()
logger.debug("kernel version from repos is {0}".format(
expected_kernel))
kernel = UpgradeFuelMaster.get_slave_kernel(slave_remote)
checkers.check_kernel(kernel, expected_kernel)
@classmethod
def get_slave_kernel(cls, slave_remote):
kernel = ''.join(slave_remote.execute(
r"uname -r | sed -rn"
r" 's/^([0-9, \.]+(\-[0-9]+)?)-.*/\1/p'")['stdout']).rstrip()
logger.debug("slave kernel is {0}".format(kernel))
return kernel
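# check_upgraded_kernel above picks the newest version with `sort -rV`, which
# orders strings such as '3.13.0-39' by numeric components. The same ordering
# can be reproduced in Python; the sample versions are illustrative:
def version_key(version):
    # '3.13.0-39' -> (3, 13, 0, 39): compare numerically, not lexically.
    return tuple(int(part) for part in version.replace('-', '.').split('.'))

versions = ['3.13.0-39', '3.2.0-23', '3.13.0-7']
assert max(versions, key=version_key) == '3.13.0-39'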
@test(groups=["upgrade_ha_one_controller"])
@log_snapshot_on_error
def upgrade_ha_one_controller_env(self):
"""Upgrade ha one controller deployed cluster with ceph
Scenario:
1. Revert snapshot with ha one controller ceph env
2. Run upgrade on master
3. Check that upgrade was successful
4. Add another compute node
5. Re-deploy cluster
6. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'ceph_ha_one_controller_compact'):
raise SkipTest()
self.env.revert_snapshot("ceph_ha_one_controller_compact")
cluster_id = self.fuel_web.get_last_created_cluster()
remote = self.env.get_ssh_to_remote_by_name('slave-01')
expected_kernel = self.get_slave_kernel(remote)
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:3])
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nailgun_upgrade_migration()
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[3:4])
self.fuel_web.update_nodes(
cluster_id, {'slave-04': ['compute']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
user='ceph1', tenant='ceph1', passwd='ceph1')
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=10, networks_count=1, timeout=300)
self.fuel_web.run_ostf(cluster_id=cluster_id)
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-04')
kernel = self.get_slave_kernel(remote)
checkers.check_kernel(kernel, expected_kernel)
create_diagnostic_snapshot(
self.env, "pass", "upgrade_ha_one_controller_env")
self.env.make_snapshot("upgrade_ha_one_controller")
@test(groups=["upgrade_ha_one_controller_delete_node"])
@log_snapshot_on_error
def upgrade_ha_one_controller_delete_node(self):
"""Upgrade ha 1 controller deployed cluster with ceph and
delete node from old cluster
Scenario:
1. Revert ceph_ha_one_controller_compact snapshot
2. Run upgrade on master
3. Check that upgrade was successful
4. Delete one compute+ceph node
5. Re-deploy cluster
6. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'ceph_ha_one_controller_compact'):
raise SkipTest()
self.env.revert_snapshot("ceph_ha_one_controller_compact")
cluster_id = self.fuel_web.get_last_created_cluster()
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:3])
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nailgun_upgrade_migration()
nailgun_nodes = self.fuel_web.update_nodes(
cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)
task = self.fuel_web.deploy_cluster(cluster_id)
self.fuel_web.assert_task_success(task)
nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
wait(
lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=10 * 60
)
self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
self.env.make_snapshot("upgrade_ha_one_controller_delete_node")
@test(groups=["upgrade_ha"])
@log_snapshot_on_error
def upgrade_ha_env(self):
"""Upgrade ha deployed cluster
Scenario:
1. Revert snapshot with neutron gre ha env
2. Run upgrade on master
3. Check that upgrade was successful
4. Check cluster is operable
5. Create a new cluster with 1 controller and VLAN networking
6. Deploy cluster
7. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'deploy_neutron_gre_ha'):
raise SkipTest()
self.env.revert_snapshot("deploy_neutron_gre_ha")
cluster_id = self.fuel_web.get_last_created_cluster()
available_releases_before = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:5])
self.fuel_web.assert_nailgun_upgrade_migration()
self.fuel_web.run_ostf(
cluster_id=cluster_id)
available_releases_after = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
added_release = [release_id for release_id in available_releases_after
if release_id not in available_releases_before]
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[5:7])
data = {
'tenant': 'novaSimpleVlan',
'user': 'novaSimpleVlan',
'password': 'novaSimpleVlan'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE,
settings=data,
release_id=added_release[0]
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-06': ['controller'],
'slave-07': ['compute']
}
)
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=8, timeout=300)
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-06')
self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("upgrade_ha")
@test(groups=["deploy_ha_after_upgrade"])
@log_snapshot_on_error
def deploy_ha_after_upgrade(self):
"""Upgrade and deploy new ha cluster
Scenario:
1. Revert snapshot with ha 1 controller ceph env
2. Run upgrade on master
3. Check that upgrade was successful
4. Re-deploy cluster
5. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'ceph_ha_one_controller_compact'):
raise SkipTest()
self.env.revert_snapshot("ceph_ha_one_controller_compact")
cluster_id = self.fuel_web.get_last_created_cluster()
available_releases_before = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.TARBALL_PATH),
'/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:3])
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nailgun_upgrade_migration()
available_releases_after = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
added_release = [release_id for release_id in available_releases_after
if release_id not in available_releases_before]
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[3:9])
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type
},
release_id=added_release[0]
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-04': ['controller'],
'slave-05': ['controller'],
'slave-06': ['controller'],
'slave-07': ['compute'],
'slave-08': ['compute'],
'slave-09': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-04')
self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_after_upgrade")
@test(groups=["rollback"])
class RollbackFuelMaster(base_test_data.TestBasic):
@test(groups=["rollback_automatic_ha"])
@log_snapshot_on_error
def rollback_automatically_ha_env(self):
"""Rollback manually ha deployed cluster
Scenario:
1. Revert snapshot with neutron gre ha env
2. Add raise exception to openstack.py file
3. Run upgrade on master
4. Check that rollback starts automatically
5. Check that cluster was not upgraded
6. Add 1 cinder node and re-deploy cluster
7. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'deploy_neutron_gre_ha'):
raise SkipTest()
self.env.revert_snapshot("deploy_neutron_gre_ha")
cluster_id = self.fuel_web.get_last_created_cluster()
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.TARBALL_PATH), '/var')
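# We expect exit code 255 here because the upgrade fails
# and rolls back.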
checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
password=hlp_data.KEYSTONE_CREDS['password'],
rollback=True, exit_code=255)
checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_TO,
hlp_data.UPGRADE_FUEL_FROM)
logger.debug("all containers are ok")
_wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
self.env.get_virtual_environment(
).nodes().slaves[0]), timeout=120)
logger.debug("all services are up now")
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:5])
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[5:6])
self.fuel_web.update_nodes(
cluster_id, {'slave-06': ['cinder']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("rollback_automatic_ha")
@test(groups=["rollback_automatic_ha_one_controller"])
@log_snapshot_on_error
def rollback_automatically_ha_one_controller_env(self):
"""Rollback automatically ha one controller deployed cluster
Scenario:
1. Revert snapshot with deploy neutron gre env
2. Add raise exception to docker_engine.py file
3. Run upgrade on master
4. Check that rollback starts automatically
5. Check that cluster was not upgraded and run OSTF
6. Add 1 cinder node and re-deploy cluster
7. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'deploy_neutron_gre'):
raise SkipTest()
self.env.revert_snapshot("deploy_neutron_gre")
cluster_id = self.fuel_web.get_last_created_cluster()
remote = self.env.get_ssh_to_remote_by_name('slave-01')
expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.TARBALL_PATH), '/var')
# We expect exit code 255 here because the upgrade fails
# and exits with status 255
checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
password=hlp_data.KEYSTONE_CREDS['password'],
rollback=True, exit_code=255)
checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_TO,
hlp_data.UPGRADE_FUEL_FROM)
logger.debug("all containers are ok")
_wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
self.env.get_virtual_environment(
).nodes().slaves[0]), timeout=120)
logger.debug("all services are up now")
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:3])
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[3:4])
self.fuel_web.update_nodes(
cluster_id, {'slave-04': ['cinder']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-04')
kernel = UpgradeFuelMaster.get_slave_kernel(remote)
checkers.check_kernel(kernel, expected_kernel)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("rollback_automatic_ha_one_controller")

View File

@@ -0,0 +1,585 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from devops.helpers.helpers import wait
from proboscis.asserts import assert_true
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import os_actions
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE_SIMPLE
from fuelweb_test.settings import DEPLOYMENT_MODE_HA
from fuelweb_test.settings import VCENTER_IP
from fuelweb_test.settings import VCENTER_USERNAME
from fuelweb_test.settings import VCENTER_PASSWORD
from fuelweb_test.settings import VCENTER_CLUSTERS
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["vcenter"])
class VcenterDeploy(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_1],
groups=["smoke", "vcenter_one_node_simple"])
@log_snapshot_on_error
def vcenter_one_node_simple(self):
"""Deploy vcenter cluster with controller node only
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Deploy the cluster
4. Verify that the cluster was set up correctly, there are no
dead services
5. Create instance and delete instance
"""
self.env.revert_snapshot("ready_with_1_slaves")
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS
}
)
logger.info("cluster is {}".format(cluster_id))
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller']}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
# Wait until nova-compute gets information about clusters
# FIXME: replace the sleep with a wait function later.
time.sleep(60)
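# A possible wait-based replacement for the sleep above (sketch only,
# left commented out): poll Nova until it reports at least one
# hypervisor, i.e. nova-compute has registered the vSphere cluster.
# Assumes the controller API is reachable on its nailgun IP.
# ctrl_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
# wait(lambda: len(os_actions.OpenStackActions(
#     ctrl_ip).get_hypervisors()) > 0, interval=10, timeout=300)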
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke', 'sanity'])
@test(depends_on=[SetupEnvironment.prepare_slaves_1],
groups=["vcenter_multiple_cluster"])
@log_snapshot_on_error
def vcenter_multiple_cluster(self):
"""Deploy cluster with one controller and test vCenter
multiple vSphere clusters support
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Deploy the cluster
4. Check that at least two hypervisors (vSphere clusters) are available
5. Create 5 instances
6. Check connectivity between 2 instances that are running in
different vSphere clusters
"""
self.env.revert_snapshot("ready_with_1_slaves")
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS
}
)
logger.info("cluster is {0}".format(cluster_id))
# Add nodes to roles
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller']}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
# Wait until nova-compute gets information about clusters
# FIXME: replace the sleep with a wait function later.
time.sleep(60)
ctrl_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
logger.info("Controller IP is {}".format(ctrl_ip))
os = os_actions.OpenStackActions(ctrl_ip)
hypervisors = os.get_hypervisors()
# Check hypervisor quantity and create instances
assert_true(len(hypervisors) > 1, 'Not enough vCenter clusters.')
if len(hypervisors) > 1:
logger.info("Create instances and assign floating IPs:")
for i in range(1, 6):
srv = os.create_server_for_migration(timeout=300)
logger.info(os.get_instance_detail(srv).to_dict()['name'])
os.assign_floating_ip(srv)
# Check that there are instances on each hypervisor
# FIXME: replace the sleep with a wait function later.
time.sleep(30)
hypervisors = os.get_hypervisors()
for hypervisor in hypervisors:
assert_true(os.get_hypervisor_vms_count(hypervisor) != 0,
"No active VMs on " +
os.get_hypervisor_hostname(hypervisor))
logger.info("{} active VMs on Hypervisor {}".format(
os.get_hypervisor_vms_count(hypervisor),
os.get_hypervisor_hostname(hypervisor)))
# Get instances IPs from different hypervisors
servers_for_check = {}
ips_for_check = []
servers = os.get_servers()
for server in servers:
if os.get_srv_hypervisor_name(server) not in servers_for_check:
servers_for_check[os.get_srv_hypervisor_name(server)] = {}
server_detail = os.get_instance_detail(server).to_dict()
for net_prefs in server_detail['addresses']['novanetwork']:
if net_prefs['OS-EXT-IPS:type'] == 'floating' and \
net_prefs['addr'] not in ips_for_check and \
len(ips_for_check) == 0:
ips_for_check.append(net_prefs['addr'])
if net_prefs['OS-EXT-IPS:type'] == 'fixed' and \
len(ips_for_check) == 1:
ips_for_check.append(net_prefs['addr'])
# Wait until vm is booted
ssh = self.env.get_ssh_to_remote_by_name("slave-01")
wait(
lambda: not ssh.execute('curl -s -m1 http://' + ips_for_check[0] +
':22 |grep -iq "[a-z]"')['exit_code'],
interval=10, timeout=100)
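# The curl call above probes the instance's SSH port: once sshd is up
# it returns a text banner, grep -iq "[a-z]" matches it with exit code
# 0, and the negated exit code tells wait() the VM is reachable.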
# Check server's connectivity
res = int(os.execute_through_host(ssh, ips_for_check[0],
"ping -q -c3 " + ips_for_check[1] +
" 2>/dev/null >/dev/null;"
" echo -n $?"))
assert_true(res == 0, "Error in instances' network connectivity.")
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["vcenter_vmdk"])
@log_snapshot_on_error
def vcenter_vmdk(self):
"""Deploy cluster with controller and cinder nodes and run checks
Scenario:
1. Create cluster
2. Add 2 nodes
1 controller
1 cinder (VMDK backend)
3. Deploy the cluster
4. Run OSTF
"""
self.env.revert_snapshot("ready_with_3_slaves")
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
'volumes_vmdk': True,
'volumes_lvm': False,
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS,
'tenant': 'vcenter',
'user': 'vcenter',
'password': 'vcenter'
}
)
logger.info("cluster is {0}".format(cluster_id))
# Assign roles to nodes
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['cinder']}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
# Wait until nova-compute gets information about clusters
# FIXME: replace the sleep with a wait function later.
time.sleep(60)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke', 'sanity'])
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["vcenter_ha"])
@log_snapshot_on_error
def vcenter_ha(self):
"""Deploy cluster with 3 controllers and run OSTF
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Deploy the cluster
4. Run OSTF
"""
self.env.revert_snapshot("ready_with_3_slaves")
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA,
settings={
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS,
'tenant': 'vcenter',
'user': 'vcenter',
'password': 'vcenter'
}
)
logger.info("cluster is {0}".format(cluster_id))
# Add nodes to roles
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller']
}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
# Wait until nova-compute gets information about clusters
# FIXME: replace the sleep with a wait function later.
time.sleep(60)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["vcenter_simple_add_cinder"])
@log_snapshot_on_error
def vcenter_simple_add_cinder(self):
"""Deploy cluster with one controller and cinder node
Scenario:
1. Create cluster
2. Add 1 node with role controller
3. Deploy the cluster
4. Check network connectivity and run OSTF
5. Add 1 cinder node to the cluster
6. Re-deploy the cluster
7. Check network connectivity and run OSTF
"""
self.env.revert_snapshot("ready_with_3_slaves")
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
'volumes_lvm': False,
'volumes_vmdk': True,
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS,
'tenant': 'vcenter',
'user': 'vcenter',
'password': 'vcenter'
}
)
logger.info("cluster is {0}".format(cluster_id))
# Add nodes to roles
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller']}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.update_nodes(
cluster_id,
{'slave-02': ['cinder']}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke', 'sanity'])
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["vcenter_ha_deployment_with_cinder"])
@log_snapshot_on_error
def vcenter_ha_deployment_with_cinder(self):
"""Deploy HA cluster with standalone cinder node
Scenario:
1. Create cluster
2. Add 4 nodes with roles:
3 controller
1 cinder
3. Deploy cluster
4. Check network connectivity and run OSTF
"""
self.env.revert_snapshot("ready_with_5_slaves")
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA,
settings={
'volumes_lvm': False,
'volumes_vmdk': True,
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS,
'tenant': 'vcenter',
'user': 'vcenter',
'password': 'vcenter'
}
)
logger.info("cluster is {0}".format(cluster_id))
# Add nodes to roles
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['cinder']
}
)
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["vcenter_simple_stop_deployment"])
@log_snapshot_on_error
def vcenter_simple_stop_deployment(self):
"""Deploy cluster, stop running deployment process, start deployment
again
Scenario:
1. Create cluster
2. Add 2 nodes with roles:
1 controller/cinder
1 cinder
3. Stop cluster deployment
4. Wait until nodes are 'online' again
5. Re-deploy the cluster
6. Check network connectivity and run OSTF
"""
self.env.revert_snapshot("ready_with_3_slaves")
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
'volumes_lvm': False,
'volumes_vmdk': True,
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS,
'tenant': 'vcenter',
'user': 'vcenter',
'password': 'vcenter'
}
)
logger.info("cluster is {0}".format(cluster_id))
# Add nodes to roles
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller', 'cinder'],
'slave-02': ['cinder']
}
)
self.fuel_web.provisioning_cluster_wait(cluster_id)
self.fuel_web.deploy_task_wait(cluster_id=cluster_id, progress=40)
self.fuel_web.stop_deployment_wait(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.get_virtual_environment().nodes().slaves[:2])
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["vcenter_vlan_simple", "vcenter_vlan"])
@log_snapshot_on_error
def vcenter_vlan_simple(self):
"""Deploy a cluster in Simple mode with 1 controller node,
1 cinder node, vCenter and VlanManager enabled.
Verify that it works.
Scenario:
1. Create a Simple cluster with vCenter as a hypervisor
2. Add 1 node with controller and 1 node with cinder roles
3. Set Nova-Network VlanManager as a network backend
4. Deploy the cluster
5. Run network verification
6. Run OSTF
"""
self.env.revert_snapshot("ready_with_3_slaves")
# Configure a cluster.
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
'volumes_vmdk': True,
'volumes_lvm': False,
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS,
'tenant': 'vcenter',
'user': 'vcenter',
'password': 'vcenter'
}
)
logger.info("cluster is {0}".format(cluster_id))
# Assign roles to nodes.
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['cinder'],
}
)
# Configure network interfaces.
# Public and Fixed networks are on the same interface
# because Nova creates its port groups on the same vSwitch
# where the ESXi management interface is located.
interfaces = {
'eth0': ["fuelweb_admin"],
'eth1': ["public", "fixed"],
'eth2': ["management", ],
'eth3': [],
'eth4': ["storage"],
}
slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in slave_nodes:
self.fuel_web.update_node_networks(node['id'], interfaces)
# Configure Nova-Network VLanManager.
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32)
# Deploy the cluster.
self.fuel_web.deploy_cluster_wait(cluster_id)
# Run tests.
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke', 'sanity'])
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["vcenter_vlan_ha", "vcenter_vlan"])
@log_snapshot_on_error
def vcenter_vlan_ha(self):
"""Deploy a cluster in HA mode with 2 controller node,
1 controller + cinder node, vCenter and VlanManager enabled.
Verify that it works.
Scenario:
1. Create an HA cluster with vCenter as a hypervisor
2. Add 3 nodes with roles:
2 controller
1 controller+cinder
3. Set Nova-Network VlanManager as a network backend
4. Deploy the cluster
5. Run network verification
6. Run OSTF
"""
self.env.revert_snapshot("ready_with_3_slaves")
# Configure a cluster.
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_HA,
settings={
'volumes_vmdk': True,
'volumes_lvm': False,
'host_ip': VCENTER_IP,
'vc_user': VCENTER_USERNAME,
'vc_password': VCENTER_PASSWORD,
'cluster': VCENTER_CLUSTERS,
'tenant': 'vcenter',
'user': 'vcenter',
'password': 'vcenter'
}
)
logger.info("cluster is {0}".format(cluster_id))
# Assign roles to nodes.
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller', 'cinder'],
}
)
# Configure network interfaces.
# Public and Fixed networks are on the same interface
# because Nova creates its port groups on the same vSwitch
# where the ESXi management interface is located.
interfaces = {
'eth0': ["fuelweb_admin"],
'eth1': ["public", "fixed"],
'eth2': ["management", ],
'eth3': [],
'eth4': ["storage"],
}
slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
for node in slave_nodes:
self.fuel_web.update_node_networks(node['id'], interfaces)
# Configure Nova-Network VLanManager.
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32)
# Deploy the cluster.
self.fuel_web.deploy_cluster_wait(cluster_id)
# Run tests.
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])

View File

@@ -0,0 +1,118 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.tools import assert_equals
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers import http
from fuelweb_test.helpers import os_actions
from fuelweb_test import settings as hlp
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test import logger
@test(groups=["thread_2"])
class HAOneControllerZabbix(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_ha_one_controller_zabbix"])
@log_snapshot_on_error
def deploy_ha_one_controller_zabbix(self):
"""Deploy cluster in ha mode 1 controller with zabbix-server
Scenario:
1. Setup master node
2. Enable 'experimental' in Nailgun
3. Restart Nailgun
4. Create cluster in ha mode with 1 controller
5. Add 1 node with controller role
6. Add 1 node with compute role
7. Add 1 node with zabbix role
8. Deploy the cluster
9. Verify networks
10. Check that zabbix server is running on the node
11. Run OSTF
12. Login in zabbix dashboard
Duration 30m
Snapshot: deploy_ha_one_controller_zabbix
"""
self.env.revert_snapshot("ready_with_3_slaves")
node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)
# Turn on experimental mode
checkers.check_enable_experimental_mode(
node_ssh, '/etc/fuel/version.yaml')
# restart nailgun
checkers.restart_nailgun(node_ssh)
# check if zabbix role appears
self.fuel_web.assert_release_role_present(
release_name=hlp.OPENSTACK_RELEASE,
role_name='zabbix-server')
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp.DEPLOYMENT_MODE,
settings={
'tenant': 'admin',
'user': 'admin',
'password': 'admin'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['zabbix-server']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
# login in dashboard
node_ip = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.get_virtual_environment().get_node(name='slave-03'))['ip']
dashboard_url = 'http://{0}/zabbix/'.format(node_ip)
logger.debug("Zabbix dashboard {0}".format(dashboard_url))
login_data = {'username': 'admin', 'password': 'zabbix'}
zab_client = http.HTTPClientZabbix(url=dashboard_url)
login_resp = zab_client.post(endpoint='index.php', data=login_data)
assert_equals(login_resp.code, 200)
assert_equals(login_resp.msg, 'OK')
event_resp = zab_client.get(
endpoint='events.php',
cookie=login_resp.headers.get('Set-Cookie'))
assert_equals(event_resp.code, 200)
self.env.make_snapshot("deploy_ha_one_controller_zabbix")

View File

@@ -0,0 +1,370 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis import factory
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import packages_fixture
from fuelweb_test.helpers import utils
from fuelweb_test import settings as hlp_data
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test import logger
@test(groups=["os_patching"])
class TestPatch(TestBasic):
def __init__(self, snapshot):
super(TestPatch, self).__init__()
self.snapshot = snapshot
@test
@log_snapshot_on_error
def deploy_and_patch(self):
"""Update OS on reverted env
Scenario:
1. Revert environment
2. Upload tarball
3. Check that it uploaded
4. Extract data
5. Get available releases
6. Run upgrade script
7. Check that new release appears
8. Put new release into cluster
9. Run cluster update
10. Get cluster net configuration
11. Check that services are restarted
12. Check that packages are updated
13. Run OSTF
14. Create snapshot
"""
logger.info("snapshot name is {0}".format(self.snapshot))
if not self.env.manager.devops_env.has_snapshot(self.snapshot):
logger.error('There is no snapshot found: {0}'.format(self.snapshot))
raise SkipTest('Cannot find snapshot {0}'.format(self.snapshot))
# 1. Revert environment
self.env.revert_snapshot(self.snapshot)
logger.info("Start upload upgrade archive")
node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)
# 2. Upload tarball
checkers.upload_tarball(
node_ssh=node_ssh, tar_path=hlp_data.TARBALL_PATH,
tar_target='/var/tmp')
logger.info("Archive should upload. "
"Lets check that it exists on master node ...")
# 3. Check that it uploaded
checkers.check_tarball_exists(node_ssh, os.path.basename(
hlp_data.TARBALL_PATH), '/var/tmp')
logger.info("Extract archive to the /var/tmp")
# 4. Extract data
checkers.untar(node_ssh, os.path.basename(
hlp_data.TARBALL_PATH), '/var/tmp')
logger.info("Get release ids for deployed operation"
" system before upgrade.")
# Get cluster nodes
nailgun_nodes = [
self.fuel_web.get_nailgun_node_by_devops_node(node)
for node in self.env.nodes().slaves
if self.fuel_web.get_nailgun_node_by_devops_node(node)]
# Record the installed nova packages before the update
p_version_before = {}
for node in nailgun_nodes:
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
res = checkers.get_package_versions_from_node(
remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
p_version_before[node["devops_name"]] = res
# 5. Get available releases
available_releases_before = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
logger.info('Time to run upgrade...')
# 6. Run upgrade script
checkers.run_script(node_ssh, '/var/tmp', 'upgrade.sh',
password=hlp_data.KEYSTONE_CREDS['password'])
logger.info('Check if the upgrade is complete.')
checkers.wait_upgrade_is_done(node_ssh=node_ssh,
phrase='*** UPGRADE DONE SUCCESSFULLY',
timeout=600 * 10)
available_releases_after = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
logger.info('release ids list after upgrade is {0}'.format(
available_releases_after))
# 7. Check that new release appears
assert_true(
len(available_releases_after) > len(available_releases_before),
"There is no new release, release ids before {0},"
" release ids after {1}". format(
available_releases_before, available_releases_after))
release_version = hlp_data.RELEASE_VERSION
logger.debug("Release version is {0}".format(release_version))
if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
res = utils.get_yaml_to_json(
node_ssh,
'/etc/puppet/{0}/manifests/ubuntu-versions.yaml'.format(
release_version))
res_packages = json.loads(res[0])
logger.debug('what we have in res_packages {0}'.format(
res_packages))
else:
res = utils.get_yaml_to_json(
node_ssh,
'/etc/puppet/{0}/manifests/centos-versions.yaml'.format(
release_version))
res_packages = json.loads(res[0])
logger.debug('what we have in res_packages {0}'.format(
res_packages))
cluster_id = self.fuel_web.get_last_created_cluster()
logger.debug("Cluster id is {0}".format(cluster_id))
# 8. Put new release into cluster
if release_version:
added_release = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE,
release_version=release_version)
logger.debug("Does we have here release id ? {0}".format(
release_version))
else:
added_release = [release_id for release_id in available_releases_after
if release_id not in available_releases_before]
# get nova pids on controller before update
ssh_to_controller = self.fuel_web.get_ssh_for_node(
[n["devops_name"] for n in nailgun_nodes
if 'controller' in n['roles']][0])
nova_controller_services = ['nova-api', 'nova-cert',
'nova-objectstore', 'nova-conductor',
'nova-scheduler']
nova_pids_before = utils.nova_service_get_pid(
ssh_to_controller, nova_controller_services)
logger.debug('Nova pids on controller before {0}'.format(
nova_pids_before))
# 9. Run cluster update
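# Setting pending_release_id marks the cluster for an update to the
# new release; run_update below then performs the actual update.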
self.fuel_web.update_cluster(
cluster_id=cluster_id,
data={
'pending_release_id': added_release[0],
'release_id': self.fuel_web.get_cluster_release_id(
cluster_id)})
logger.info('All preparations for the update are done. '
'It is time to update the cluster.')
self.fuel_web.run_update(cluster_id=cluster_id,
timeout=hlp_data.UPDATE_TIMEOUT, interval=20)
# 10. Get cluster net configuration
cluster_net = self.fuel_web.client.get_cluster(
cluster_id)['net_provider']
logger.debug('cluster net is {0}'.format(cluster_net))
# 11. Check that services are restarted
if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
utils.check_if_service_restarted_ubuntu(
ssh_to_controller, ["keystone'",
"glance-registry'",
"glance-api'",
"heat-api-cfn'",
"heat-engine'",
"heat-api'",
"heat-api-cloudwatch'"])
else:
utils.check_if_service_restarted_centos(
ssh_to_controller, ["keystone",
"glance-registry",
"glance-api",
"heat-api-cfn",
"heat-engine",
"heat-api",
"heat-api-cloudwatch",
"nova-novncproxy"])
# get nova pids on controller after update
nova_pids_after = utils.nova_service_get_pid(
ssh_to_controller, nova_controller_services)
logger.debug('Nova pids on controller after {0}'.format(
nova_pids_after))
assert_not_equal(nova_pids_before, nova_pids_after)
# 12. Check that packages are updated
if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
for package in packages_fixture.dep:
packages_fixture.dep[package] = res_packages[package]
logger.debug("Current state of dict is {0}".format(
packages_fixture.dep))
for key in packages_fixture.dep:
res = checkers.get_package_versions_from_node(
ssh_to_controller, name=key, os_type='Ubuntu')
logger.debug('res_from_node is {0}'.format(res))
assert_true(
packages_fixture.dep[key] in res,
"Wrong version of package {0}. "
"Should be {1} but get {2}".format(
key, packages_fixture.dep[key], res))
else:
for package in packages_fixture.rpm:
packages_fixture.rpm[package] = res_packages[package]
logger.debug("Current state of dict is {0}".format(
packages_fixture.rpm))
for key in packages_fixture.rpm:
res = checkers.get_package_versions_from_node(
ssh_to_controller, name=key,
os_type=hlp_data.OPENSTACK_RELEASE)
assert_true(
packages_fixture.rpm[key] in res,
"Wrong version of package {0}. "
"Should be {1} but get {2}".format(
key, packages_fixture.rpm[key], res))
p_version_after = {}
for node in nailgun_nodes:
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
res = checkers.get_package_versions_from_node(
remote=remote, name="openstack",
os_type=hlp_data.OPENSTACK_RELEASE)
p_version_after[node["devops_name"]] = res
logger.info("packages before {0}".format(p_version_before))
logger.info("packages after {0}".format(p_version_after))
assert_true(p_version_before != p_version_after)
# 13. Run OSTF
self.fuel_web.run_ostf(cluster_id=cluster_id)
# 14. Create snapshot
self.env.make_snapshot('{0}_and_patch'.format(self.snapshot))
# TODO(tleontovich): enable when rollback becomes available
#@test(depends_on=[deploy_and_patch])
@log_snapshot_on_error
def deploy_and_rollback(self):
"""Rollback/Downgrade os on reverted env
Scenario:
1. Revert patched environment
2. Get release ids
3. Identify release id for rollback/downgrade
4. Run rollback/downgrade
5. Check that operation was successful
6. Run OSTF
"""
logger.info("snapshot name is {0}".format(self.snapshot))
if not self.env.manager.devops_env.has_snapshot(
'{0}_and_patch'.format(self.snapshot)):
raise SkipTest('Cannot find snapshot {0}'.format(self.snapshot))
self.env.revert_snapshot('{0}_and_patch'.format(self.snapshot))
logger.info("Get release ids for deployed operation"
" system before rollback..")
# Get cluster nodes
nailgun_nodes = [
self.fuel_web.get_nailgun_node_by_devops_node(node)
for node in self.env.nodes().slaves
if self.fuel_web.get_nailgun_node_by_devops_node(node)]
logger.info("Find next nodes {0}".format(nailgun_nodes))
# Record the installed nova packages before the rollback
p_version_before = {}
for node in nailgun_nodes:
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
res = checkers.get_package_versions_from_node(
remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
p_version_before[node["devops_name"]] = res
avail_release_ids = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
logger.info('Available release ids before rollback {0}'.format(
avail_release_ids))
cluster_id = self.fuel_web.get_last_created_cluster()
cluster_release_id = self.fuel_web.get_cluster_release_id(
cluster_id)
logger.info('Time to run rollback...')
self.fuel_web.update_cluster(
cluster_id=cluster_id,
data={
'pending_release_id': [i for i in avail_release_ids
if i != cluster_release_id][0],
'release_id': self.fuel_web.get_cluster_release_id(
cluster_id)})
self.fuel_web.run_update(cluster_id=cluster_id,
timeout=hlp_data.UPDATE_TIMEOUT, interval=20)
# Check packages after
p_version_after = {}
for node in nailgun_nodes:
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
res = checkers.get_package_versions_from_node(
remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
p_version_after[node["devops_name"]] = res
logger.info("packages after {0}".format(p_version_after))
logger.info("packages before {0}".format(p_version_before))
# TODO(tleontovich): Add assert for packages when the test repo is available
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot('{0}_and_rollback'.format(self.snapshot))
@factory
def generate_patch_tests():
snap = hlp_data.SNAPSHOT.split(",")
return [TestPatch(s) for s in snap]
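# Usage sketch: SNAPSHOT is expected to be a comma-separated list of
# snapshot names, e.g. SNAPSHOT="deploy_neutron_gre,deploy_neutron_gre_ha",
# so the factory yields one TestPatch instance per listed environment.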

View File

@@ -0,0 +1,521 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
from devops.helpers.helpers import wait
from devops.helpers.helpers import _wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis import test
from proboscis import SkipTest
from fuelweb_test.helpers.checkers import check_mysql
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import os_actions
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.settings import NEUTRON_ENABLE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_5", "ha", "neutron_failover", "ha_nova_destructive",
"ha_neutron_destructive"])
class TestHaFailover(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_ha"])
@log_snapshot_on_error
def deploy_ha(self):
"""Prepare cluster in HA mode for failover tests
Scenario:
1. Create cluster
2. Add 3 nodes with controller roles
3. Add 2 nodes with compute roles
4. Deploy the cluster
5. Make snapshot
Duration 70m
Snapshot deploy_ha
"""
try:
self.check_run("deploy_ha")
except SkipTest:
return
self.env.revert_snapshot("ready_with_5_slaves")
settings = None
if NEUTRON_ENABLE:
settings = {
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
public_vip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(public_vip)
if NEUTRON_ENABLE:
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=14, networks_count=2, timeout=300)
else:
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=1, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.security.verify_firewall(cluster_id)
# Bug #1289297. Pause 5 min to make sure that all remaining activity
# on the admin node is over before creating a snapshot.
time.sleep(5 * 60)
self.env.make_snapshot("deploy_ha", is_make=True)
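# Presumably is_make=True persists this snapshot so the failover tests
# below (depends_on_groups=['deploy_ha']) can revert to it.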
@test(depends_on_groups=['deploy_ha'],
groups=["ha_destroy_controllers"])
@log_snapshot_on_error
def ha_destroy_controllers(self):
"""Destroy two controllers and check pacemaker status is correct
Scenario:
1. Destroy first controller
2. Check pacemaker status
3. Run OSTF
4. Revert environment
5. Destroy second controller
6. Check pacemaker status
7. Run OSTF
Duration 35m
"""
for devops_node in self.env.get_virtual_environment(
).nodes().slaves[:2]:
self.env.revert_snapshot("deploy_ha")
devops_node.suspend(False)
self.fuel_web.assert_pacemaker(
self.env.get_virtual_environment().nodes(
).slaves[2].name,
set(self.env.get_virtual_environment(
).nodes().slaves[:3]) - {devops_node},
[devops_node])
cluster_id = self.fuel_web.client.get_cluster_id(
self.__class__.__name__)
# Wait until Nailgun marked suspended controller as offline
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
devops_node)['online'],
timeout=60 * 5)
# Wait until MySQL Galera is UP on online controllers
self.fuel_web.wait_mysql_galera_is_up(
[n.name for n in
set(self.env.get_virtual_environment(
).nodes().slaves[:3]) - {devops_node}])
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'],
should_fail=1)
@test(depends_on_groups=['deploy_ha'],
groups=["ha_disconnect_controllers"])
@log_snapshot_on_error
def ha_disconnect_controllers(self):
"""Disconnect controllers and check pacemaker status is correct
Scenario:
1. Disconnect eth2 of the first controller
2. Check pacemaker status
3. Revert environment
4. Disconnect eth2 of the second controller
5. Check pacemaker status
6. Run OSTF
Duration 45m
"""
for devops_node in self.env.get_virtual_environment(
).nodes().slaves[:2]:
self.env.revert_snapshot("deploy_ha")
remote = self.fuel_web.get_ssh_for_node(devops_node.name)
remote.check_call('ifconfig eth2 down')
self.fuel_web.assert_pacemaker(
self.env.get_virtual_environment(
).nodes().slaves[2].name,
set(self.env.get_virtual_environment(
).nodes().slaves[:3]) - {devops_node},
[devops_node])
cluster_id = self.fuel_web.client.get_cluster_id(
self.__class__.__name__)
# Wait until MySQL Galera is UP on some controller
self.fuel_web.wait_mysql_galera_is_up(['slave-01'])
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on_groups=['deploy_ha'],
groups=["ha_delete_vips"])
@log_snapshot_on_error
def ha_delete_vips(self):
"""Delete all management and public VIPs on all controller nodes.
Verify that they are restored.
Verify total amount of secondary IPs. Should be 2:
management and public
Scenario:
1. Delete all secondary VIPs
2. Wait while it is being restored
3. Verify it is restored
4. Run OSTF
Duration 30m
"""
logger.debug('Start reverting of deploy_ha snapshot')
self.env.revert_snapshot("deploy_ha")
cluster_id = \
self.fuel_web.client.get_cluster_id(self.__class__.__name__)
logger.debug('Cluster id is {0}'.format(cluster_id))
interfaces = ('hapr-p', 'hapr-m')
slaves = self.env.get_virtual_environment(
).nodes().slaves[:3]
logger.debug("Current nodes are {0}".format([i.name for i in slaves]))
ips_amount = 0
for devops_node in slaves:
# Verify VIPs are started.
ret = self.fuel_web.get_pacemaker_status(devops_node.name)
logger.debug("Pacemaker status {0} for node {1}".format
(ret, devops_node.name))
assert_true(
re.search('vip__management\s+\(ocf::fuel:ns_IPaddr2\):'
'\s+Started node', ret),
'vip management not started. '
'Current pacemaker status is {0}'.format(ret))
assert_true(
re.search('vip__public\s+\(ocf::fuel:ns_IPaddr2\):'
'\s+Started node', ret),
'vip public not started. '
'Current pacemaker status is {0}'.format(ret))
for interface in interfaces:
# Look for management and public ip in namespace and remove it
logger.debug("Start to looking for ip of Vips")
addresses = self.fuel_web.ip_address_show(devops_node.name,
interface=interface,
namespace='haproxy')
logger.debug("Vip addresses is {0} for node {1} and interface"
" {2}".format(addresses, devops_node.name,
interface))
ip_search = re.search(
'inet (?P<ip>\d+\.\d+\.\d+.\d+/\d+) scope global '
'{0}'.format(interface), addresses)
if ip_search is None:
logger.debug("Ip show output does not"
" match in regex. Current value is None")
continue
ip = ip_search.group('ip')
logger.debug("Founded ip is {0}".format(ip))
logger.debug("Start ip {0} deletion on node {1} and "
"interface {2} ".format(ip, devops_node.name,
interface))
self.fuel_web.ip_address_del(
node_name=devops_node.name,
interface=interface,
ip=ip, namespace='haproxy')
# The ip should be restored
ip_assigned = lambda nodes: \
any([ip in self.fuel_web.ip_address_show(
n.name, 'haproxy', interface)
for n in nodes])
logger.debug("Waiting while deleted ip restores ...")
wait(lambda: ip_assigned(slaves), timeout=30)
assert_true(ip_assigned(slaves),
"IP isn't restored restored.")
ips_amount += 1
time.sleep(60)
# Run OSTF tests
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'],
should_fail=1)
# Revert initial state. VIP could be moved to other controller
self.env.revert_snapshot("deploy_ha")
assert_equal(ips_amount, 2,
'Not all VIPs were found: expected 2, found {0}'.format(
ips_amount))
@test(depends_on_groups=['deploy_ha'],
groups=["ha_mysql_termination"])
@log_snapshot_on_error
def ha_mysql_termination(self):
"""Terminate mysql on all controllers one by one
Scenario:
1. Terminate mysql
2. Wait while it is being restarted
3. Verify it is restarted
4. Go to another controller
5. Run OSTF
Duration 15m
"""
self.env.revert_snapshot("deploy_ha")
for devops_node in self.env.get_virtual_environment(
).nodes().slaves[:3]:
remote = self.fuel_web.get_ssh_for_node(devops_node.name)
logger.info('Terminating MySQL on {0}'.format(devops_node.name))
try:
remote.check_call('pkill -9 -x "mysqld"')
except Exception:
logger.error('MySQL on {0} is down after snapshot revert'.
format(devops_node.name))
raise
check_mysql(remote, devops_node.name)
cluster_id = self.fuel_web.client.get_cluster_id(
self.__class__.__name__)
self.fuel_web.wait_mysql_galera_is_up(['slave-01', 'slave-02',
'slave-03'])
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on_groups=['deploy_ha'],
groups=["ha_haproxy_termination"])
@log_snapshot_on_error
def ha_haproxy_termination(self):
"""Terminate haproxy on all controllers one by one
Scenario:
1. Terminate haproxy
2. Wait while it is being restarted
3. Verify it is restarted
4. Go to another controller
5. Run OSTF
Duration 25m
"""
self.env.revert_snapshot("deploy_ha")
for devops_node in self.env.get_virtual_environment(
).nodes().slaves[:3]:
remote = self.fuel_web.get_ssh_for_node(devops_node.name)
remote.check_call('kill -9 $(pidof haproxy)')
haproxy_started = lambda: \
len(remote.check_call(
'ps aux | grep "/usr/sbin/haproxy"')['stdout']) == 3
wait(haproxy_started, timeout=20)
assert_true(haproxy_started(), 'haproxy was not restarted')
cluster_id = self.fuel_web.client.get_cluster_id(
self.__class__.__name__)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on_groups=['deploy_ha'],
groups=["ha_pacemaker_configuration"])
@log_snapshot_on_error
def ha_pacemaker_configuration(self):
"""Verify resources are configured
Scenario:
1. SSH to controller node
2. Verify resources are configured
3. Go to next controller
Duration 15m
"""
self.env.revert_snapshot("deploy_ha")
devops_ctrls = self.env.get_virtual_environment(
).nodes().slaves[:3]
pcm_nodes = ' '.join(self.fuel_web.get_pcm_nodes(
self.env.get_virtual_environment(
).nodes().slaves[0].name, pure=True)['Online'])
logger.debug("pacemaker nodes are {0}".format(pcm_nodes))
for devops_node in devops_ctrls:
config = self.fuel_web.get_pacemaker_config(devops_node.name)
logger.debug("config on node {0} is {1}".format(
devops_node.name, config))
assert_not_equal(re.search(
"vip__public\s+\(ocf::fuel:ns_IPaddr2\):\s+Started\s+"
"Clone Set:\s+clone_ping_vip__public\s+\[ping_vip__public\]"
"\s+Started:\s+\[ {0} \]".format(pcm_nodes), config), None,
'public vip is not configured right')
assert_true(
'vip__management (ocf::fuel:ns_IPaddr2): Started'
in config, 'vip management is not configured right')
assert_not_equal(re.search(
"Clone Set: clone_p_(heat|openstack-heat)-engine"
" \[p_(heat|openstack-heat)-engine\]\s+"
"Started: \[ {0} \]".format(
pcm_nodes), config), None,
'heat engine is not configured right')
assert_not_equal(re.search(
"Clone Set: clone_p_mysql \[p_mysql\]\s+Started:"
" \[ {0} \]".format(pcm_nodes), config), None,
'mysql is not configured right')
assert_not_equal(re.search(
"Clone Set: clone_p_haproxy \[p_haproxy\]\s+Started:"
" \[ {0} \]".format(pcm_nodes), config), None,
'haproxy is not configured right')
@test(enabled=False, depends_on_groups=['deploy_ha'],
groups=["ha_pacemaker_restart_heat_engine"])
@log_snapshot_on_error
def ha_pacemaker_restart_heat_engine(self):
"""Verify heat engine service is restarted
by pacemaker on amqp connection loss
Scenario:
1. SSH to any controller
2. Check heat-engine status
3. Block heat-engine amqp connections
4. Check heat-engine was stopped on current controller
5. Unblock heat-engine amqp connections
6. Check heat-engine process is running with new pid
7. Check amqp connection re-appears for heat-engine
Duration 15m
"""
self.env.revert_snapshot("deploy_ha")
ocf_success = "DEBUG: OpenStack Orchestration Engine" \
" (heat-engine) monitor succeeded"
ocf_error = "ERROR: OpenStack Heat Engine is not connected to the" \
" AMQP server: AMQP connection test returned 1"
heat_name = 'heat-engine'
ocf_status = \
'script -q -c "OCF_ROOT=/usr/lib/ocf' \
' /usr/lib/ocf/resource.d/fuel/{0}' \
' monitor 2>&1"'.format(heat_name)
remote = self.fuel_web.get_ssh_for_node(
self.env.get_virtual_environment(
).nodes().slaves[0].name)
pid = ''.join(remote.execute('pgrep heat-engine')['stdout'])
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
assert_true(ocf_success in get_ocf_status,
"heat engine is not succeeded, status is {0}".format(
get_ocf_status))
assert_true(len(remote.execute(
"netstat -nap | grep {0} | grep :5673".
format(pid))['stdout']) > 0, 'There is no amqp connections')
remote.execute("iptables -I OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED -j DROP")
wait(lambda: len(remote.execute
("netstat -nap | grep {0} | grep :5673".
format(pid))['stdout']) == 0, timeout=300)
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
logger.info('ocf status after blocking is {0}'.format(
get_ocf_status))
assert_true(ocf_error in get_ocf_status,
"heat engine is running, status is {0}".format(
get_ocf_status))
remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED")
_wait(lambda: assert_true(ocf_success in ''.join(
remote.execute(ocf_status)['stdout']).rstrip()), timeout=240)
newpid = ''.join(remote.execute('pgrep heat-engine')['stdout'])
assert_true(pid != newpid, "heat pid is still the same")
get_ocf_status = ''.join(remote.execute(
ocf_status)['stdout']).rstrip()
assert_true(ocf_success in get_ocf_status,
"heat engine is not succeeded, status is {0}".format(
get_ocf_status))
assert_true(len(
remote.execute("netstat -nap | grep {0} | grep :5673".format(
newpid))['stdout']) > 0)
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.run_ostf(cluster_id=cluster_id)
@test(depends_on_groups=['deploy_ha'],
groups=["ha_check_monit"])
@log_snapshot_on_error
def ha_check_monit(self):
"""Verify monit restarted nova
service if it was killed
Scenario:
1. SSH to every compute node in cluster
2. Kill nova-compute service
3. Check service is restarted by monit
Duration 25m
"""
self.env.revert_snapshot("deploy_ha")
for devops_node in self.env.get_virtual_environment(
).nodes().slaves[3:5]:
remote = self.fuel_web.get_ssh_for_node(devops_node.name)
remote.execute("kill -9 `pgrep nova-compute`")
wait(
lambda: len(remote.execute('pgrep nova-compute')['stdout'])
== 1, timeout=120)
assert_true(len(remote.execute('pgrep nova-compute')['stdout'])
== 1, 'Nova service was not restarted')
assert_true(len(remote.execute(
"grep \"nova-compute.*trying to restart\" "
"/var/log/monit.log")['stdout']) > 0,
'Nova service was not restarted')

View File

@@ -0,0 +1,303 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test import settings
from fuelweb_test.tests import base_test_case
from fuelweb_test.helpers import os_actions
@test(groups=["huge_environments"])
class HugeEnvironments(base_test_case.TestBasic):
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9],
groups=["nine_nodes_mixed"])
@log_snapshot_on_error
def nine_nodes_mixed(self):
"""Deploy cluster with mixed roles on 9 nodes in HA mode
Scenario:
1. Create cluster
2. Add 4 nodes as controllers with mongo roles
3. Add 5 nodes as computes with ceph OSD roles
4. Turn on Sahara and Ceilometer
5. Deploy the cluster
6. Check networks and OSTF
Duration 150m
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_9_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'volumes_ceph': True,
'images_ceph': True,
'volumes_lvm': False,
'sahara': True,
'ceilometer': True
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'mongo'],
'slave-02': ['controller', 'mongo'],
'slave-03': ['controller', 'mongo'],
'slave-04': ['controller', 'mongo'],
'slave-05': ['compute', 'ceph-osd'],
'slave-06': ['compute', 'ceph-osd'],
'slave-07': ['compute', 'ceph-osd'],
'slave-08': ['compute', 'ceph-osd'],
'slave-09': ['compute', 'ceph-osd']
}
)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id,
timeout=120 * 60,
interval=30)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9],
groups=["nine_nodes_separate_roles"])
@log_snapshot_on_error
def nine_nodes_separate_roles(self):
"""Deploy cluster with separate roles on 9 nodes in HA mode with GRE
Scenario:
1. Create cluster
2. Add 3 nodes as controllers
3. Add 2 nodes as compute
4. Add 1 node as cinder and 1 as mongo
5. Add 2 nodes as ceph
6. Turn on Sahara and Ceilometer
7. Deploy the cluster
8. Check networks and OSTF
Duration 100m
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ready_with_9_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
'volumes_ceph': True,
'images_ceph': False,
'volumes_lvm': False,
'sahara': True,
'ceilometer': True,
'net_provider': 'neutron',
'net_segment_type': 'gre'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute'],
'slave-06': ['cinder'],
'slave-07': ['mongo'],
'slave-08': ['ceph-osd'],
'slave-09': ['ceph-osd'],
}
)
# Cluster deploy
self.fuel_web.deploy_cluster_wait(cluster_id,
timeout=120 * 60,
interval=30)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'],
should_fail=1)
@test(groups=["huge_environments", "huge_ha_neutron", "huge_scale"])
class HugeHaNeutron(base_test_case.TestBasic):
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9],
groups=["huge_ha_neutron_gre_ceph_ceilometer_rados"])
@log_snapshot_on_error
def huge_ha_neutron_gre_ceph_ceilometer_rados(self):
"""Deploy cluster in HA mode with Neutron GRE, RadosGW
Scenario:
1. Create cluster
2. Add 3 nodes with controller and ceph role
3. Add 3 nodes with compute and ceph roles
4. Add 3 nodes with mongo roles
5. Deploy the cluster
6. Verify smiles count
7. Run OSTF
Duration 100m
"""
self.env.revert_snapshot("ready_with_9_slaves")
data = {
'volumes_lvm': False,
'volumes_ceph': True,
'images_ceph': True,
'objects_ceph': True,
'ceilometer': True,
'net_provider': 'neutron',
'net_segment_type': 'gre',
'tenant': 'haGreCephHugeScale',
'user': 'haGreCephHugeScale',
'password': 'haGreCephHugeScale'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'ceph-osd'],
'slave-02': ['controller', 'ceph-osd'],
'slave-03': ['controller', 'ceph-osd'],
'slave-04': ['compute', 'ceph-osd'],
'slave-05': ['compute', 'ceph-osd'],
'slave-06': ['compute', 'ceph-osd'],
'slave-07': ['mongo'],
'slave-08': ['mongo'],
'slave-09': ['mongo']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'],
data['password'],
data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=15, networks_count=2, timeout=300)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
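# additionally run the long ceilometer platform tests one at a time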
test_class_main = ('fuel_health.tests.platform_tests.'
'test_ceilometer.'
'CeilometerApiPlatformTests')
tests_names = ['test_check_alarm_state',
'test_create_sample']
test_classes = ['{0}.{1}'.format(test_class_main, test_name)
for test_name in tests_names]
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 20)
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9],
groups=["huge_ha_neutron_vlan_ceph_ceilometer_rados"])
@log_snapshot_on_error
def huge_ha_neutron_vlan_ceph_ceilometer_rados(self):
"""Deploy cluster in HA mode with Neutron VLAN, RadosGW
Scenario:
1. Create cluster
2. Add 3 nodes with controller and ceph role
3. Add 3 nodes with compute and ceph roles
4. Add 3 nodes with mongo roles
5. Deploy the cluster
6. Verify smiles count
7. Run OSTF
Duration 100m
"""
self.env.revert_snapshot("ready_with_9_slaves")
data = {
'ceilometer': True,
'volumes_ceph': True,
'images_ceph': True,
'volumes_lvm': False,
'objects_ceph': True,
'net_provider': 'neutron',
'net_segment_type': 'vlan',
'tenant': 'haVlanCephHugeScale',
'user': 'haVlanCephHugeScale',
'password': 'haVlanCephHugeScale'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings=data
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'ceph-osd'],
'slave-02': ['controller', 'ceph-osd'],
'slave-03': ['controller', 'ceph-osd'],
'slave-04': ['compute', 'ceph-osd'],
'slave-05': ['compute', 'ceph-osd'],
'slave-06': ['compute', 'ceph-osd'],
'slave-07': ['mongo'],
'slave-08': ['mongo'],
'slave-09': ['mongo']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'],
data['password'],
data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=15, networks_count=2, timeout=300)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
test_class_main = ('fuel_health.tests.platform_tests.'
'test_ceilometer.'
'CeilometerApiPlatformTests')
tests_names = ['test_check_alarm_state',
'test_create_sample']
test_classes = ['{0}.{1}'.format(test_class_main, test_name)
for test_name in tests_names]
for test_name in test_classes:
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['platform_tests'],
test_name=test_name, timeout=60 * 20)

View File

@ -0,0 +1,97 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.helpers.helpers import wait
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["repeatable_image_based", "image_based"])
class RepeatableImageBased(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["repeatable_image_based", "image_based"])
@log_snapshot_on_error
def repeatable_image_based(self):
"""Provision new cluster many times after deletion the old one
Scenario:
1. Create HA cluster
2. Add 1 controller, 2 compute and 2 cinder nodes
3. Deploy the cluster
4. Delete cluster
5. Create another HA cluster
6. Create snapshot of environment
7. Revert the snapshot and provision the cluster 10 times
Duration 60m
"""
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": 'gre'})
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['compute'],
'slave-04': ['cinder'],
'slave-05': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.client.delete_cluster(cluster_id)
# wait for the nodes to go down for reboot
wait(lambda: not self.fuel_web.client.list_nodes(), timeout=5 * 60)
# wait for nodes to reappear after bootstrap
wait(lambda: len(self.fuel_web.client.list_nodes()) == 5,
timeout=5 * 60)
for node in self.fuel_web.client.list_nodes():
wait(lambda: self.fuel_web.is_node_discovered(node),
timeout=2 * 60)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": 'vlan'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
)
self.env.make_snapshot("deploy_after_delete", is_make=True)
for _ in range(10):
self.env.revert_snapshot("deploy_after_delete")
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.provisioning_cluster_wait(cluster_id)

View File

@ -0,0 +1,96 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_equal
from proboscis import test
from fuelweb_test.helpers import common
from fuelweb_test.helpers import os_actions
from fuelweb_test import settings
from fuelweb_test import logger
from fuelweb_test.tests import base_test_case
@test(groups=["thread_non_func_1"])
class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
groups=["non_functional",
"deploy_ha_one_controller_flat_master_node_fail"])
def deploy_ha_one_controller_flat_master_node_fail(self):
"""Deploy HA cluster with nova-network and check it without master node
Scenario:
1. Create cluster in ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Verify networks
7. Verify network configuration on controller
8. Run OSTF
9. Shut down master node
10. Run openstack verification
Duration 1000m
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
controller_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(controller_ip)
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.verify_network(cluster_id)
logger.info('PASS DEPLOYMENT')
self.fuel_web.run_ostf(
cluster_id=cluster_id)
logger.info('PASS OSTF')
logger.info('Destroy admin node...')
self.env.get_virtual_environment().nodes().admin.destroy()
logger.info('Admin node destroyed')
common_func = common.Common(
controller_ip,
settings.SERVTEST_USERNAME,
settings.SERVTEST_PASSWORD,
settings.SERVTEST_TENANT)
# create instance
server = common_func.create_instance()
# get_instance details
details = common_func.get_instance_detail(server)
assert_equal(details.name, 'test_instance')
# Check the instance is active
common_func.verify_instance_status(server, 'ACTIVE')
# delete instance
common_func.delete_instance(server)

View File

@ -0,0 +1,367 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.helpers.helpers import wait
from devops.error import TimeoutError
from proboscis.asserts import assert_equal
from proboscis import SkipTest
from proboscis import test
from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers.decorators import retry
from fuelweb_test.helpers import os_actions
from fuelweb_test.tests import base_test_case
@test(groups=["thread_5", "ha"])
class TestNeutronFailover(base_test_case.TestBasic):
@classmethod
def get_node_with_dhcp(cls, self, os_conn, net_id):
node = os_conn.get_node_with_dhcp_for_network(net_id)[0]
node_fqdn = self.fuel_web.get_fqdn_by_hostname(node)
logger.debug('node name with dhcp is {0}'.format(node))
devops_node = self.fuel_web.find_devops_node_by_nailgun_fqdn(
node_fqdn, self.env.get_virtual_environment(
).nodes().slaves[0:6])
return devops_node
@classmethod
def get_node_with_l3(cls, self, node_with_l3):
node_with_l3_fqdn = self.fuel_web.get_fqdn_by_hostname(node_with_l3)
logger.debug("new node with l3 is {0}".format(node_with_l3))
devops_node = self.fuel_web.find_devops_node_by_nailgun_fqdn(
node_with_l3_fqdn,
self.env.get_virtual_environment().nodes().slaves[0:6])
return devops_node
@classmethod
def create_instance_with_keypair(cls, os_conn, remote):
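# create a keypair and keep the private key on the controller for ssh access to the instance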
remote.execute(
'. openrc;'
' nova keypair-add instancekey > /root/.ssh/webserver_rsa')
remote.execute('chmod 400 /root/.ssh/webserver_rsa')
instance = os_conn.create_server_for_migration(
neutron=True, key_name='instancekey')
return instance
@classmethod
def reschedule_router_manually(cls, os_conn, router_id):
l3_agent_id = os_conn.get_l3_agent_ids(router_id)[0]
logger.debug("l3 agent id is {0}".format(l3_agent_id))
another_l3_agent = os_conn.get_available_l3_agents_ids(
l3_agent_id)[0]
logger.debug("another l3 agent is {0}".format(another_l3_agent))
os_conn.remove_l3_from_router(l3_agent_id, router_id)
os_conn.add_l3_to_router(another_l3_agent, router_id)
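# wait until the router is hosted by an l3 agent again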
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60 * 5)
@classmethod
def check_instance_connectivity(cls, remote, dhcp_namespace, instance_ip):
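# ssh to the instance through the dhcp namespace and ping 8.8.8.8 to check external connectivity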
cmd = ". openrc; ip netns exec {0} ssh -i /root/.ssh/webserver_rsa" \
" -o 'StrictHostKeyChecking no'" \
" cirros@{1} \"ping -c 1 8.8.8.8\"".format(dhcp_namespace,
instance_ip)
wait(lambda: remote.execute(cmd)['exit_code'] == 0, timeout=2 * 60)
res = remote.execute(cmd)
assert_equal(0, res['exit_code'],
'instance has no connectivity, exit code {0}'.format(
res['exit_code']))
@test(depends_on=[base_test_case.SetupEnvironment.prepare_release],
groups=["deploy_ha_neutron"])
@log_snapshot_on_error
def deploy_ha_neutron(self):
"""Deploy cluster in HA mode, Neutron with GRE segmentation
Scenario:
1. Create cluster. HA, Neutron with GRE segmentation
2. Add 3 nodes with controller roles
3. Add 2 nodes with compute roles
4. Add 1 node with cinder role
5. Deploy the cluster
Duration 90m
Snapshot deploy_ha_neutron
"""
try:
self.check_run('deploy_ha_neutron')
except SkipTest:
return
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(
self.env.get_virtual_environment().nodes().slaves[:6])
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": 'gre'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute'],
'slave-06': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.env.make_snapshot("deploy_ha_neutron", is_make=True)
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_l3_migration"])
@log_snapshot_on_error
def neutron_l3_migration(self):
"""Check l3-agent rescheduling after l3-agent dies
Scenario:
1. Revert snapshot with neutron cluster
2. Manually reschedule router from primary controller
to another one
3. Stop l3-agent on new node with pcs
4. Check l3-agent was rescheduled
5. Check network connectivity from instance via
dhcp namespace
6. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
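# find the network namespace that serves dhcp for the private network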
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
logger.debug('dhcp namespace is {0}'.format(dhcp_namespace))
instance_ip = \
self.create_instance_with_keypair(
os_conn, remote).addresses['net04'][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
router_id = os_conn.get_routers_ids()[0]
self.reschedule_router_manually(os_conn, router_id)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0]
new_devops = self.get_node_with_l3(self, node_with_l3)
new_remote = self.env.get_ssh_to_remote_by_name(new_devops.name)
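# ban the l3 agent on this node so pacemaker stops it there and the router is rescheduled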
new_remote.execute("pcs resource ban p_neutron-l3-agent {0}".format(
node_with_l3))
try:
wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts(
router_id)[0], timeout=60 * 3)
except TimeoutError:
raise TimeoutError(
"l3 agent wasn't banned, it is still {0}".format(
os_conn.get_l3_agent_hosts(router_id)[0]))
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
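# lift the ban so the l3 agent may run on this node again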
new_remote.execute("pcs resource clear p_neutron-l3-agent {0}".
format(node_with_l3))
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_l3_migration_after_reset"])
@log_snapshot_on_error
def neutron_l3_migration_after_reset(self):
"""Check l3-agent rescheduling after reset non-primary controller
Scenario:
1. Revert snapshot with neutron cluster
2. Manually reschedule router from primary controller
to another one
3. Reset controller with l3-agent
4. Check l3-agent was rescheduled
5. Check network connectivity from instance via
dhcp namespace
6. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
logger.debug('dhcp namespace is {0}'.format(dhcp_namespace))
instance_ip = \
self.create_instance_with_keypair(
os_conn, remote).addresses['net04'][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
router_id = os_conn.get_routers_ids()[0]
self.reschedule_router_manually(os_conn, router_id)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0]
new_devops = self.get_node_with_l3(self, node_with_l3)
self.fuel_web.warm_restart_nodes([new_devops])
try:
wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts(
router_id)[0], timeout=60 * 3)
except TimeoutError:
raise TimeoutError(
"l3 agent wasn't rescheduled, it is still {0}".format(
os_conn.get_l3_agent_hosts(router_id)[0]))
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_l3_migration_after_destroy"])
@log_snapshot_on_error
def neutron_l3_migration_after_destroy(self):
"""Check l3-agent rescheduling after destroy non-primary controller
Scenario:
1. Revert snapshot with neutron cluster
2. Manually reschedule router from primary controller
to another one
3. Destroy controller with l3-agent
4. Check l3-agent was rescheduled
5. Check network connectivity from instance via
dhcp namespace
6. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
logger.debug('dhcp namespace is {0}'.format(dhcp_namespace))
instance_ip = \
self.create_instance_with_keypair(
os_conn, remote).addresses['net04'][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
router_id = os_conn.get_routers_ids()[0]
self.reschedule_router_manually(os_conn, router_id)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0]
new_devops = self.get_node_with_l3(self, node_with_l3)
new_devops.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
new_devops)['online'], timeout=60 * 10)
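# wait for galera to recover on the surviving controllers before checking rescheduling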
self.fuel_web.wait_mysql_galera_is_up(
[n.name for n in
set(self.env.get_virtual_environment(
).nodes().slaves[:3]) - {new_devops}])
try:
wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts(
router_id)[0], timeout=60 * 3)
except TimeoutError:
raise TimeoutError(
"l3 agent wasn't rescheduled, it is still {0}".format(
os_conn.get_l3_agent_hosts(router_id)[0]))
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
@retry(count=3, delay=120)
def run_single_test(cluster_id):
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name='fuel_health.tests.smoke.'
'test_neutron_actions.TestNeutron.'
'test_check_neutron_objects_creation')
run_single_test(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'],
should_fail=1,
failed_test_name=['Check that required services are running'])
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_packets_drops_stat"])
@log_snapshot_on_error
def neutron_packets_drop_stat(self):
"""Check packets drops statistic when size is equal to MTU
Scenario:
1. Revert snapshot with neutron cluster
2. Create instance, assign floating IP to it
3. Send ICMP packets from controller to instance sized to fill the MTU
4. If all 7 requests are answered within the 10 second deadline,
the test is considered passed
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
instance = os_conn.create_server_for_migration(neutron=True)
floating_ip = os_conn.assign_floating_ip(instance)
logger.debug("instance floating ip is {0}".format(floating_ip.ip))
remote = self.env.get_ssh_to_remote_by_name('slave-01')
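# read the mtu of the interface that routes traffic to the floating ip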
mtu_cmd = r"cat /sys/class/net/$(ip r g {0} |" \
r" sed -rn" \
r" 's/.*dev\s+(\S+)\s.*/\1/p')/mtu".format(floating_ip.ip)
mtu = ''.join(remote.execute(mtu_cmd)['stdout'])
logger.debug('mtu is equal to {0}'.format(mtu))
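# 28 bytes = 20 byte ip header + 8 byte icmp header, so the icmp payload fills the mtu exactly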
cmd = "ping -q -s {0} -c 7 -w 10 {1}".format(int(mtu) - 28,
floating_ip.ip)
res = remote.execute(cmd)
assert_equal(0, res['exit_code'],
'most packets were dropped, result is {0}'.format(res))

View File

@ -0,0 +1,112 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test import settings as hlp_date
from fuelweb_test.tests import base_test_case
@test(groups=["ostf_repeatable_tests"])
class OstfRepeatableTests(base_test_case.TestBasic):
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
groups=["create_delete_ip_n_times_nova_vlan"])
@log_snapshot_on_error
def create_delete_ip_n_times_nova_vlan(self):
"""Deploy cluster in ha mode with VLAN Manager
Scenario:
1. Create cluster in ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Set up cluster to use Network VLAN manager with 8 networks
5. Deploy the cluster
6. Run network verification
7. Run the 'Check network connectivity
from instance via floating IP' test n times
Duration 1000m
Snapshot create_delete_ip_n_times_nova_vlan
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_date.DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf_repeatably(cluster_id)
self.env.make_snapshot("create_delete_ip_n_times_nova_vlan")
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
groups=["create_delete_ip_n_times_nova_flat"])
@log_snapshot_on_error
def deploy_create_delete_ip_n_times_nova_flat(self):
"""Deploy HA cluster, check connectivity from instance n times
Scenario:
1. Create cluster in ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Verify networks
6. Run the 'Check network connectivity
from instance via floating IP' test n times
Duration 1000m
Snapshot create_delete_ip_n_times_nova_flat
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_date.DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf_repeatably(cluster_id)
self.env.make_snapshot("create_delete_ip_n_times_nova_flat")
@test(groups=["run_ostf_n_times_against_custom_environment"])
@log_snapshot_on_error
def run_ostf_n_times_against_custom_deployment(self):
cluster_id = self.fuel_web.client.get_cluster_id(
hlp_date.DEPLOYMENT_NAME)
self.fuel_web.run_ostf_repeatably(cluster_id)

View File

@ -0,0 +1,243 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test import logger
from fuelweb_test import ostf_test_mapping as map_ostf
from fuelweb_test import settings
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from devops.helpers.helpers import wait
from proboscis import SkipTest
from proboscis import test
@test(groups=["thread_3", "ceph"])
class CephRestart(TestBasic):
@test(depends_on_groups=['ceph_ha_one_controller_with_cinder'],
groups=["ceph_ha_one_controller_with_cinder_restart"])
@log_snapshot_on_error
def ceph_ha_one_controller_with_cinder_restart(self):
"""Restart cluster with ceph and cinder in ha mode
Scenario:
1. Create cluster in ha mode with 1 controller
2. Add 1 node with controller and ceph OSD roles
3. Add 1 node with compute role
4. Add 2 nodes with cinder and ceph OSD roles
5. Deploy the cluster
6. Warm restart
7. Check ceph status
Duration 90m
Snapshot None
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ceph_ha_one_controller_with_cinder")
cluster_id = self.fuel_web.get_last_created_cluster()
# Warm restart
self.fuel_web.warm_restart_nodes(
self.env.get_virtual_environment().nodes().slaves[:4])
self.fuel_web.check_ceph_status(cluster_id)
# Wait until Cinder services UP on a controller
self.fuel_web.wait_cinder_is_up(['slave-01'])
try:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Create volume and attach it to instance'))
except AssertionError:
logger.debug("Test failed from first probe,"
" we sleep 60 second try one more time "
"and if it fails again - test will fails ")
time.sleep(60)
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Create volume and attach it to instance'))
self.fuel_web.run_ostf(cluster_id=cluster_id)
@test(depends_on_groups=['ceph_ha'],
groups=["ceph_ha_restart"])
@log_snapshot_on_error
def ceph_ha_restart(self):
"""Deploy ceph with in HA mode
Scenario:
1. Create cluster
2. Add 3 nodes with controller and ceph OSD roles
3. Add 1 node with ceph OSD roles
4. Add 2 nodes with compute and ceph OSD roles
5. Deploy the cluster
6. Check ceph status
7. Cold restart
8. Check ceph status
Duration 30m
Snapshot ceph_ha
"""
if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
raise SkipTest()
self.env.revert_snapshot("ceph_ha")
# Wait until MySQL Galera is UP on some controller
self.fuel_web.wait_mysql_galera_is_up(['slave-01'])
# Wait until Cinder services UP on a controller
self.fuel_web.wait_cinder_is_up(['slave-01'])
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.check_ceph_status(cluster_id)
# Run ostf
self.fuel_web.run_ostf(cluster_id=cluster_id)
# Destroy osd-node
self.env.get_virtual_environment(
).nodes().slaves[5].destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
self.env.get_virtual_environment(
).nodes().slaves[5])['online'], timeout=30 * 8)
offline_nodes = [self.fuel_web.get_nailgun_node_by_devops_node(
self.env.get_virtual_environment(
).nodes().slaves[5])['id']]
self.fuel_web.check_ceph_status(cluster_id, offline_nodes)
self.fuel_web.run_ostf(cluster_id=cluster_id)
# Destroy compute node
self.env.get_virtual_environment(
).nodes().slaves[4].destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
self.env.get_virtual_environment(
).nodes().slaves[4])['online'], timeout=30 * 8)
offline_nodes.append(self.fuel_web.get_nailgun_node_by_devops_node(
self.env.get_virtual_environment(
).nodes().slaves[4])['id'])
self.fuel_web.check_ceph_status(cluster_id, offline_nodes)
self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
# Cold restart
self.fuel_web.cold_restart_nodes(
self.env.get_virtual_environment().nodes().slaves[:4])
self.fuel_web.check_ceph_status(cluster_id, offline_nodes)
# Wait until MySQL Galera is UP on some controller
self.fuel_web.wait_mysql_galera_is_up(['slave-01'])
# Wait until Cinder services UP on a controller
self.fuel_web.wait_cinder_is_up(['slave-01'])
try:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Create volume and attach it to instance'))
except AssertionError:
logger.debug("Test failed from first probe,"
" we sleep 60 second try one more time "
"and if it fails again - test will fails ")
time.sleep(180)
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Create volume and attach it to instance'))
self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
self.env.make_snapshot("ceph_ha")
@test(groups=["thread_1"])
class HAOneControllerFlatRestart(TestBasic):
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ha_one_controller_flat_warm_restart"])
@log_snapshot_on_error
def ha_one_controller_flat_warm_restart(self):
"""Cold restart for ha one controller environment
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Turn off all nodes
7. Start all nodes
8. Run OSTF
9. Warm restart
10. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
# Warm restart
self.fuel_web.warm_restart_nodes(
self.env.get_virtual_environment().nodes().slaves[:2])
try:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running'))
except AssertionError:
logger.debug("Test failed from first probe,"
" we sleep 60 second try one more time "
"and if it fails again - test will fails ")
time.sleep(60)
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running'))
self.fuel_web.security.verify_firewall(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)

7
run_tests.sh Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
set -e
set -x
flake8 --ignore=H302,H802 --show-source ./