Update docstring test descriptions

Change docstring test steps in test_admin_node.py in check_rpm_packages_signed()
Change docstring test steps in test_admin_node.py
  in check_remote_packages_and_mos_repositories_signed()
Change docstring test steps in test_cli.py in cli_selected_nodes_deploy()
Change docstring test steps in test_ssl.py in master_node_with_https_only()
Change docstring test steps and show_step() numbers in test_ssl.py
  in endpoints_with_disabled_ssl()
Change docstring test steps and show_step() numbers in test_cli_deploy.py
  in cli_deploy_neutron_tun()
Change docstring test steps and show_step() numbers in test_cli_deploy.py
  in cli_deploy_tasks()
Change docstring test steps and show_step() numbers in test_cli_deploy_ceph.py
  in cli_deploy_ceph_neutron_tun()
Change docstring test steps and show_step() numbers in test_cli_deploy_ceph.py
  in cli_deploy_ceph_neutron_vlan()
Change docstring test steps and show_step() numbers in test_cli_role.py
  in cli_update_role()
Change docstring test steps and show_step() numbers in test_cli_role.py
  in cli_create_role()
Change docstring test steps and show_step() numbers in test_cli_role.py
  in cli_create_role_with_has_primary()
Change docstring test steps and show_step() numbers in test_cli_role.py
  in cli_delete_role()
Change docstring test steps and show_step() numbers in test_cli_role.py
  in cli_incorrect_update_role()
Change docstring test steps in test_ha_tun_group_1.py in tun_controller_base_os()
Change docstring test steps in test_ha_tun_group_1.py
  in tun_ceph_for_images_and_objects()
Change docstring test steps in test_ha_tun_group_2.py in tun_ha_ceph_base_os()
Change docstring test steps in test_ha_tun_group_2.py in tun_ceph_all()
Change docstring test steps in test_ha_tun_group_3.py
  in tun_no_volumes_ceph_for_images_and_ephemeral()
Change docstring test steps in test_ha_tun_group_3.py
  in tun_5_ctrl_ceph_ephemeral()
Change docstring test steps in test_ha_vlan_group_1.py
  in cinder_ceph_for_images()
Change docstring test steps in test_ha_vlan_group_1.py
  in ceph_for_volumes_swift()
Change docstring test steps in test_ha_vlan_group_2.py
  in cinder_ceph_for_ephemeral()
Change docstring test steps in test_ha_vlan_group_2.py
  in cinder_ceph_for_images_ephemeral()
Change docstring test steps in test_ha_vlan_group_3.py
  in no_storage_for_volumes_swift()
Change docstring test steps and show_step() in test_ha_vlan_group_3.py
  in ceph_volumes_ephemeral()
Change docstring test steps in test_ha_vlan_group_4.py
  in four_controllers()
Change docstring test steps and show_step() in test_ha_vlan_group_4.py
  in ceph_rados_gw_no_storage_volumes()
Change docstring test steps and show_step() in test_ha_vlan_group_5.py
  in ceph_for_volumes_images_ephemeral_rados()
Change docstring test steps and show_step() in test_ha_vlan_group_5.py
  in cinder_ceph_for_images_ephemeral_rados()
Change docstring test steps and show_step() in test_ha_vlan_group_6.py
  in ceph_for_images_ephemeral_rados()
Change docstring test steps and show_step() in test_ha_vlan_group_6.py
  in ceph_for_volumes_images_ephemeral()
Change docstring test steps in test_ha_vlan_group_7.py
  in ceph_for_images()
Change docstring test steps in test_ha_vlan_group_7.py
  in ha_vlan_operating_system()
Change docstring test description in test_use_mirror.py
  in deploy_with_custom_mirror()
Change docstring test steps in test_mongo_multirole.py
  in ha_ceilometer_untag_network()
Change docstring test steps and show_step() in test_multirole_group_1.py
  in controller_ceph_and_compute_cinder()
Change docstring test steps and show_step() in test_multirole_group_1.py
  in controller_ceph_cinder_compute_ceph_cinder()
Change docstring test steps and show_step() in test_mixed_os_components.py
  in mixed_components_murano_sahara_ceilometer()
Change docstring test description in test_scale_group_1.py
  in add_controllers_stop()
Change docstring test description in test_scale_group_1.py
  in add_ceph_stop()
Change docstring test description in test_scale_group_2.py
  in replace_primary_controller()
Change docstring test description in test_scale_group_2.py
  in remove_controllers()
Change docstring test steps and show_step() in test_scale_group_3.py
  in add_delete_compute()
Change docstring test steps and show_step() in test_scale_group_3.py
  in add_delete_cinder()
Change docstring test steps in test_scale_group_4.py
  in add_delete_ceph()
Change docstring test steps and show_step() in test_scale_group_4.py
  in add_delete_cinder_ceph()
Correct misspelling in test_scale_group_5.py
Correct misspelling in test_scale_group_6.py
Add show_step() in test_deploy_platform_components.py
Change docstring test steps and show_step() in test_deploy_platform_components.py
  in acceptance_deploy_platform_components_ceilometer()
Change docstring test steps in test_separate_db_ceph.py
  in separate_db_ceph_service()
Change docstring test steps in test_separate_keystone_ceph.py
  in separate_keystone_ceph_service()
Change docstring test steps in test_separate_rabbitmq_ceph.py
  in separate_rabbit_ceph_service()
Change docstring test steps in test_failover_group_1.py
  in lock_db_access_from_primary_controller()
Change docstring test steps in test_failover_group_1.py
  in recovery_neutron_agents_after_restart()
Change docstring test steps in test_failover_group_1.py
  in safe_reboot_primary_controller()
Change docstring test steps in test_failover_group_1.py
  in hard_reset_primary_controller()
Change docstring test steps and swap order of OSTF tests and network verification
  in test_failover_group_1.py in power_outage_cinder_cluster()
Change docstring test steps in test_failover_group_2.py
  in safe_reboot_primary_controller_ceph()
Change docstring test steps and show_step() in test_failover_group_2.py
  in hard_reboot_primary_controller_ceph()
Change docstring test steps in test_failover_mongo.py
  in kill_mongo_processes()
Change docstring test steps in test_failover_mongo.py
  in close_connections_for_mongo()
Change docstring test steps in test_failover_mongo.py
  in shut_down_mongo_node()

Change-Id: I570478614682d237fd95c67d51597a35b94c610c
Closes-Bug: #1560931
Andrey Lavrentyev 2016-03-29 17:11:38 +03:00
parent 85e02c04df
commit fcdb73a51b
33 changed files with 790 additions and 564 deletions
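A note on the pattern enforced throughout this change: show_step(n) logs line n of the
test docstring's Scenario, so whenever a docstring gains, loses, or reorders steps, every
show_step() call in the test body must be renumbered to match. A minimal sketch of that
contract (a simplified stand-in, not the real fuelweb_test implementation; the
_current_test_name attribute is an assumed detail):

import logging

logger = logging.getLogger(__name__)

class ScenarioSteps(object):
    """Simplified stand-in for TestBasic.show_step()."""

    def show_step(self, step, initialize=False):
        # initialize is kept only for signature parity with the tests below.
        doc = getattr(self, self._current_test_name).__doc__  # assumed attr
        scenario = [line.strip() for line in doc.splitlines()
                    if line.strip().split('.', 1)[0].isdigit()]
        # A stale step number here logs the wrong scenario line, which is
        # exactly what the renumbering in this commit fixes.
        logger.info('Step %s: %s', step, scenario[step - 1])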

test_admin_node.py

@@ -463,12 +463,13 @@ class GPGSigningCheck(TestBasic):
groups=['test_check_rpm_packages_signed'])
@log_snapshot_after_test
def check_rpm_packages_signed(self):
""" Check that local rpm packages are signed
"""Check that local rpm packages are signed
Scenario:
1. Revert snapshot with installed master
2. Import publick mirantis GPG key
3. Check all local rpm packet and verify it
1. Create environment using fuel-qa
2. Import public GPG key for rpm verification by executing:
rpm --import gpg-pub-key
3. Check all local rpm packets and verify it
Duration: 15 min
"""
@@ -500,15 +501,15 @@ class GPGSigningCheck(TestBasic):
groups=['test_remote_packages_and_mos_repositories_signed'])
@log_snapshot_after_test
def check_remote_packages_and_mos_repositories_signed(self):
""" Check that remote packages and MOS repositories are signed
"""Check that remote packages and MOS repositories are signed
Scenario:
1. Revert snapshot with installed master
2. Import mirantis publick GPG key for rpm
3. Import mirantis publick GPG key for gpg
4. Download repomd.xml.asc and repomd.xml and verify they
5. Download Release and Releasee.gpg and verify they
6. Download randomly choosed .rpm file and verify it
1. Create environment using fuel-qa
2. Import GPG key for rpm
3. Import GPG key for gpg
4. Download repomd.xml.asc and repomd.xml and verify them
5. Download Release and Release.gpg and verify them
6. Download randomly chosen .rpm file and verify it
Duration: 15 min
"""

test_cli.py

@@ -98,23 +98,28 @@ class CommandLineTest(test_cli_base.CommandLine):
groups=["cli_selected_nodes_deploy"])
@log_snapshot_after_test
def cli_selected_nodes_deploy(self):
"""Create and deploy environment using Fuel CLI
"""Create and deploy environment using Fuel CLI and check CN name
is equal to the public name passed via UI (user-owned cert)
Scenario:
1. Revert snapshot "ready_with_3_slaves"
1. Create environment using fuel-qa
2. Create a cluster using Fuel CLI
3. Add floating ranges for public network
4. Provision a controller node using Fuel CLI
5. Provision two compute+cinder nodes using Fuel CLI
6. Deploy the controller node using Fuel CLI
7. Deploy the compute+cinder nodes using Fuel CLI
8. Compare floating ranges
9. Check that all services work by 'https'
10. Check that all services have domain name
11. Check CNs for user owned certificate
12. Check keypairs for user owned ceritificate
13. Run OSTF
14. Make snapshot "cli_selected_nodes_deploy"
8. Verify network
9. Compare floating ranges
10. Check that all services work by 'https'
11. Check that all services have domain name
12. Find 'CN' value at the output:
CN value is equal to the value specified
at certificate provided via Fuel UI
13. Find keypair data at the output:
Keypair data is equal to the value specified
at certificate provided via Fuel UI
14. Run OSTF
Duration 50m
"""

test_ssl.py

@@ -35,10 +35,11 @@ class SSL_Tests(TestBasic):
"""Check cluster creation with SSL is enabled only on Master node
Scenario:
1. Revert the snapshot "ready" with forced https
2. Check that we cannot connect to master node by http(8000 port)
3. Bootstrap slaves nodes and
check here that they appears in nailgun
1. Create environment using fuel-qa
2. Force master node to use https
3. Check that we cannot connect to master node by http(8000 port)
4. Bootstrap slaves nodes and
check here that they appear in nailgun
Duration 30m
"""
@@ -46,12 +47,13 @@ class SSL_Tests(TestBasic):
self.env.revert_snapshot("ready")
admin_ip = self.ssh_manager.admin_ip
self.show_step(2)
self.show_step(3)
connection = http_client.HTTPConnection(admin_ip, 8000)
connection.request("GET", "/")
response = connection.getresponse()
assert_equal(str(response.status), '301',
message="HTTP was not disabled for master node")
self.show_step(3)
self.show_step(4)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:2])
nodes = self.fuel_web.client.list_nodes()
assert_equal(2, len(nodes))
@@ -65,19 +67,24 @@ class SSL_Tests(TestBasic):
when TLS is disabled
Scenario:
1. Revert snapshot "master_node_with_https_only"
1. Pre-condition - perform steps
from master_node_with_https_only test
2. Create a new cluster
3. Disable TLS for public endpoints
4. Deploy cluster
5. Run OSTF
6. Check that all endpoints link to plain http protocol.
3. Go to the Settings tab
4. Disable TLS for public endpoints
5. Add 1 controller and compute+cinder
6. Deploy cluster
7. Run OSTF
8. Check that all endpoints link to plain http protocol.
Duration 30m
"""
self.show_step(1)
self.env.revert_snapshot("master_node_with_https_only")
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.env.revert_snapshot("master_node_with_https_only")
self.show_step(5)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
configure_ssl=False,
@@ -89,13 +96,13 @@ class SSL_Tests(TestBasic):
'slave-02': ['compute', 'cinder'],
}
)
self.show_step(4)
self.show_step(6)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(5)
self.show_step(7)
# Run OSTF
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['smoke'])
self.show_step(6)
self.show_step(8)
# Get controller ip address
controller_keystone_ip = self.fuel_web.get_public_vip(cluster_id)
action = OpenStackActions(controller_ip=controller_keystone_ip)

test_cli_deploy.py

@@ -28,16 +28,18 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
groups=["cli_deploy_neutron_tun"])
@log_snapshot_after_test
def cli_deploy_neutron_tun(self):
"""Deploy neutron_tun cluster using Fuel CLI
"""Deployment with 1 controller, NeutronTUN
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 1 node with cinder role
5. Deploy the cluster
6. Run network verification
7. Run OSTF
1. Create new environment using fuel-qa
2. Choose Neutron, TUN
3. Add 1 controller
4. Add 1 compute
5. Add 1 cinder
6. Verify networks
7. Deploy the environment
8. Verify networks
9. Run OSTF tests
Duration 40m
"""
@@ -50,6 +52,7 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
release_name=OPENSTACK_RELEASE)[0]
self.show_step(1, initialize=True)
self.show_step(2)
cmd = ('fuel env create --name={0} --release={1} '
'--nst=tun --json'.format(self.__class__.__name__,
release_id))
@@ -64,15 +67,15 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
self.update_cli_network_configuration(cluster_id)
self.update_ssl_configuration(cluster_id)
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.add_nodes_to_cluster(cluster_id, node_ids[0], ['controller'])
self.add_nodes_to_cluster(cluster_id, node_ids[1], ['compute'])
self.add_nodes_to_cluster(cluster_id, node_ids[2], ['cinder'])
self.show_step(6)
self.fuel_web.verify_network(cluster_id)
self.show_step(5)
self.show_step(7)
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
task = self.ssh_manager.execute_on_remote(
@@ -82,10 +85,10 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
)['stdout_json']
self.assert_cli_task_success(task, timeout=130 * 60)
self.show_step(6)
self.show_step(8)
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
self.show_step(9)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
should_fail=1)
@@ -94,16 +97,20 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
groups=["cli_deploy_tasks"])
@log_snapshot_after_test
def cli_deploy_tasks(self):
"""Deploy neutron_tun cluster using Fuel CLI
"""Deployment with 3 controllers, NeutronVLAN
Scenario:
1. Create new environment
2. Add 3 nodes with controller role
3. Provision 3 controllers
4. Start netconfig on second controller
5. Deploy the cluster
6. Run network verification
7. Run OSTF
2. Choose Neutron, Vlan
3. Add 3 controllers
4. Provision 3 controllers
(fuel node --node-id x,x,x --provision --env x)
5. Start netconfig on second controller
(fuel node --node 2 --end netconfig --env x)
6. Deploy controller nodes
(fuel node --node x,x,x --deploy --env-id x)
7. Verify networks
8. Run OSTF tests
Duration 50m
"""
@@ -116,6 +123,7 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
release_name=OPENSTACK_RELEASE)[0]
self.show_step(1)
self.show_step(2)
cmd = ('fuel env create --name={0} --release={1} '
'--nst=vlan --json'.format(self.__class__.__name__,
release_id))
@@ -125,10 +133,10 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
cluster_id = env_result['id']
self.show_step(2)
self.show_step(3)
self.add_nodes_to_cluster(cluster_id, node_ids[0:3],
['controller'])
self.show_step(3)
self.show_step(4)
cmd = ('fuel node --node-id {0} --provision --env {1} --json'.
format(','.join(str(n) for n in node_ids), cluster_id))
task = self.ssh_manager.execute_on_remote(
@@ -137,7 +145,7 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
self.assert_cli_task_success(task, timeout=20 * 60)
self.show_step(4)
self.show_step(5)
cmd = ('fuel node --node {0} --end netconfig --env {1} --json'.
format(node_ids[1], release_id))
task = self.ssh_manager.execute_on_remote(
@@ -146,7 +154,7 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
self.assert_cli_task_success(task, timeout=30 * 60)
self.show_step(5)
self.show_step(6)
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
task = self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
@@ -154,9 +162,9 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
self.assert_cli_task_success(task, timeout=130 * 60)
self.show_step(6)
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
self.fuel_web.verify_network(cluster_id)
self.show_step(8)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
should_fail=1)
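assert_cli_task_success, used after every deploy-changes and provisioning call here, is
essentially a poll loop over the nailgun task dict that the --json commands return; a
hedged sketch (get_task is an assumed helper, not the real test_cli_base API):

import time

def assert_cli_task_success(self, task, timeout=130 * 60, interval=30):
    """Poll a nailgun task dict until it is 'ready' or time runs out."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        current = self.get_task(task['id'])  # assumed helper
        if current['status'] == 'error':
            raise AssertionError('Task failed: {0}'.format(current))
        if current['status'] == 'ready':
            return current
        time.sleep(interval)
    raise AssertionError('Task {0} not finished in {1}s'.format(
        task['id'], timeout))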

test_cli_deploy_ceph.py

@@ -28,16 +28,20 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
groups=["cli_deploy_ceph_neutron_tun"])
@log_snapshot_after_test
def cli_deploy_ceph_neutron_tun(self):
"""Deploy neutron_tun cluster using Fuel CLI
"""Deployment with 3 controllers, NeutronTUN, both Ceph
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 2 nodes with ceph role
5. Deploy the cluster
6. Run network verification
7. Run OSTF
1. Create new environment
2. Choose Neutron, TUN
3. Choose Ceph for volumes and Ceph for images
4. Change ceph replication factor to 2
5. Add 3 controller
6. Add 2 compute
7. Add 2 ceph
8. Verify networks
9. Deploy the environment
10. Verify networks
11. Run OSTF tests
Duration 40m
"""
@@ -53,6 +57,7 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
release_name=OPENSTACK_RELEASE)[0]
self.show_step(1, initialize=True)
self.show_step(2)
cmd = ('fuel env create --name={0} --release={1} '
'--nst=tun --json'.format(self.__class__.__name__,
release_id))
@@ -66,22 +71,23 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
self.update_cli_network_configuration(cluster_id)
self.update_ssl_configuration(cluster_id)
self.show_step(3)
self.use_ceph_for_volumes(cluster_id)
self.use_ceph_for_images(cluster_id)
self.change_osd_pool_size(cluster_id, '2')
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.add_nodes_to_cluster(cluster_id, node_ids[0:3],
['controller'])
self.add_nodes_to_cluster(cluster_id, node_ids[3:5],
['compute'])
self.add_nodes_to_cluster(cluster_id, node_ids[5:7],
['ceph-osd'])
self.show_step(7)
self.fuel_web.verify_network(cluster_id)
self.show_step(5)
self.show_step(8)
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
task = self.ssh_manager.execute_on_remote(
@@ -91,10 +97,10 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
)['stdout_json']
self.assert_cli_task_success(task, timeout=130 * 60)
self.show_step(6)
self.show_step(9)
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
self.show_step(10)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
@@ -102,17 +108,19 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
groups=["cli_deploy_ceph_neutron_vlan"])
@log_snapshot_after_test
def cli_deploy_ceph_neutron_vlan(self):
""" Deployment with 3 controlelrs, NeutronVLAN, both Ceph
"""Deployment with 3 controlelrs, NeutronVLAN, both Ceph
Scenario:
1. Create new environment
2. Choose Neutron, VLAN
3. Choose Ceph for volumes and Ceph for images
4. Add 3 controller, 2 compute, 3 ceph
5. Verify networks
6. Deploy the environment
4. Add 3 controller
5. Add 2 compute
6. Add 3 ceph
7. Verify networks
8. Run OSTF tests
8. Deploy the environment
9. Verify networks
10. Run OSTF tests
Duration: 60 min
"""
@@ -142,13 +150,15 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
self.use_ceph_for_volumes(cluster['id'])
self.use_ceph_for_images(cluster['id'])
self.show_step(4)
self.show_step(5)
self.show_step(6)
nodes = {
'controller': node_ids[0:3],
'compute': node_ids[3:5],
'ceph-osd': node_ids[5:8]
}
self.show_step(4)
for role in nodes:
self.ssh_manager.execute_on_remote(
ip=admin_ip,
@@ -158,10 +168,10 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
','.join(map(str, nodes[role])), role)
)
self.show_step(5)
self.show_step(7)
self.fuel_web.verify_network(cluster['id'])
self.show_step(6)
self.show_step(8)
task = self.ssh_manager.execute_on_remote(
ip=admin_ip,
cmd='fuel --env-id={0} '
@@ -170,9 +180,9 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
)['stdout_json']
self.assert_cli_task_success(task, timeout=130 * 60)
self.show_step(7)
self.show_step(9)
self.fuel_web.verify_network(cluster['id'])
self.show_step(8)
self.show_step(10)
self.fuel_web.run_ostf(
cluster_id=cluster['id'],
test_sets=['ha', 'smoke', 'sanity']

test_cli_role.py

@@ -34,12 +34,17 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
"""Update controller role using Fuel CLI
Scenario:
1. Revert snapshot "ready_with_3_slaves"
2. Download controller role yaml to master
3. Remove section "conflicts" under "meta" section
4. Upload changes using Fuel CLI
5. Create new cluster
6. Add new node to cluster with controller+compute
1. Setup master node
2. SSH to the master node
3. Download to file controller role with command:
fuel role --rel 2 --role controller --file controller.yaml
4. Edit the controller.yaml file,
remove section "conflicts" under "meta" section. Save file
5. Update role from file with command:
fuel role --rel 2 --update --file controller.yaml
6. Go to the Fuel UI and try to create a new environment
7. Add new node to the environment,
choose controller and compute roles for node
Duration 20m
"""
@@ -52,17 +57,18 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
release_name=OPENSTACK_RELEASE)[0]
self.show_step(2)
self.show_step(3)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel role --rel {} --role controller --file'
' /tmp/controller.yaml'.format(release_id))
self.show_step(3)
self.show_step(4)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd="sed -i '/conflicts/,+1 d' /tmp/controller.yaml")
self.show_step(4)
self.show_step(5)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel role --rel {} --update --file'
@@ -72,7 +78,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
else:
nst = ''
self.show_step(5)
self.show_step(6)
cmd = ('fuel env create --name={0} --release={1} '
'{2} --json'.format(self.__class__.__name__,
release_id, nst))
@@ -82,7 +88,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
cluster_id = env_result['id']
self.show_step(6)
self.show_step(7)
cmd = ('fuel --env-id={0} node set --node {1} --role=controller,'
'compute'.format(cluster_id, node_ids[0]))
result = self.ssh_manager.execute(
@@ -102,12 +108,32 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
"""Create new role using Fuel CLI
Scenario:
1. Revert snapshot "ready_with_3_slaves"
2. Upload new role yaml to master
3. Upload yaml to nailgun using Fuel CLI
4. Create new cluster
5. Try to create node with new role
6. Try to create node with new role and controller, compute
1. Create environment using fuel-qa
2. SSH to the master node
3. Create new file "role.yaml" and paste the above:
meta:
conflicts:
- controller
- compute
description: New role
has_primary: true
name: Test role
name: test-role
volumes_roles_mapping:
- allocate_size: min
id: os
4. Create new role with command:
fuel role --rel 2 --create --file role.yaml
5. Go to the Fuel UI and try to create a new environment
6. Add new node to the environment, choose test-role
and try to add compute or controller role to the same node
Duration 20m
"""
@@ -126,6 +152,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip,
templates_path, '/tmp')
self.show_step(3)
self.show_step(4)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel role --rel {} --create --file'
@@ -135,7 +162,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
else:
nst = ''
self.show_step(4)
self.show_step(5)
cmd = ('fuel env create --name={0} --release={1} '
'{2} --json'.format(self.__class__.__name__,
release_id, nst))
@@ -145,7 +172,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
cluster_id = env_result['id']
self.show_step(5)
self.show_step(6)
cmd = ('fuel --env-id={0} node set --node {1}'
' --role=test-role'.format(cluster_id, node_ids[0]))
result = self.ssh_manager.execute(
@@ -155,7 +182,6 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
assert_equal(result['exit_code'], 0,
"Can't assign controller and compute node"
" to node id {}".format(node_ids[0]))
self.show_step(6)
cmd = ('fuel --env-id={0} node set --node {1}'
' --role=test-role,controller,'
'compute'.format(cluster_id, node_ids[1]))
@@ -172,14 +198,33 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
groups=["cli_create_role_with_has_primary"])
@log_snapshot_after_test
def cli_create_role_with_has_primary(self):
"""Create new role using Fuel CLI
"""Create role with flag 'has_primary' set in 'true'
Scenario:
1. Revert snapshot "ready_with_3_slaves"
2. Upload new role yaml to master
3. Upload yaml to nailgun using Fuel CLI
4. Create new cluster
5. Try to create node with new role
1. Create environment using fuel-qa
2. SSH to the master node
3. Create new file "role.yaml" and paste the following:
meta:
conflicts:
- controller
- compute
description: New role
has_primary: true
name: Test role
name: test-role
volumes_roles_mapping:
- allocate_size: min
id: os
4. Upload yaml to nailgun using Fuel CLI
5. Create new role with command:
fuel role --rel 2 --create --file role.yaml
Duration 20m
"""
@@ -194,10 +239,11 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
'{0}/fuelweb_test/config_templates/'.format(os.environ.get(
"WORKSPACE", "./")), 'create_primary_role.yaml')
self.show_step(2)
self.show_step(3)
if os.path.exists(templates_path):
self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip,
templates_path, '/tmp')
self.show_step(3)
self.show_step(4)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel role --rel {} --create --file'
@@ -207,7 +253,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
else:
nst = ''
self.show_step(4)
self.show_step(5)
cmd = ('fuel env create --name={0} --release={1} '
'{2} --json'.format(self.__class__.__name__,
release_id, nst))
@@ -217,7 +263,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
cluster_id = env_result['id']
self.show_step(5)
cmd = ('fuel --env-id={0} node set --node {1}'
' --role=test-primary-role'.format(cluster_id,
node_ids[0]))
@@ -237,14 +283,33 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
"""Delete role using Fuel CLI
Scenario:
1. Revert snapshot "ready_with_3_slaves"
2. Upload new role yaml to master
3. Upload yaml to nailgun using Fuel CLI
4. Check new role exists
5. Create new cluster
6. Create node with controller, compute
7. Delete new role
8. Try to delete controller role and check it's impossible
1. Create environment using fuel-qa
2. SSH to the master node
3. Create new file "role.yaml" with the following content:
meta:
conflicts:
- controller
- compute
description: New role
name: Test role
name: test-role
volumes_roles_mapping:
- allocate_size: min
id: os
4. Create new role with command:
fuel role --rel 2 --create --file role.yaml
5. Go to the Fuel UI and try to create a new environment
6. Check if new role exists in the list of roles
7. Add new nodes to the environment: controller, compute
8. Go to the console and try to delete roles:
fuel role --rel 2 --delete --role <role name from step 3>
fuel role --rel 2 --delete --role controller
Duration 20m
"""
@@ -259,10 +324,11 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
'{0}/fuelweb_test/config_templates/'.format(os.environ.get(
"WORKSPACE", "./")), 'create_role.yaml')
self.show_step(2)
self.show_step(3)
if os.path.exists(templates_path):
self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip,
templates_path, '/tmp')
self.show_step(3)
self.show_step(4)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel role --rel {} --create --file'
@@ -270,7 +336,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
result = self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel role --rel {}'.format(release_id))['stdout']
self.show_step(4)
roles = [i.strip() for i in result]
assert_true('test-role' in roles,
"role is not in the list {}".format(roles))
@@ -280,6 +346,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
else:
nst = ''
self.show_step(5)
self.show_step(6)
cmd = ('fuel env create --name={0} --release={1} '
'{2} --json'.format(self.__class__.__name__,
release_id, nst))
@@ -289,7 +356,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
jsonify=True
)['stdout_json']
cluster_id = env_result['id']
self.show_step(6)
self.show_step(7)
cmd = ('fuel --env-id={0} node set --node {1}'
' --role=controller'.format(cluster_id, node_ids[0]))
result = self.ssh_manager.execute(
@@ -300,7 +367,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
"Can't assign controller and"
" compute node to node id {}".format(node_ids[0]))
self.show_step(7)
self.show_step(8)
cmd = ('fuel role --rel {} --delete'
' --role test-role'.format(release_id))
result = self.ssh_manager.execute(
@@ -322,7 +389,7 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
ip=self.ssh_manager.admin_ip,
cmd=cmd,
)
self.show_step(8)
assert_equal(result['exit_code'], 1,
"Controller role shouldn't be able to be deleted")
@@ -335,11 +402,17 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
"""Update controller role using Fuel CLI
Scenario:
1. Revert snapshot "ready_with_3_slaves"
2. Download controller role yaml to master
3. Modify "id" section to incorrect value
4. Upload changes using Fuel CLI
5. Check that error message was got
1. Setup master node
2. SSH to the master node
3. Download to file controller role with command:
fuel role --rel 2 --role controller --file controller.yaml
4. Modify created file: change "id" value at
the "volumes_roles_mapping" to something incorrect,
for ex.: "id: blabla"
5. Save file and upload it to the nailgun with:
fuel role --rel 2 --role controller --update --file
controller.yaml
There should be an error message and role shouldn't be updated.
Duration 20m
"""
@@ -349,17 +422,17 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
release_name=OPENSTACK_RELEASE)[0]
self.show_step(2)
self.show_step(3)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel role --rel {} --role controller --file'
' /tmp/controller.yaml'.format(release_id))
self.show_step(3)
self.show_step(4)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd="sed -i -r 's/id: os/id: blabla/' /tmp/controller.yaml")
self.show_step(4)
self.show_step(5)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,

test_ha_tun_group_1.py

@@ -36,17 +36,18 @@ class HaTunGroup1(TestBasic):
groups=["tun_controller_base_os"])
@log_snapshot_after_test
def tun_controller_base_os(self):
"""Deployment with 3 controllers+Operating System, NeutronTUN
"""Deploy 3 controllers with base_os using Neutron Tun
Scenario:
1. Create cluster using NeutronTUN provider
2. Add 3 nodes with controller + operation system role
3. Add 2 nodes with compute role
4. Add 1 node with cinder role
5. Verify networks
6. Deploy the cluster
7. Verify networks
8. Run OSTF
1. Create new environment
2. Choose Neutron, tunnelling segmentation
3. Add 3 controller+operating system
4. Add 2 compute
5. Add 1 cinder
6. Verify networks
7. Deploy the environment
8. Verify networks
9. Run OSTF tests
Duration XXXm
Snapshot tun_controller_base_os
@@ -89,20 +90,26 @@ class HaTunGroup1(TestBasic):
groups=["tun_ceph_for_images_and_objects"])
@log_snapshot_after_test
def tun_ceph_for_images_and_objects(self):
"""Deployment with 3 controllers, NeutronTUN, with Ceph for images and
Ceph RadosGW for objects
"""Deployment with 3 controllers, NeutronTUN,
with Ceph for images and RadosGW
Scenario:
1. Create cluster using NeutronTUN provider and external dns and
ntp servers, Ceph for Images and Ceph RadosGW for objects
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 1 node with cinder role
5. Add 3 nodes with ceph OSD role
6. Verify networks
7. Deploy the cluster
8. Verify networks
9. Run OSTF
1. Create new environment
2. Choose Neutron VxLAN
3. Choose Ceph for images
4. Choose Ceph RadosGW for objects
5. Add 3 controller
6. Add 2 compute
7. Add 1 cinder
8. Add 3 ceph nodes
9. Change default dns server to any 2 public dns servers
to the 'Host OS DNS Servers' on Settings tab
10. Change default ntp servers to any 2 public ntp servers
to the 'Host OS NTP Servers' on Settings tab
11. Verify networks
12. Deploy the environment
13. Verify networks
14. Run OSTF tests
Duration XXXm
Snapshot tun_ceph_images_rados_objects

test_ha_tun_group_2.py

@@ -34,19 +34,21 @@ class HaTunGroup2(TestBasic):
groups=["tun_ha_ceph_base_os"])
@log_snapshot_after_test
def tun_ha_ceph_base_os(self):
"""Deployment with 3 controllers, NeutronTUN, with Ceph all for all
and operating system
"""Deploy 3 controllers, 1 base_os, 2 computes
and 3 ceph nodes with Neutron Tun
Scenario:
1. Create cluster using NeutronTUN provider
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 3 node with ceph role
5. Add 1 node with operation system role
6. Verify networks
7. Deploy the cluster
1. Create new environment
2. Choose Neutron TUN
3. Choose Ceph for all
4. Add 3 controller
5. Add 2 compute
6. Add 3 ceph
7. Add 1 Operating System nodes
8. Verify networks
9. Run OSTF
9. Deploy the environment
10. Verify networks
11. Run OSTF tests
Duration XXXm
Snapshot tun_ha_ceph_base_os
@@ -100,21 +102,23 @@ class HaTunGroup2(TestBasic):
groups=["tun_ceph_all"])
@log_snapshot_after_test
def tun_ceph_all(self):
"""Deployment with 3 controllers, NeutronTUN, with Ceph for volumes,
images, ephemeral and Rados GW for objects
"""Deployment with 3 controllers, NeutronVxLAN,
with Ceph for volumes and images, ephemeral and Rados GW for objects
Scenario:
1. Create cluster using NeutronTUN provider, Ceph for Images,
Volumes, Objects, Ephemeral, non-default OS credentials
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 3 nodes with ceph OSD role
5. Untag management and storage networks, assign it to separate
interfaces (default behaviour of update_nodes)
6. Verify networks
7. Deploy the cluster
1. Create new environment
2. Choose Neutron, VxLAN
3. Choose Ceph for volumes and images, ceph for ephemeral
and Rados GW for objects
4. Add 3 controller
5. Add 2 compute
6. Add 3 ceph nodes
7. Untag management and storage networks
and move them to separate interfaces
8. Verify networks
9. Run OSTF
9. Start deployment
10. Verify networks
11. Run OSTF
Duration XXXm
Snapshot tun_ceph_all

test_ha_tun_group_3.py

@@ -38,23 +38,28 @@ class HaTunGroup3(TestBasic):
groups=["tun_no_volumes_ceph_for_images_and_ephemeral"])
@log_snapshot_after_test
def tun_no_volumes_ceph_for_images_and_ephemeral(self):
"""Deployment with 3 controllers, NeutronVxLAN, with no storage for
volumes and Ceph for images and ephemeral
"""Deployment with 3 controllers, NeutronVxLAN,
with no storage for volumes and ceph for images and ephemeral
Scenario:
1. Create cluster using NeutronTUN provider, external dns and ntp
servers, no storage for volumes, Ceph for Images and ephemeral,
Ceph replica factor 2
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 2 nodes with ceph OSD role
5. Change default partitioning for vdc of Ceph node
6. Change public network from /24 to /25
7. Verify networks
8. Deploy the cluster
9. Validate partition on Ceph node
10. Verify networks
11. Run OSTF
1. Create new environment
2. Choose Neutron, VxLAN
3. Uncheck cinder for volumes and choose ceph for images,
ceph for ephemeral
4. Change ceph replication factor to 2
5. Add 3 controller
6. Add 2 compute
7. Add 2 ceph nodes
8. Change default disks partitioning for ceph nodes for 'vdc'
9. Change default dns server to any 2 public dns servers
to the 'Host OS DNS Servers' on Settings tab
10. Change default ntp servers to any 2 public ntp servers
to the 'Host OS NTP Servers' on Settings tab
11. Change default public net mask from /24 to /25
12. Verify networks
13. Deploy cluster
14. Verify networks
15. Run OSTF
Duration 180m
Snapshot tun_no_volumes_ceph_for_images_and_ephemeral
@@ -134,22 +139,23 @@ class HaTunGroup3(TestBasic):
groups=["tun_5_ctrl_ceph_ephemeral"])
@log_snapshot_after_test
def tun_5_ctrl_ceph_ephemeral(self):
"""Deployment with 5 controllers, NeutronTUN, with Ceph RDB for
ephemeral volumes
"""Deployment with 5 controllers, NeutronTUN,
with Ceph RBD for ephemeral volumes
Scenario:
1. Create cluster using NeutronTUN provider, Ceph RDB for ephemeral
volumes
2. Add 5 nodes with controller role
3. Add 1 nodes with compute role
4. Add 3 nodes with ceph OSD role
5. Change default partitioning for vdc of Ceph nodes
6. Change public network mask from /24 to /25
7. Verify networks
8. Deploy the cluster
9. Validate partition on Ceph node
10. Verify networks
11. Run OSTF
1. Create new environment
2. Choose Neutron, tunnelling segmentation
3. Choose Ceph RBD for ephemeral volumes
and uncheck Cinder LVM over iSCSI for volumes
4. Add 5 controllers
5. Add 1 compute
6. Add 3 ceph
7. Change default disks partitioning for ceph nodes for vdc
8. Change public default mask from /24 to /25
9. Verify networks
10. Deploy the environment
11. Verify networks
12. Run OSTF tests
Duration XXXm
Snapshot tun_5_ctrl_ceph_ephemeral
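Several scenarios in this and the following VLAN groups shrink a default network from
/24 to /25; the effect is simply halving the address pool, e.g. (the network itself is
an arbitrary example):

import ipaddress

public = ipaddress.ip_network(u'172.16.0.0/24')
narrowed = next(public.subnets(new_prefix=25))
print(public.num_addresses, narrowed.num_addresses)  # 256 128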

test_ha_vlan_group_1.py

@@ -29,19 +29,23 @@ class HaVlanGroup1(TestBasic):
groups=["cinder_ceph_for_images"])
@log_snapshot_after_test
def cinder_ceph_for_images(self):
"""Deploy cluster with cinder and ceph for images
"""Deployment with 3 controllers, NeutronVLAN,
with Ceph for images and other disk configuration
Scenario:
1. Create cluster
2. Add 3 node with controller role
3. Add 2 node with compute role
4. Add 3 nodes with ceph OSD roles
5. Add 1 node with cinder
6. Change disks configuration for ceph nodes
7. Verify networks
8. Deploy the cluster
1. Create new environment
2. Choose Neutron, VLAN
3. Choose Ceph for images
4. Add 3 controller
5. Add 2 compute
6. Add 1 cinder
7. Add 3 ceph
8. Change disk configuration for both Ceph nodes.
Change 'Ceph' volume for vdc
9. Verify networks
10. Run OSTF
10. Deploy the environment
11. Verify networks
12. Run OSTF tests
Duration 180m
Snapshot cinder_ceph_for_images
@@ -101,18 +105,21 @@ class HaVlanGroup1(TestBasic):
groups=["ceph_for_volumes_swift"])
@log_snapshot_after_test
def ceph_for_volumes_swift(self):
"""Deploy cluster with ceph for volumes and swift
"""Deployment with 5 controllers, NeutronVLAN, with Ceph for volumes
Scenario:
1. Create cluster
2. Add 5 node with controller role
3. Add 2 node with compute role
4. Add 2 nodes with ceph OSD roles
5. Change disks configuration for ceph nodes
6. Verify networks
7. Deploy the cluster
8. Verify networks
9. Run OSTF
1. Create new environment
2. Choose Neutron, VLAN
3. Choose Ceph for volumes
4. Add 5 controller
5. Add 2 compute
6. Add 2 ceph nodes
7. Change default partitioning scheme for both ceph nodes for 'vdc'
8. Change ceph replication factor to 2
9. Verify networks
10. Deploy cluster
11. Verify networks
12. Run OSTF tests
Duration 180m
Snapshot ceph_for_volumes_swift

test_ha_vlan_group_2.py

@@ -28,18 +28,20 @@ class HaVlanGroup2(TestBasic):
groups=["cinder_ceph_for_ephemeral"])
@log_snapshot_after_test
def cinder_ceph_for_ephemeral(self):
"""Deploy cluster with cinder and ceph for ephemeral
"""Deployment with 3 controllers, NeutronVLAN, with Ceph for ephemeral
Scenario:
1. Create cluster
2. Add 3 node with controller role
3. Add 2 node with compute role
4. Add 3 nodes with ceph OSD roles
5. Add 1 node with cinder
6. Verify networks
7. Deploy the cluster
1. Create new environment
2. Choose Neutron, VLAN
3. Choose cinder for volumes and Ceph for ephemeral
4. Add 3 controller
5. Add 2 compute
6. Add 1 cinder
7. Add 3 ceph
8. Verify networks
9. Run OSTF
9. Deploy the environment
10. Verify networks
11. Run OSTF tests
Duration 180m
Snapshot cinder_ceph_for_ephemeral
@@ -90,18 +92,23 @@ class HaVlanGroup2(TestBasic):
groups=["cinder_ceph_for_images_ephemeral"])
@log_snapshot_after_test
def cinder_ceph_for_images_ephemeral(self):
"""Deploy cluster with cinder and ceph for images and ephemeral
"""Deployment with 3 controllers, NeutronVLAN, with Ceph for
images and ephemeral
Scenario:
1. Create cluster
2. Add 3 node with controller role
3. Add 2 node with compute role
4. Add 3 nodes with ceph OSD roles
5. Add 1 node with cinder
6. Verify networks
7. Deploy the cluster
8. Verify networks
9. Run OSTF
1. Create new environment
2. Choose Neutron, VLAN
3. Choose Ceph for images and ceph for ephemeral
4. Add 3 controller
5. Add 2 compute
6. Add 1 cinder
7. Add 3 ceph
8. Untag management and storage networks and move them to separate
interfaces
9. Verify networks
10. Deploy the environment
11. Verify networks
12. Run OSTF tests
Duration 180m
Snapshot cinder_ceph_for_images_ephemeral

test_ha_vlan_group_3.py

@@ -30,16 +30,16 @@ class HaVlanGroup3(TestBasic):
NeutronVLAN with no storage for volumes and swift
Scenario:
1. Create new environment
2. Choose Neutron, VLAN
3. Uncheck cinder for volumes
4. Add 3 controller
5. Add 2 compute
6. Change public net mask from /24 to /25
7. Verify networks
8. Deploy the environment
9. Verify networks
10. Run OSTF tests
1. Create new environment
2. Choose Neutron, VLAN
3. Uncheck cinder for volumes
4. Add 3 controller
5. Add 2 compute
6. Change public net mask from /24 to /25
7. Verify networks
8. Deploy the environment
9. Verify networks
10. Run OSTF tests
Duration: 180 min
Snapshot: no_storage_for_volumes_swift
@@ -100,18 +100,18 @@ class HaVlanGroup3(TestBasic):
with Ceph for volumes and ephemeral
Scenario:
1. Create new environment
2. Choose Neutron, VLAN
3. Choose Ceph for volumes and Ceph for ephemeral
4. Change openstack username, password, tenant
5. Add 3 controller
6. Add 2 compute
7. Add 3 ceph nodes
8. Change default management net mask from /24 to /25
9. Verify networks
10. Start deployment
11. Verify networks
12. Run OSTF
1. Create new environment
2. Choose Neutron, VLAN
3. Choose Ceph for volumes and Ceph for ephemeral
4. Change openstack username, password, tenant
5. Add 3 controller
6. Add 2 compute
7. Add 3 ceph nodes
8. Change default management net mask from /24 to /25
9. Verify networks
10. Start deployment
11. Verify networks
12. Run OSTF
Duration: 180m
Snapshot: ceph_volumes_ephemeral
@@ -132,17 +132,15 @@ class HaVlanGroup3(TestBasic):
self.show_step(1, initialize=True)
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
settings=data
)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.show_step(7)
self.show_step(8)
self.fuel_web.update_nodes(
cluster_id,
{
@@ -157,20 +155,20 @@ class HaVlanGroup3(TestBasic):
}
)
self.show_step(9)
self.show_step(8)
self.fuel_web.update_network_cidr(cluster_id, 'management')
self.show_step(10)
self.show_step(9)
self.fuel_web.verify_network(cluster_id)
self.show_step(11)
self.show_step(10)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(12)
self.show_step(11)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(13)
self.show_step(12)
self.fuel_web.run_ostf(cluster_id)
self.env.make_snapshot('ceph_volumes_ephemeral')

test_ha_vlan_group_4.py

@@ -31,18 +31,18 @@ class HaVlanGroup4(TestBasic):
and other disk configuration
Scenario:
1. Create new environment
2. Choose Neutron, VLAN
3. Add 4 controllers
4. Add 2 computes
5. Add 3 cinders
6. Change disk configuration for all Cinder nodes.
Change 'Cinder' volume for vdc
7. Verify networks
8. Deploy the environment
9. Verify networks
10. Check disk configuration
11. Run OSTF tests
1. Create new environment
2. Choose Neutron, VLAN
3. Add 4 controller
4. Add 2 compute
5. Add 3 cinder
6. Change disk configuration for all Cinder nodes.
Change 'Cinder' volume for vdc
7. Verify networks
8. Deploy the environment
9. Verify networks
10. Check disk configuration
11. Run OSTF tests
Notation: "By default recommended use uneven numbers of controllers,
but nowhere there is information we cannot deploy with even
@@ -109,23 +109,23 @@ class HaVlanGroup4(TestBasic):
groups=["ceph_rados_gw_no_storage_volumes"])
@log_snapshot_after_test
def ceph_rados_gw_no_storage_volumes(self):
"""Deployment with 3 controllers, NeutronVLAN,
with no storage for volumes and ceph for images and Rados GW
"""Deployment with 3 controllers, NeutronVLAN, with no storage for
volumes and ceph for images and Rados GW
Scenario:
1. Create new environment
2. Choose Neutron, VLAN
3. Uncheck cinder storage for volumes and choose ceph for images and
Rados GW
4. Change openstack username, password, tenant
5. Add 3 controller
6. Add 2 compute
7. Add 3 ceph nodes
8. Change storage net mask from /24 to /25
9. Verify networks
10. Start deployment
11. Verify networks
12. Run OSTF
1. Create new environment
2. Choose Neutron, VLAN
3. Uncheck cinder storage for volumes and choose ceph
for images and Rados GW
4. Change openstack username, password, tenant
5. Add 3 controller
6. Add 2 compute
7. Add 3 ceph nodes
8. Change storage net mask /24 to /25
9. Verify networks
10. Start deployment
11. Verify networks
12. Run OSTF
Duration: 180 min
Snapshot: ceph_rados_gw_no_storage_volumes
@@ -145,11 +145,11 @@ class HaVlanGroup4(TestBasic):
self.show_step(1, initialize=True)
self.show_step(2)
self.show_step(3)
self.show_step(4)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
settings=data
)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.show_step(7)

test_ha_vlan_group_5.py

@@ -29,18 +29,22 @@ class HaVlanGroup5(TestBasic):
groups=["ceph_for_volumes_images_ephemeral_rados"])
@log_snapshot_after_test
def ceph_for_volumes_images_ephemeral_rados(self):
"""Deploy cluster with ceph for volumes, images, ephemeral, rados
"""Deployment with 3 controllers, NeutronVLAN,
with Ceph for volumes and images, ephemeral and Rados GW for objects
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 3 nodes with ceph OSD roles
5. Verify networks
6. Deploy the cluster
7. Check ceph status
1. Create environment using fuel-qa
2. Choose Neutron, VLAN
3. Choose Ceph for volumes and images,
ceph for ephemeral and Rados GW for objects
4. Add 3 controller
5. Add 2 compute
6. Add 3 ceph nodes
7. Untag all networks and move them to separate interfaces
8. Verify networks
9. Run OSTF
9. Deploy cluster
10. Verify networks
11. Run OSTF
Duration 180m
Snapshot ceph_for_volumes_images_ephemeral_rados
@@ -59,13 +63,16 @@ class HaVlanGroup5(TestBasic):
'password': 'cephvolumesimagesephemeralrados'
}
self.show_step(1, initialize=True)
self.show_step(2)
self.show_step(3)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
settings=data
)
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.show_step(7)
self.fuel_web.update_nodes(
cluster_id,
{
@@ -79,17 +86,17 @@ class HaVlanGroup5(TestBasic):
'slave-08': ['ceph-osd']
}
)
self.show_step(5)
self.fuel_web.verify_network(cluster_id)
self.show_step(6)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(7)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(8)
self.fuel_web.verify_network(cluster_id)
self.show_step(9)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(10)
self.fuel_web.verify_network(cluster_id)
self.show_step(11)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("ceph_for_volumes_images_ephemeral_rados")
@@ -98,24 +105,27 @@ class HaVlanGroup5(TestBasic):
groups=["cinder_ceph_for_images_ephemeral_rados"])
@log_snapshot_after_test
def cinder_ceph_for_images_ephemeral_rados(self):
"""Deploy cluster with cinder volumes and ceph for images,
ephemeral, rados
"""Deployment with 3 controllers, NeutronVLAN, with cinder for volumes
and ceph for images, ephemeral and Rados GW for objects
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 3 nodes with ceph OSD roles
5. Add 1 cinder node
6. Change disks configuration for ceph and cinder nodes
7. Change default dns server
8. Change default NTP server
9. Change public net mask from /24 to /25
10. Verify networks
11. Deploy the cluster
12. Check ceph status
13. Verify networks
14. Check ceph disks partitioning
1. Create new environment
2. Choose Neutron, VLAN
3. Choose cinder for volumes and ceph for images, ceph for
ephemeral and Rados GW for objects
4. Add 3 controller
5. Add 2 compute
6. Add 3 ceph nodes
7. Add 1 cinder node
8. Change default public net mask from /24 to /25
9. Change default partitioning for ceph and cinder nodes for vdc
10. Change default dns server to any 2 public dns servers to the
'Host OS DNS Servers' on Settings tab
11. Change default ntp servers to any 2 public ntp servers to the
'Host OS NTP Servers' on Settings tab
12. Verify networks
13. Deploy cluster
14. Verify networks
15. Run OSTF
Duration 180m
@@ -144,6 +154,8 @@ class HaVlanGroup5(TestBasic):
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.show_step(7)
self.fuel_web.update_nodes(
cluster_id,
{
@@ -158,12 +170,12 @@ class HaVlanGroup5(TestBasic):
'slave-09': ['cinder']
}
)
self.show_step(9)
self.show_step(8)
self.fuel_web.update_network_cidr(cluster_id, 'public')
self.show_step(6)
self.show_step(7)
self.show_step(8)
self.show_step(9)
self.show_step(10)
self.show_step(11)
ceph_nodes = self.fuel_web.\
get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
role_status='pending_roles')
@@ -178,15 +190,14 @@ class HaVlanGroup5(TestBasic):
cinder_image_size = self.fuel_web.\
update_node_partitioning(cinder_node, node_role='cinder')
self.show_step(10)
self.fuel_web.verify_network(cluster_id)
self.show_step(11)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(12)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(13)
self.fuel_web.verify_network(cluster_id)
self.show_step(13)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(14)
self.fuel_web.verify_network(cluster_id)
for ceph in ceph_nodes:
checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)

test_ha_vlan_group_6.py

@@ -29,21 +29,26 @@ class HaVlanGroup6(TestBasic):
groups=["ceph_for_images_ephemeral_rados"])
@log_snapshot_after_test
def ceph_for_images_ephemeral_rados(self):
"""Deploy cluster with ceph for images, ephemeral and rados
"""Deployment with 3 controllers, NeutronVLAN, with no storage for
volumes and ceph for images, ephemeral and Rados GW for objects
Scenario:
1. Create cluster
2. Add 3 node with controller role
3. Add 2 node with compute role
4. Add 3 nodes with ceph OSD roles
5. Change disks configuration for ceph nodes
6. Change default dns and NTP values
1. Create new environment
2. Choose Neutron, VLAN
3. Uncheck cinder for volumes and choose ceph for images,
ceph for ephemeral and Rados GW for objects
4. Add 3 controller
5. Add 2 compute
6. Add 3 ceph nodes
7. Verify networks
8. Deploy the cluster
9. Check ceph status
10. Verify networks
11. Check partitioning for ceph disks
12. Run OSTF
8. Change default disks partitioning for ceph nodes for 'vdc'
9. Change default dns server to any 2 public dns servers to the
'Host OS DNS Servers' on Settings tab
10. Change default ntp servers to any 2 public ntp servers to the
'Host OS NTP Servers' on Settings tab
11. Deploy cluster
12. Verify networks
13. Run OSTF
Duration 180m
Snapshot ceph_for_images_ephemeral_rados
@@ -71,6 +76,8 @@ class HaVlanGroup6(TestBasic):
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.fuel_web.update_nodes(
cluster_id,
{
@@ -87,8 +94,9 @@ class HaVlanGroup6(TestBasic):
self.show_step(7)
self.fuel_web.verify_network(cluster_id)
self.show_step(5)
self.show_step(6)
self.show_step(8)
self.show_step(9)
self.show_step(10)
ceph_nodes = self.fuel_web.\
get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
role_status='pending_roles')
@@ -96,18 +104,16 @@ class HaVlanGroup6(TestBasic):
ceph_image_size = self.fuel_web.\
update_node_partitioning(ceph_node, node_role='ceph')
self.show_step(8)
self.show_step(11)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(9)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(10)
self.show_step(12)
self.fuel_web.verify_network(cluster_id)
self.show_step(11)
for ceph in ceph_nodes:
checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)
self.show_step(12)
self.show_step(13)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("ceph_for_images_ephemeral_rados")
@@ -116,21 +122,23 @@ class HaVlanGroup6(TestBasic):
groups=["ceph_for_volumes_images_ephemeral"])
@log_snapshot_after_test
def ceph_for_volumes_images_ephemeral(self):
"""Deploy cluster with ceph for volumes and images, ephemeral
"""Deployment with 5 controllers, NeutronVLAN,
with Ceph for volumes and images, ephemeral
Scenario:
1. Create cluster
2. Add 5 node with controller role
3. Add 2 node with compute role
4. Add 2 nodes with ceph OSD roles
5. Change disks configuration for ceph nodes
6. Change management net mask from /24 to /25
7. Verify networks
8. Deploy the cluster
9. Check ceph status
1. Create new environment
2. Choose Neutron, VLAN
3. Choose Ceph for volumes and images, ceph for ephemeral
4. Add 5 controller
5. Add 2 compute
6. Add 2 ceph nodes
7. Change ceph replication factor to 2
8. Change management net default mask from /24 to /25
9. Change default disk partitioning for ceph nodes for vdc
10. Verify networks
11. Check partitioning for ceph nodes
12. Run OSTF
11. Deploy changes
12. Verify networks
13. Run OSTF
Duration 180m
Snapshot ceph_for_volumes_images_ephemeral
@@ -156,6 +164,9 @@ class HaVlanGroup6(TestBasic):
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.show_step(7)
self.fuel_web.update_nodes(
cluster_id,
{
@@ -170,10 +181,10 @@ class HaVlanGroup6(TestBasic):
'slave-09': ['ceph-osd']
}
)
self.show_step(6)
self.show_step(8)
self.fuel_web.update_network_cidr(cluster_id, 'management')
self.show_step(5)
self.show_step(9)
ceph_nodes = self.fuel_web.\
get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
role_status='pending_roles')
@@ -181,19 +192,17 @@ class HaVlanGroup6(TestBasic):
ceph_image_size = self.fuel_web.\
update_node_partitioning(ceph_node, node_role='ceph')
self.show_step(7)
self.fuel_web.verify_network(cluster_id)
self.show_step(8)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(9)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(10)
self.fuel_web.verify_network(cluster_id)
self.show_step(11)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(12)
self.fuel_web.verify_network(cluster_id)
for ceph in ceph_nodes:
checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)
self.show_step(12)
self.show_step(13)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("ceph_for_volumes_images_ephemeral")

test_ha_vlan_group_7.py

@@ -29,20 +29,27 @@ class HaVlanGroup7(TestBasic):
groups=["ceph_for_images"])
@log_snapshot_after_test
def ceph_for_images(self):
"""Deploy cluster with no volume storage and ceph for images
"""Deployment with 3 controllers, NeutronVLAN,
with no storage for volumes and ceph for images
Scenario:
1. Create cluster
2. Add 3 node with controller role
3. Add 2 node with compute role
4. Add 3 nodes with ceph OSD roles
5. Change ceph replication factor to 3
6. Change disks configuration for ceph nodes
7. Change default NTP and DNS
8. Verify networks
9. Deploy the cluster
10. Verify networks
11. Run OSTF
1. Create new environment
2. Choose Neutron, VLAN
3. Uncheck cinder for volumes and choose ceph for images
4. Add 3 controller
5. Add 2 compute
6. Add 3 ceph nodes
7. Change default disks partitioning for ceph nodes for 'vdc'
8. Change default dns server to any 2 public dns servers to the
'Host OS DNS Servers' on Settings tab
9. Change default ntp servers to any 2 public ntp servers to the
'Host OS NTP Servers' on Settings tab
10. Untag management and storage networks
and move them to separate interfaces
11. Verify networks
12. Deploy cluster
13. Verify networks
14. Run OSTF
Duration 180m
Snapshot ceph_for_images
@@ -104,17 +111,18 @@ class HaVlanGroup7(TestBasic):
groups=["ha_vlan_operating_system"])
@log_snapshot_after_test
def ha_vlan_operating_system(self):
"""Deploy cluster with cinder/swift and one Operating system node
"""Deployment with 3 controllers, NeutronVlan, with Operating System
Scenario:
1. Create cluster
2. Add 3 node with controller role
3. Add 2 node with compute role
4. Add 1 node with Operating system
5. Verify networks
6. Deploy the cluster
7. Verify networks
8. Run OSTF
1. Create new environment
2. Choose Neutron Vlan
3. Add 3 controller
4. Add 2 compute
5. Add 1 Operating System node
6. Verify networks
7. Deploy the environment
8. Verify networks
9. Run OSTF tests
Duration 180m
Snapshot ha_vlan_operating_system


@ -61,7 +61,7 @@ class TestUseMirror(TestBasic):
@test(groups=['fuel-mirror', 'deploy_with_custom_mirror'],
depends_on=[SetupEnvironment.prepare_slaves_5])
def deploy_with_custom_mirror(self):
"""Create mirror for deployment without internet dependencies.
"""Create local mirrors for Ubuntu repos using fuel-mirror tool
Scenario:
1. Create cluster with neutron networking


@ -40,7 +40,7 @@ class MongoMultirole(TestBasic):
5. Add 1 compute
6. Add 3 mongo+cinder
7. Move Storage network to eth1 and specify vlan start
8. Move Management network to eth2 (it's untagged by default)
8. Move Management network to eth2 and untag it
9. Verify networks
10. Deploy the environment
11. Verify networks
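Steps 7-8 (regrouping Storage and Management onto dedicated interfaces) are usually expressed per node with an interfaces dict. A sketch under assumptions: the helper name update_node_networks and the NIC layout below follow fuel-qa conventions but are not taken from this diff:

interfaces = {
    'eth0': ['fuelweb_admin'],
    'eth1': ['storage'],     # step 7
    'eth2': ['management'],  # step 8, untagged
    'eth3': ['public'],
    'eth4': ['private'],
}
for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
    self.fuel_web.update_node_networks(node['id'], interfaces)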


@ -29,20 +29,21 @@ class MultiroleGroup1(TestBasic):
groups=["controller_ceph_and_compute_cinder"])
@log_snapshot_after_test
def controller_ceph_and_compute_cinder(self):
"""Deploy cluster with controller+ceph and compute+cinder
"""Deployment with 3 Controllers plus Ceph, Neutron Vxlan
and non-default disk partitioning
Scenario:
1. Create cluster
2. Choose cinder and ceph for images
3. Add 3 node with controller+ceph role
4. Add 1 node with compute+cinder role
5. Change disks configuration for ceph nodes
1. Create new environment
2. Choose Neutron Vxlan
3. Choose Cinder for volumes and Ceph for images
4. Add 3 controller+ceph
5. Add 1 compute+cinder
6. Verify networks
7. Deploy the cluster
8. Check ceph status
7. Change disk configuration for all Ceph nodes.
Change 'Ceph' volume for vdc
8. Deploy the environment
9. Verify networks
10. Check partitions on ceph nodes
11. Run OSTF
10. Run OSTF tests
Duration 180m
Snapshot controller_ceph_and_compute_cinder
@ -61,12 +62,13 @@ class MultiroleGroup1(TestBasic):
}
self.show_step(1, initialize=True)
self.show_step(2)
self.show_step(3)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
settings=data
)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.fuel_web.update_nodes(
cluster_id,
{
@ -79,7 +81,7 @@ class MultiroleGroup1(TestBasic):
self.show_step(6)
self.fuel_web.verify_network(cluster_id)
self.show_step(5)
self.show_step(7)
ceph_nodes = self.fuel_web.\
get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
role_status='pending_roles')
@ -87,18 +89,16 @@ class MultiroleGroup1(TestBasic):
ceph_image_size = self.fuel_web.\
update_node_partitioning(ceph_node, node_role='ceph')
self.show_step(7)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(8)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(9)
self.fuel_web.verify_network(cluster_id)
self.show_step(10)
for ceph in ceph_nodes:
checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)
self.show_step(11)
self.show_step(10)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("controller_ceph_and_compute_cinder")
@ -107,18 +107,19 @@ class MultiroleGroup1(TestBasic):
groups=["controller_ceph_cinder_compute_ceph_cinder"])
@log_snapshot_after_test
def controller_ceph_cinder_compute_ceph_cinder(self):
"""Deploy cluster with controller+ceph+cinder and compute+ceph+cinder
"""Deployment with 3 Controllers plus Ceph plus Cinder, Neutron Vlan,
cinder for volumes, ceph for images/ephemeral/objects
Scenario:
1. Create cluster
2. Choose cinder and ceph for images, ephemeral, objects
3. Add 3 node with controller+ceph+cinder role
4. Add 1 node with compute+ceph+cinder role
5. Verify networks
6. Deploy the cluster
7. Check ceph status
1. Create new environment
2. Choose Neutron, Vlan
3. Choose cinder for volumes and ceph for images/ephemeral/objects
4. Add 3 controllers+ceph+cinder
5. Add 1 compute+ceph+cinder
6. Verify networks
7. Deploy the environment
8. Verify networks
9. Run OSTF
9. Run OSTF tests
Duration 180m
Snapshot controller_ceph_cinder_compute_ceph_cinder
@ -137,12 +138,13 @@ class MultiroleGroup1(TestBasic):
}
self.show_step(1, initialize=True)
self.show_step(2)
self.show_step(3)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
settings=data
)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.fuel_web.update_nodes(
cluster_id,
{
@ -153,11 +155,10 @@ class MultiroleGroup1(TestBasic):
}
)
self.show_step(5)
self.fuel_web.verify_network(cluster_id)
self.show_step(6)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(8)
self.fuel_web.verify_network(cluster_id)
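The node assignment for this multirole test is elided by the hunk; judging from the docstring it plausibly has the shape below (the slave-to-role mapping is an assumption):

self.fuel_web.update_nodes(
    cluster_id,
    {
        'slave-01': ['controller', 'ceph-osd', 'cinder'],
        'slave-02': ['controller', 'ceph-osd', 'cinder'],
        'slave-03': ['controller', 'ceph-osd', 'cinder'],
        'slave-04': ['compute', 'ceph-osd', 'cinder']
    }
)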


@ -33,13 +33,18 @@ class MixedComponents(TestBasic):
Scenario:
1. Create new environment
2. Choose Neutron + TUN, Cinder
3. Enable Sahara, Murano and Ceilometer
4. Add 3 controller, 1 compute, 1 cinder and 3 mongo nodes
5. Verify networks
6. Deploy the environment
7. Verify networks
8. Run OSTF tests
2. Choose Neutron, TUN
3. Choose Murano
4. Choose Sahara
5. Choose Ceilometer
6. Add 3 controller
7. Add 1 compute
8. Add 1 cinder
9. Add 3 mongo
10. Verify networks
11. Deploy the environment
12. Verify networks
13. Run OSTF tests
Duration: 300 min
Snapshot: mixed_components_murano_sahara_ceilometer
@ -60,12 +65,17 @@ class MixedComponents(TestBasic):
self.show_step(1, initialize=True)
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
settings=data
)
self.show_step(4)
self.show_step(6)
self.show_step(7)
self.show_step(8)
self.show_step(9)
self.fuel_web.update_nodes(
cluster_id,
{
@ -80,16 +90,16 @@ class MixedComponents(TestBasic):
}
)
self.show_step(5)
self.show_step(10)
self.fuel_web.verify_network(cluster_id)
self.show_step(6)
self.show_step(11)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(7)
self.show_step(12)
self.fuel_web.verify_network(cluster_id)
self.show_step(8)
self.show_step(13)
self.fuel_web.run_ostf(cluster_id, test_sets=['smoke', 'sanity', 'ha',
'tests_platform'])
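Splitting Murano, Sahara and Ceilometer into steps 3-5 mirrors how they are toggled in the cluster settings, and enabling them is also why the OSTF run above adds 'tests_platform'. A hedged sketch; the settings keys and the NEUTRON_SEGMENT import are fuel-qa conventions assumed here:

from fuelweb_test.settings import NEUTRON_SEGMENT

data = {
    'net_provider': 'neutron',
    'net_segment_type': NEUTRON_SEGMENT['tun'],  # step 2
    'murano': True,       # step 3
    'sahara': True,       # step 4
    'ceilometer': True,   # step 5
}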


@ -47,7 +47,8 @@ class HaScaleGroup1(TestBasic):
groups=["add_controllers_stop"])
@log_snapshot_after_test
def add_controllers_stop(self):
"""Check add 2 controllers with stop deployment
"""Add 2 controllers, deploy, stop deploy, remove added controllers,
add 2 controllers once again
Scenario:
1. Create cluster
@ -105,7 +106,7 @@ class HaScaleGroup1(TestBasic):
groups=["add_ceph_stop"])
@log_snapshot_after_test
def add_ceph_stop(self):
"""Check add 2 ceph nodes with stop deployment
"""Add 2 ceph-osd, deploy, stop deploy, re-deploy again
Scenario:
1. Create cluster


@ -32,7 +32,7 @@ class HaScaleGroup2(TestBasic):
groups=["replace_primary_controller"])
@log_snapshot_after_test
def replace_primary_controller(self):
"""Check add 2 controllers with stop deployment
"""Replace the primary controller in the cluster
Scenario:
1. Create cluster
@ -106,7 +106,8 @@ class HaScaleGroup2(TestBasic):
groups=["remove_controllers"])
@log_snapshot_after_test
def remove_controllers(self):
"""Remove two controllers
"""Deploy cluster with 3 controllers, remove 2 controllers
and re-deploy, check hosts and corosync
Scenario:
1. Create cluster
@ -167,6 +168,7 @@ class HaScaleGroup2(TestBasic):
node = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.get_node(name='slave-01'))
self.show_step(8)
self.show_step(9)
for host in hosts:
result = self.ssh_manager.execute_on_remote(
ip=node['ip'], cmd="grep '{}' /etc/hosts".format(host))


@ -27,24 +27,27 @@ class HaScaleGroup3(TestBasic):
groups=["add_delete_compute"])
@log_snapshot_after_test
def add_delete_compute(self):
"""Check add, add/delete, delete compute node
"""Deployment with 3 controllers, NeutronVLAN, add, add/delete/,
delete compute node
Scenario:
1. Create cluster
2. Add 3 controller node
3. Deploy the cluster
4. Add 2 compute nodes
5. Deploy changes
6. Verify network
7. Run OSTF
8. Add 1 compute node and delete one deployed compute node
9. Deploy changes
10. Run OSTF
11. Verify networks
12. Delete one compute node
13. Deploy changes
14. Verify networks
15. Run OSTF
1. Create new environment
2. Choose Neutron, VLAN
3. Add 3 controller
4. Deploy the environment
5. Add 2 compute
6. Verify networks
7. Deploy the environment
8. Verify networks
9. Run OSTF tests
10. Add 1 new compute node and delete one deployed compute
11. Re-deploy
12. Verify networks
13. Run OSTF tests
14. Delete one compute node
15. Re-deploy cluster
16. Verify networks
17. Run OSTF
Duration 120m
Snapshot add_delete_compute
@ -56,6 +59,7 @@ class HaScaleGroup3(TestBasic):
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE)
self.show_step(2)
self.show_step(3)
self.fuel_web.update_nodes(
cluster_id,
{
@ -64,24 +68,26 @@ class HaScaleGroup3(TestBasic):
'slave-03': ['controller']
}
)
self.show_step(3)
self.show_step(4)
self.fuel_web.deploy_cluster_wait(cluster_id)
nodes = {'slave-04': ['compute'],
'slave-05': ['compute']}
self.show_step(4)
self.show_step(5)
self.fuel_web.update_nodes(
cluster_id, nodes,
True, False
)
self.show_step(5)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(6)
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(8)
self.fuel_web.verify_network(cluster_id)
self.show_step(9)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.show_step(8)
self.show_step(10)
nodes = {'slave-06': ['compute']}
self.fuel_web.update_nodes(
cluster_id, nodes,
@ -92,24 +98,24 @@ class HaScaleGroup3(TestBasic):
cluster_id, nodes,
False, True
)
self.show_step(9)
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
self.show_step(11)
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
self.show_step(12)
self.fuel_web.verify_network(cluster_id)
self.show_step(10)
self.show_step(13)
self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
self.show_step(12)
self.show_step(14)
nodes = {'slave-04': ['compute']}
self.fuel_web.update_nodes(
cluster_id, nodes,
False, True
)
self.show_step(13)
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
self.show_step(14)
self.fuel_web.verify_network(cluster_id)
self.show_step(15)
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
self.show_step(16)
self.fuel_web.verify_network(cluster_id)
self.show_step(17)
self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
self.env.make_snapshot("add_delete_compute")
@ -117,10 +123,11 @@ class HaScaleGroup3(TestBasic):
groups=["add_delete_cinder"])
@log_snapshot_after_test
def add_delete_cinder(self):
"""Check add, add/delete, delete cinder node
"""Deployment with 3 controllers, NeutronVlan, with add, delete,
add/delete cinder node
Scenario:
1. Create cluster
1. Create cluster: Neutron VLAN, default storages
2. Add 3 controller and 2 compute node
3. Deploy the cluster
4. Add 1 cinder nodes


@ -27,10 +27,12 @@ class HaScaleGroup4(TestBasic):
groups=["add_delete_ceph"])
@log_snapshot_after_test
def add_delete_ceph(self):
"""Check add, add/delete, delete ceph node
"""Deployment with 3 controllers, NeutronVlan, with add, delete,
add/delete ceph node
Scenario:
1. Create cluster
1. Create cluster: Neutron VLAN, ceph for volumes and images,
ceph for ephemeral and Rados GW
2. Add 3 controller, 1 compute, 3 ceph nodes
3. Deploy the cluster
4. Add 1 ceph node
@ -132,10 +134,12 @@ class HaScaleGroup4(TestBasic):
groups=["add_delete_cinder_ceph"])
@log_snapshot_after_test
def add_delete_cinder_ceph(self):
"""Check add, add/delete, delete cinder and ceph node
"""Deployment with 3 controllers, NeutronVLan, with add, delete,
add/delete cinder and ceph node
Scenario:
1. Create cluster
1. Create cluster: Neutron VLAN, cinder for volumes
and ceph for images
2. Add 3 controller+ceph, 1 compute and 1 cinder nodes
3. Deploy the cluster
4. Add 1 ceph node and 1 cinder node
@ -144,15 +148,15 @@ class HaScaleGroup4(TestBasic):
7. Run OSTF
8. Add 1 cinder node and delete 1 deployed cinder node
9. Deploy changes
10. Run OSTF
11. Verify networks
10. Verify networks
11. Run OSTF
12. Add 1 ceph node and delete 1 deployed ceph node
13. Deploy changes
14. Verify networks
14. Verify networks
15. Run OSTF
16. Delete 1 cinder and 1 ceph node
17. Deploy changes
18. Verify networks
18. Verify networks
19. Run OSTF
Duration 120m
@ -210,9 +214,9 @@ class HaScaleGroup4(TestBasic):
)
self.show_step(9)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(11)
self.fuel_web.verify_network(cluster_id)
self.show_step(10)
self.fuel_web.verify_network(cluster_id)
self.show_step(11)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.show_step(12)


@ -30,7 +30,7 @@ class HaScaleGroup5(TestBasic):
"""Deployment with 3 controllers, NeutronVxlan, with add, delete,
add/delete compute+cinder+ceph node
Scenarion:
Scenario:
1. Deploy cluster 3 controllers, 2 computes + ceph + cinder,
Neutron VXLAN, cinder for volumes, ceph for images.
2. Verify networks
@ -163,7 +163,7 @@ class HaScaleGroup5(TestBasic):
"""Deployment with 3 controllers, NeutronVxlan, with add, delete,
add/delete controller+cinder+ceph node
Scenarion:
Scenario:
1. Deploy cluster 3 controller+cinder+ceph, 2 computes,
Neutron VXLAN, cinder for volumes, ceph for images + Rados GW
2. Verify networks


@ -29,7 +29,7 @@ class HaScaleGroup6(TestBasic):
"""Deployment with 3 controllers, NeutronVlan, with add, delete,
add/delete cinder+ceph node
Scenarion:
Scenario:
1. Deploy cluster 3 controllers, 1 compute, 2 ceph + cinder,
Neutron VLAN, cinder for volumes, ceph for images and ephemeral
2. Verify networks


@ -163,6 +163,11 @@ class TestsDeployPlatformComponents(BaseDeployPlatformComponents):
}
)
self.show_step(7)
self.show_step(8)
self.show_step(9)
self.show_step(10)
self.show_step(11)
self._deploy_and_check(cluster_id=cluster_id)
# TODO: Test is disabled until the Murano plugin is available.
@ -230,13 +235,19 @@ class TestsDeployPlatformComponents(BaseDeployPlatformComponents):
}
)
self.show_step(7)
self.show_step(8)
self.show_step(9)
self.show_step(10)
self.show_step(11)
self._deploy_and_check(cluster_id=cluster_id)
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["acceptance_deploy_platform_components_ceilometer"])
@log_snapshot_after_test
def acceptance_deploy_platform_components_ceilometer(self):
"""Deploy cluster: detached keystone, rabbitmq, database, ceilometer
"""Deploy cluster with detached keystone, rabbitmq,
database and Ceilometer
Scenario:
1. Install db, rabbitmq and keystone plugins on the master node.
@ -298,6 +309,11 @@ class TestsDeployPlatformComponents(BaseDeployPlatformComponents):
}
)
self.show_step(7)
self.show_step(8)
self.show_step(9)
self.show_step(10)
self.show_step(11)
self._deploy_and_check(cluster_id=cluster_id)
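All three tests funnel steps 8-11 through the shared _deploy_and_check() helper, which is why each gains four new show_step() calls just before it. A guess at what the helper wraps, based only on the scenario text; the real body lives in BaseDeployPlatformComponents and may differ:

def _deploy_and_check(self, cluster_id):
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)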


@ -33,18 +33,20 @@ class SeparateDbCeph(TestBasic):
groups=["separate_db_ceph_service"])
@log_snapshot_after_test
def separate_db_ceph_service(self):
"""Deploy cluster with 3 separate database roles and ceph
"""Deployment with separate db nodes and ceph for all
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 3 nodes with database role
4. Add 1 compute and 2 ceph nodes
5. Choose ceph for volumes, images, ephemeral and objects
6. Verify networks
7. Deploy the cluster
8. Verify networks
9. Run OSTF
1. Install the plugin on the master node
2. Create Ubuntu, Neutron VXLAN, ceph for all storages cluster
3. Change ceph replication factor to 2
4. Add 3 nodes with controller role
5. Add 3 nodes with db role
6. Add 1 compute node
7. Add 2 ceph nodes
8. Run network verification
9. Deploy changes
10. Run network verification
11. Run OSTF tests
Duration 120m
Snapshot separate_db_ceph_service
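Step 1 ('install the plugin on the master node') is normally a Fuel CLI call over SSH. A sketch using the ssh_manager helper seen elsewhere in this diff; the admin_ip attribute, plugin path and file name are illustrative assumptions:

self.ssh_manager.execute_on_remote(
    ip=self.ssh_manager.admin_ip,
    cmd='fuel plugins --install '
        '/var/www/nailgun/plugins/detach-database-1.0.0.noarch.rpm')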


@ -33,17 +33,20 @@ class SeparateKeystoneCeph(TestBasic):
groups=["separate_keystone_ceph_service"])
@log_snapshot_after_test
def separate_keystone_ceph_service(self):
"""Deploy cluster with 3 separate keystone roles and ceph
"""Deployment with separate keystone nodes and ceph for all
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 3 nodes with keystone+database role
4. Add 1 compute and 2 ceph nodes
5. Verify networks
6. Deploy the cluster
7. Verify networks
8. Run OSTF
1. Install database and keystone plugins on the master node
2. Create Ubuntu, Neutron VXLAN, ceph for all storages cluster
3. Change ceph replication factor to 2
4. Add 3 nodes with controller role
5. Add 3 nodes with database+keystone role
6. Add 1 compute node
7. Add 2 ceph nodes
8. Run network verification
9. Deploy changes
10. Run network verification
11. Run OSTF tests
Duration 120m
Snapshot separate_keystone_ceph_service


@ -33,17 +33,20 @@ class SeparateRabbitCeph(TestBasic):
groups=["separate_rabbit_ceph_service"])
@log_snapshot_after_test
def separate_rabbit_ceph_service(self):
"""Deploy cluster with 3 separate rabbit roles and ceph
"""Deployment with separate rabbitmq nodes and ceph for all
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
3. Add 3 nodes with rabbit role
4. Add 1 compute and 2 ceph nodes
5. Verify networks
6. Deploy the cluster
7. Verify networks
8. Run OSTF
1. Install the plugin on the master node
2. Create Ubuntu, Neutron VXLAN, ceph for all storages cluster
3. Change ceph replication factor to 2
4. Add 3 nodes with controller role
5. Add 3 nodes with rabbitmq role
6. Add 1 compute node
7. Add 2 ceph nodes
8. Run network verification
9. Deploy changes
10. Run network verification
11. Run OSTF tests
Duration 120m
Snapshot separate_rabbit_ceph_service


@ -96,7 +96,7 @@ class FailoverGroup1(TestBasic):
"""Lock DB access from primary controller
Scenario:
1. Revert environment with 3 controller nodes
1. Pre-condition - do steps from 'deploy_ha_cinder' test
2. Lock DB access from primary controller
(emulate non-responsiveness of MySQL from the controller
where the management VIP is located)
@ -153,14 +153,25 @@ class FailoverGroup1(TestBasic):
groups=['recovery_neutron_agents_after_restart'])
@log_snapshot_after_test
def recovery_neutron_agents_after_restart(self):
"""Recovery of Neutron agents after restart
"""Recovery of neutron agents after restart
Scenario:
1. Revert environment with 3 controller nodes
2. Kill Neutron agents at all on one of the controllers.
Pacemaker should restart it
3. Verify networks
4. Run OSTF tests
1. Pre-condition - do steps from 'deploy_ha_cinder' test
2. Kill all neutron agents on one of the controllers.
Pacemaker should restart them
2.1 Verify that the output of crm status | grep -A1
"clone_p_neutron-l3-agent" shows failed status for the controller
2.2 Verify that the neutron-l3-agent process was restarted
by ps aux | grep neutron-l3-agent
2.3 Verify that the output of crm status | grep -A1
"clone_p_neutron-l3-agent" shows started status for the controller
3. Verify networks
4. Run OSTF tests
Duration 20m
Snapshot recovery_neutron_agents_after_restart
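Steps 2-2.3 map directly onto the ssh_manager.execute_on_remote() helper used elsewhere in this diff; the pkill command and the [n] bracket trick in the grep are assumptions:

self.ssh_manager.execute_on_remote(
    ip=controller['ip'], cmd='pkill -9 -f neutron-l3-agent')
# 2.1: pacemaker first reports the clone as failed
self.ssh_manager.execute_on_remote(
    ip=controller['ip'],
    cmd='crm status | grep -A1 "clone_p_neutron-l3-agent"')
# 2.2: a fresh agent process should appear ([n] stops grep from
# matching its own command line)
self.ssh_manager.execute_on_remote(
    ip=controller['ip'], cmd='ps aux | grep "[n]eutron-l3-agent"')
# 2.3: the same crm query should now show the clone as started
self.ssh_manager.execute_on_remote(
    ip=controller['ip'],
    cmd='crm status | grep -A1 "clone_p_neutron-l3-agent"')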
@ -239,10 +250,10 @@ class FailoverGroup1(TestBasic):
groups=['safe_reboot_primary_controller'])
@log_snapshot_after_test
def safe_reboot_primary_controller(self):
"""Safe reboot of primary controller with Cinder for storage
"""Safe reboot of primary controller
Scenario:
1. Revert environment with 3 controller nodes
1. Pre-condition - do steps from 'deploy_ha_cinder' test
2. Safe reboot of primary controller
3. Wait up to 10 minutes for HA readiness
4. Verify networks
@ -282,11 +293,11 @@ class FailoverGroup1(TestBasic):
groups=['hard_reset_primary_controller'])
@log_snapshot_after_test
def hard_reset_primary_controller(self):
"""Hard reset of primary controller with Cinder for storage
"""Hard reset of primary controller
Scenario:
1. Revert environment with 3 controller nodes
2. Safe reboot of primary controller
1. Pre-condition - do steps from 'deploy_ha_cinder' test
2. Hard reset of primary controller
3. Wait up to 10 minutes for HA readiness
4. Verify networks
5. Run OSTF tests
@ -328,7 +339,7 @@ class FailoverGroup1(TestBasic):
"""Power outage of Neutron vlan, cinder/swift cluster
Scenario:
1. Revert environment with 3 controller nodes
1. Pre-condition - do steps from 'deploy_ha_cinder' test
2. Create 2 instances
3. Create 2 volumes
4. Attach volumes to instances
@ -389,7 +400,7 @@ class FailoverGroup1(TestBasic):
self.fuel_web.assert_ha_services_ready(cluster_id)
self.show_step(10)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.fuel_web.verify_network(cluster_id)
self.show_step(11)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)


@ -93,10 +93,10 @@ class FailoverGroup2(TestBasic):
groups=['safe_reboot_primary_controller_ceph'])
@log_snapshot_after_test
def safe_reboot_primary_controller_ceph(self):
"""Safe reboot of primary controller with Ceph for storage
"""Safe reboot of primary controller on ceph cluster
Scenario:
1. Revert environment with 3 controller nodes
1. Pre-condition - do steps from 'deploy_ha_ceph' test
2. Safe reboot of primary controller
3. Wait up to 10 minutes for HA readiness
4. Verify networks
@ -107,7 +107,7 @@ class FailoverGroup2(TestBasic):
"""
self.show_step(1, initialize=True)
self.env.revert_snapshot('deploy_ha_cinder')
self.env.revert_snapshot('deploy_ha_ceph')
cluster_id = self.fuel_web.get_last_created_cluster()
controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
@ -136,11 +136,11 @@ class FailoverGroup2(TestBasic):
groups=['hard_reset_primary_controller_ceph'])
@log_snapshot_after_test
def hard_reboot_primary_controller_ceph(self):
"""Hard reset of primary controller with Ceph for storage
"""Hard reboot of primary controller with Ceph for storage
Scenario:
1. Revert environment with 3 controller nodes
2. Safe reboot of primary controller
1. Pre-condition - do steps from 'deploy_ha_ceph' test
2. Hard reboot of primary controller
3. Wait up to 10 minutes for HA readiness
4. Verify networks
5. Run OSTF tests
@ -160,17 +160,22 @@ class FailoverGroup2(TestBasic):
'found {} nodes!'.format(len(controllers)))
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.show_step(7)
target_controller = self.fuel_web.get_nailgun_primary_node(
self.fuel_web.get_devops_node_by_nailgun_node(controllers[0]))
self.fuel_web.cold_restart_nodes([target_controller])
self.show_step(3)
self.show_step(8)
self.fuel_web.assert_ha_services_ready(cluster_id, timeout=60 * 10)
self.show_step(4)
self.show_step(9)
self.fuel_web.verify_network(cluster_id)
self.show_step(5)
self.show_step(10)
self.fuel_web.run_ostf(cluster_id)
self.env.make_snapshot('hard_reset_primary_controller_ceph')


@ -95,19 +95,19 @@ class FailoverGroupMongo(TestBasic):
groups=['kill_mongo_processes'])
@log_snapshot_after_test
def kill_mongo_processes(self):
"""Kill MongoDB processes
"""Kill mongo processes
Scenario:
1. Revert environment with MongoDB nodes
2. Kill mongodb processes on 1st node
1. Pre-condition - do steps from 'deploy_mongo_cluster' test
2. Kill mongo processes on 1st node
3. Wait 1 minute
4. Check new mongodb processes exist on 1st node
5. Kill mongodb processes on 2nd node
4. Check new mongo processes exist on 1st node
5. Kill mongo processes on 2nd node
6. Wait 1 minute
7. Check new mongodb processes exist on 2nd node
8. Kill mongodb processes on 3rd node
7. Check new mongo processes exist on 2nd node
8. Kill mongo processes on 3rd node
9. Wait 1 minute
10. Check new mongodb processes exist on 3rd node
10. Check new mongo processes exist on 3rd node
11. Run OSTF tests
Duration 60m
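One iteration of the kill/wait/verify loop (steps 2-4), sketched with the same SSH helper; the pkill/pgrep commands and the proboscis assert are assumptions in fuel-qa style (imports would normally sit at module top):

import time

from proboscis.asserts import assert_true

self.ssh_manager.execute_on_remote(
    ip=mongo_node['ip'], cmd='pkill -9 mongod')
time.sleep(60)  # step 3: wait 1 minute
result = self.ssh_manager.execute_on_remote(
    ip=mongo_node['ip'], cmd='pgrep mongod')
assert_true(result['stdout'], 'mongod was not restarted')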
@ -154,11 +154,11 @@ class FailoverGroupMongo(TestBasic):
@test(depends_on_groups=['deploy_mongo_cluster'],
groups=['close_connections_for_mongo'])
def close_connections_for_mongo(self):
"""Block network connections for 1 MongoDB node
"""Close connection for Mongo node
Scenario:
1. Revert environment with MongoDB nodes
2. Close management network for 1 MongoDB node
1. Pre-condition - do steps from 'deploy_mongo_cluster' test
2. Close management network for 1 Mongo node
3. Run OSTF tests
Duration 60m
@ -192,10 +192,10 @@ class FailoverGroupMongo(TestBasic):
@test(depends_on_groups=['deploy_mongo_cluster'],
groups=['shut_down_mongo_node'])
def shut_down_mongo_node(self):
"""Shut down MongoDB node
"""Shut down Mongo node for Neutron
Scenario:
1. Revert environment with MongoDB nodes
1. Pre-condition - do steps from 'deploy_mongo_cluster' test
2. Shut down 1 Mongo node
3. Verify networks
4. Run OSTF tests
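Step 2 of close_connections_for_mongo above can be emulated by dropping traffic on the management bridge of one mongo node; the br-mgmt interface name and the iptables rules are assumptions:

self.ssh_manager.execute_on_remote(
    ip=mongo_node['ip'],
    cmd='iptables -I INPUT -i br-mgmt -j DROP && '
        'iptables -I OUTPUT -o br-mgmt -j DROP')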