From 08db3ad05f8cc7dba955f92e39dbbac84a0c4204 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Wed, 18 Dec 2019 09:50:01 +0100 Subject: [PATCH] Retire repository Fuel (from openstack namespace) and fuel-ccp (in x namespace) repositories are unused and ready to retire. This change removes all content from the repository and adds the usual README file to point out that the repository is retired following the process from https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project See also http://lists.openstack.org/pipermail/openstack-discuss/2019-December/011647.html Depends-On: https://review.opendev.org/699362 Change-Id: I37b6a82c9c3c3893bb4b9b6a4c4b5a83a6d8193c --- .coveragerc | 5 - .gitignore | 63 - .pylintrc | 481 --- .pylintrc_gerrit | 482 --- MAINTAINERS | 72 - README.md | 25 - README.rst | 10 + core/__init__.py | 17 - core/_tests/__init__.py | 0 core/_tests/helpers/__init__.py | 0 core/_tests/helpers/test_log_helpers.py | 265 -- core/_tests/helpers/test_setup_teardown.py | 255 -- core/_tests/models/__init__.py | 0 core/_tests/models/fuel_client/__init__.py | 0 .../_tests/models/fuel_client/test_adapter.py | 115 - core/_tests/models/fuel_client/test_client.py | 52 - .../models/fuel_client/test_ostf_client.py | 128 - core/_tests/models/test_collector_client.py | 153 - core/_tests/models/test_value_objects.py | 86 - core/helpers/__init__.py | 0 core/helpers/log_helpers.py | 270 -- core/helpers/setup_teardown.py | 337 -- core/models/__init__.py | 0 core/models/collector_client.py | 79 - core/models/fuel_client/__init__.py | 17 - core/models/fuel_client/base_client.py | 59 - core/models/fuel_client/client.py | 35 - core/models/fuel_client/ostf_client.py | 79 - core/models/value_objects.py | 182 - core/pytest.ini | 3 - doc/Makefile | 27 - doc/base_tests.rst | 768 ---- doc/conf.py | 51 - doc/fuel_tests.rst | 34 - doc/general.rst | 19 - doc/helpers.rst | 170 - doc/index.rst | 14 - doc/models.rst | 24 - doc/requirements.txt | 1 - doc/system_tests.rst | 137 - doc/testrail.rst | 69 - fuel_tests/__init__.py | 0 fuel_tests/models/__init__.py | 0 fuel_tests/models/manager.py | 329 -- fuel_tests/tests/__init__.py | 0 fuel_tests/tests/conftest.py | 193 - fuel_tests/tests/test_admin_node.py | 91 - fuel_tests/tests/test_ceph.py | 115 - fuel_tests/tests/test_discovery_slave.py | 193 - fuel_tests/tests/test_fuel_migration.py | 244 -- fuel_tests/tests/test_l2_network_config.py | 153 - fuel_tests/tests/test_neutron.py | 261 -- fuel_tests/tests/test_neutron_ipv6.py | 246 -- fuel_tests/tests/test_restart.py | 92 - fuelweb_test/__init__.py | 104 - .../config_templates/create_primary_role.yaml | 13 - .../config_templates/create_role.yaml | 12 - .../config_templates/custom_graph_tasks.yaml | 27 - .../config_templates/custom_yaql_tasks.yaml | 9 - fuelweb_test/config_templates/keystone.yaml | 8 - .../config_templates/keystone_ldap.yaml | 62 - .../config_templates/master_node_tasks.yaml | 22 - fuelweb_test/config_templates/neutron.yaml | 16 - .../config_templates/new_fields_compute.yaml | 8 - .../new_fields_controller.yaml | 62 - fuelweb_test/config_templates/nova_cpu.yaml | 8 - .../config_templates/nova_cpu_old.yaml | 8 - fuelweb_test/config_templates/nova_disk.yaml | 8 - .../nova_disk_cinder_role.yaml | 11 - fuelweb_test/config_templates/nova_quota.yaml | 13 - .../config_templates/prepare_release_image.py | 59 - .../release_custom_tasks.yaml | 7 - fuelweb_test/helpers/__init__.py | 0 fuelweb_test/helpers/ceph.py | 276 -- fuelweb_test/helpers/checkers.py | 1555 -------- 
fuelweb_test/helpers/cic_maintenance_mode.py | 54 - fuelweb_test/helpers/cloud_image.py | 101 - fuelweb_test/helpers/common.py | 315 -- fuelweb_test/helpers/decorators.py | 533 --- fuelweb_test/helpers/eb_tables.py | 84 - fuelweb_test/helpers/fuel_actions.py | 579 --- fuelweb_test/helpers/fuel_release_hacks.py | 68 - fuelweb_test/helpers/gerrit/__init__.py | 0 fuelweb_test/helpers/gerrit/content_parser.py | 71 - fuelweb_test/helpers/gerrit/gerrit_client.py | 120 - .../helpers/gerrit/gerrit_info_provider.py | 106 - fuelweb_test/helpers/gerrit/rules.py | 101 - fuelweb_test/helpers/gerrit/utils.py | 47 - .../helpers/granular_deployment_checkers.py | 102 - .../helpers/instance_initial_scenario | 10 - fuelweb_test/helpers/ironic_actions.py | 118 - fuelweb_test/helpers/log_server.py | 81 - fuelweb_test/helpers/metaclasses.py | 37 - .../helpers/multiple_networks_hacks.py | 83 - fuelweb_test/helpers/nessus.py | 197 - fuelweb_test/helpers/os_actions.py | 804 ---- fuelweb_test/helpers/ovs.py | 94 - fuelweb_test/helpers/pacemaker.py | 131 - fuelweb_test/helpers/patching.py | 622 --- fuelweb_test/helpers/rally.py | 427 -- fuelweb_test/helpers/regenerate_centos_repo | 5 - fuelweb_test/helpers/regenerate_repo.py | 388 -- fuelweb_test/helpers/regenerate_ubuntu_repo | 92 - fuelweb_test/helpers/replace_repos.py | 238 -- fuelweb_test/helpers/security.py | 129 - fuelweb_test/helpers/ssh_manager.py | 412 -- fuelweb_test/helpers/ssl_helpers.py | 88 - fuelweb_test/helpers/tempest.conf.template | 76 - fuelweb_test/helpers/uca.py | 39 - fuelweb_test/helpers/utils.py | 1637 -------- fuelweb_test/models/__init__.py | 1 - fuelweb_test/models/collector_client.py | 32 - fuelweb_test/models/environment.py | 814 ---- fuelweb_test/models/fuel_web_client.py | 3544 ----------------- fuelweb_test/models/nailgun_client.py | 920 ----- fuelweb_test/network_templates/ceph.yaml | 307 -- fuelweb_test/network_templates/cinder.yaml | 302 -- .../network_templates/cinder_add_nodes.yaml | 308 -- fuelweb_test/network_templates/default.yaml | 135 - .../default_no_mgmt_nwk.yaml | 126 - .../network_templates/default_ovs.yaml | 146 - fuelweb_test/network_templates/hardware.yaml | 223 -- .../network_templates/public_api.yaml | 164 - .../network_templates/two_nodegroups.yaml | 283 -- fuelweb_test/network_templates/upgrades.yaml | 313 -- fuelweb_test/ostf_test_mapping.py | 130 - fuelweb_test/rally/scenarios/nova.json | 26 - fuelweb_test/rally/scenarios/scenarios.yaml | 16 - fuelweb_test/requirements-devops-source.txt | 6 - fuelweb_test/requirements-devops.txt | 3 - fuelweb_test/requirements.txt | 35 - fuelweb_test/settings.py | 761 ---- fuelweb_test/testrail/__init__.py | 20 - fuelweb_test/testrail/builds.py | 205 - fuelweb_test/testrail/datetime_util.py | 36 - .../generate_failure_group_statistics.py | 845 ---- fuelweb_test/testrail/generate_statistics.py | 533 --- fuelweb_test/testrail/launchpad_client.py | 49 - fuelweb_test/testrail/report.py | 717 ---- fuelweb_test/testrail/report_pi.py | 209 - .../testrail/report_tempest_results.py | 282 -- fuelweb_test/testrail/settings.py | 96 - fuelweb_test/testrail/testrail.py | 116 - fuelweb_test/testrail/testrail_client.py | 489 --- .../testrail/upload_cases_description.py | 524 --- .../testrail/upload_tempest_test_suite.py | 232 -- fuelweb_test/tests/__init__.py | 0 fuelweb_test/tests/base_test_case.py | 510 --- fuelweb_test/tests/cluster_configs.yaml | 177 - fuelweb_test/tests/plugins/__init__.py | 0 .../tests/plugins/plugin_contrail/__init__.py | 0 .../test_fuel_plugin_contrail.py | 795 ---- 
.../tests/plugins/plugin_emc/__init__.py | 0 .../plugins/plugin_emc/test_plugin_emc.py | 218 - .../plugins/plugin_etckeeper/__init__.py | 0 .../plugin_etckeeper/test_plugin_etckeeper.py | 124 - .../tests/plugins/plugin_example/__init__.py | 0 .../test_fuel_plugin_example.py | 508 --- .../test_fuel_plugin_example_postdeploy.py | 580 --- .../plugins/plugin_glusterfs/__init__.py | 0 .../plugin_glusterfs/test_plugin_glusterfs.py | 222 -- .../tests/plugins/plugin_lbaas/__init__.py | 0 .../plugins/plugin_lbaas/test_plugin_lbaas.py | 270 -- .../tests/plugins/plugin_murano/__init__.py | 0 .../plugin_murano/test_plugin_murano.py | 133 - .../tests/plugins/plugin_reboot/__init__.py | 0 .../plugins/plugin_reboot/reboot_tasks.yaml | 41 - .../plugin_reboot/test_plugin_reboot_task.py | 267 -- .../plugin_vip_reservation/__init__.py | 0 .../plugin_vip_reservation/metadata.yaml | 16 - .../plugin_vip_reservation/network_roles.yaml | 14 - .../plugins/plugin_vip_reservation/tasks.yaml | 1 - .../test_plugin_vip_reservation.py | 402 -- .../tests/plugins/plugin_zabbix/__init__.py | 0 .../plugin_zabbix/test_plugin_zabbix.py | 675 ---- fuelweb_test/tests/test_admin_node.py | 614 --- fuelweb_test/tests/test_backup_restore.py | 448 --- fuelweb_test/tests/test_bdd.py | 80 - fuelweb_test/tests/test_bond_offloading.py | 260 -- fuelweb_test/tests/test_bonding.py | 567 --- fuelweb_test/tests/test_bonding_base.py | 252 -- fuelweb_test/tests/test_ceph.py | 1129 ------ fuelweb_test/tests/test_cgroups.py | 485 --- fuelweb_test/tests/test_cli.py | 674 ---- fuelweb_test/tests/test_cli_base.py | 502 --- fuelweb_test/tests/test_cpu_pinning.py | 657 --- fuelweb_test/tests/test_custom_hostname.py | 288 -- fuelweb_test/tests/test_dpdk.py | 429 -- fuelweb_test/tests/test_environment_action.py | 523 --- fuelweb_test/tests/test_graph_extension.py | 152 - fuelweb_test/tests/test_ha_one_controller.py | 937 ----- .../tests/test_ha_one_controller_base.py | 63 - fuelweb_test/tests/test_ironic_base.py | 510 --- fuelweb_test/tests/test_jumbo_frames.py | 575 --- .../tests/test_manual_vip_allocation.py | 191 - fuelweb_test/tests/test_multipath_devices.py | 208 - fuelweb_test/tests/test_multiple_networks.py | 775 ---- fuelweb_test/tests/test_multiqueue.py | 153 - fuelweb_test/tests/test_net_templates.py | 459 --- fuelweb_test/tests/test_net_templates_base.py | 499 --- .../test_net_templates_multiple_networks.py | 127 - fuelweb_test/tests/test_neutron.py | 256 -- fuelweb_test/tests/test_neutron_ipv6.py | 133 - fuelweb_test/tests/test_neutron_public.py | 155 - fuelweb_test/tests/test_neutron_tun.py | 407 -- fuelweb_test/tests/test_neutron_tun_base.py | 69 - .../tests/test_node_reinstallation.py | 782 ---- fuelweb_test/tests/test_offloading_types.py | 243 -- fuelweb_test/tests/test_ovs_firewall.py | 740 ---- fuelweb_test/tests/test_public_api.py | 222 -- fuelweb_test/tests/test_pullrequest.py | 62 - fuelweb_test/tests/test_reduced_footprint.py | 728 ---- fuelweb_test/tests/test_services.py | 980 ----- .../tests/test_services_reconfiguration.py | 1426 ------- fuelweb_test/tests/test_sriov.py | 137 - fuelweb_test/tests/test_ssl.py | 117 - .../tests/test_states_unlock_settings_tab.py | 408 -- fuelweb_test/tests/test_support_hugepages.py | 464 --- fuelweb_test/tests/test_ubuntu_bootstrap.py | 607 --- .../tests/test_unlock_settings_tab.py | 288 -- fuelweb_test/tests/tests_cli/__init__.py | 0 .../tests/tests_cli/test_cli_deploy.py | 182 - .../tests/tests_cli/test_cli_deploy_ceph.py | 196 - fuelweb_test/tests/tests_cli/test_cli_role.py | 429 -- 
fuelweb_test/tests/tests_configdb/__init__.py | 0 .../tests/tests_configdb/test_configdb_api.py | 282 -- .../tests/tests_configdb/test_configdb_cli.py | 587 --- .../tests/tests_custom_graph/__init__.py | 0 .../tests_custom_graph/test_custom_graph.py | 911 ----- .../tests/tests_deployments/__init__.py | 0 .../tests_neutron_tun/__init__.py | 0 .../tests_neutron_tun/test_ha_tun_group_1.py | 175 - .../tests_neutron_tun/test_ha_tun_group_2.py | 167 - .../tests_neutron_tun/test_ha_tun_group_3.py | 214 - .../tests_neutron_vlan/__init__.py | 0 .../test_ha_vlan_group_1.py | 176 - .../test_ha_vlan_group_2.py | 157 - .../test_ha_vlan_group_3.py | 174 - .../test_ha_vlan_group_4.py | 183 - .../test_ha_vlan_group_5.py | 301 -- .../test_ha_vlan_group_6.py | 208 - .../test_ha_vlan_group_7.py | 162 - .../tests/tests_extra_computes/__init__.py | 0 .../base_extra_computes.py | 575 --- .../test_ol_basic_actions.py | 336 -- .../tests_extra_computes/test_ol_migration.py | 226 -- .../test_rh_basic_actions.py | 332 -- .../tests_extra_computes/test_rh_migration.py | 229 -- fuelweb_test/tests/tests_ibp/__init__.py | 0 fuelweb_test/tests/tests_ibp/test_ibp.py | 296 -- fuelweb_test/tests/tests_lcm/__init__.py | 0 fuelweb_test/tests/tests_lcm/base_lcm_test.py | 857 ---- .../ensurability/cinder.yaml | 63 - .../ensurability/cluster_settings.yaml | 421 -- .../ensurability/compute.yaml | 89 - .../ensurability/controller.yaml | 322 -- .../ensurability/nodes_settings.yaml | 51 - .../idempotency/cinder.yaml | 48 - .../idempotency/compute.yaml | 67 - .../idempotency/controller.yaml | 194 - .../ensurability/cluster_settings.yaml | 421 -- .../ensurability/compute.yaml | 94 - .../ensurability/controller.yaml | 335 -- .../ensurability/ironic.yaml | 56 - .../ensurability/nodes_settings.yaml | 51 - .../idempotency/compute.yaml | 67 - .../idempotency/controller.yaml | 187 - .../idempotency/ironic.yaml | 43 - .../ensurability/cluster_settings.yaml | 421 -- .../ensurability/compute.yaml | 89 - .../ensurability/controller.yaml | 332 -- .../ensurability/mongo.yaml | 60 - .../ensurability/nodes_settings.yaml | 51 - .../idempotency/compute.yaml | 66 - .../idempotency/controller.yaml | 188 - .../idempotency/mongo.yaml | 45 - .../ensurability/ceph-osd.yaml | 62 - .../ensurability/cluster_settings.yaml | 421 -- .../ensurability/compute.yaml | 89 - .../ensurability/controller.yaml | 406 -- .../ensurability/nodes_settings.yaml | 51 - .../idempotency/ceph-osd.yaml | 46 - .../idempotency/compute.yaml | 66 - .../idempotency/controller.yaml | 194 - .../ensurability/ceph-osd_compute.yaml | 106 - .../ensurability/cluster_settings.yaml | 421 -- .../ensurability/controller_mongo.yaml | 268 -- .../ensurability/nodes_settings.yaml | 34 - .../primary-controller_mongo.yaml | 468 --- .../idempotency/ceph-osd_compute.yaml | 67 - .../idempotency/controller_mongo.yaml | 132 - .../idempotency/primary-controller_mongo.yaml | 185 - .../tests/tests_lcm/test_ensurability.py | 444 --- .../tests/tests_lcm/test_idempotency.py | 223 -- .../tests/tests_lcm/test_task_coverage.py | 136 - fuelweb_test/tests/tests_mirrors/__init__.py | 0 .../tests/tests_mirrors/test_create_mirror.py | 79 - .../tests/tests_mirrors/test_use_mirror.py | 203 - .../tests/tests_multirole/__init__.py | 0 .../tests_multirole/test_mongo_multirole.py | 196 - .../tests_multirole/test_multirole_group_1.py | 169 - .../tests/tests_os_components/__init__.py | 0 .../test_mixed_os_components.py | 111 - .../test_murano_os_component.py | 97 - .../test_sahara_os_component.py | 106 - 
fuelweb_test/tests/tests_patching/__init__.py | 0 .../tests/tests_patching/test_patching.py | 457 --- fuelweb_test/tests/tests_scale/__init__.py | 0 .../tests/tests_scale/test_scale_group_1.py | 281 -- .../tests/tests_scale/test_scale_group_2.py | 189 - .../tests/tests_scale/test_scale_group_3.py | 214 - .../tests/tests_scale/test_scale_group_4.py | 262 -- .../tests/tests_scale/test_scale_group_5.py | 292 -- .../tests/tests_scale/test_scale_group_6.py | 153 - fuelweb_test/tests/tests_security/__init__.py | 0 .../tests/tests_security/test_lynis_audit.py | 129 - .../tests/tests_security/test_run_nessus.py | 280 -- .../tests/tests_separate_services/__init__.py | 0 .../test_deploy_platform_components.py | 395 -- .../test_separate_haproxy.py | 171 - .../test_separate_horizon.py | 250 -- .../test_separate_multiroles.py | 218 - .../test_separate_rabbitmq.py | 283 -- .../test_separate_rabbitmq_ceph.py | 127 - fuelweb_test/tests/tests_strength/__init__.py | 0 .../test_cic_maintenance_mode.py | 450 --- .../tests/tests_strength/test_failover.py | 383 -- .../tests_strength/test_failover_base.py | 1414 ------- .../tests_strength/test_failover_group_1.py | 406 -- .../tests_strength/test_failover_group_2.py | 352 -- .../tests_strength/test_failover_group_3.py | 466 --- .../tests_strength/test_failover_mongo.py | 250 -- .../tests_strength/test_failover_with_ceph.py | 74 - .../tests_strength/test_huge_environments.py | 329 -- .../tests/tests_strength/test_image_based.py | 110 - .../tests/tests_strength/test_load.py | 141 - .../tests/tests_strength/test_load_base.py | 77 - .../test_master_node_failover.py | 232 -- .../tests_strength/test_network_outage.py | 405 -- .../tests/tests_strength/test_neutron.py | 252 -- .../tests/tests_strength/test_neutron_base.py | 521 --- .../test_ostf_repeatable_tests.py | 119 - .../tests_strength/test_repetitive_restart.py | 156 - .../tests/tests_strength/test_restart.py | 390 -- fuelweb_test/tests/tests_uca/__init__.py | 0 fuelweb_test/tests/tests_uca/test_uca.py | 348 -- fuelweb_test/tests/tests_upgrade/__init__.py | 41 - .../example_upgrade_scenario.yaml | 23 - .../tests/tests_upgrade/octane_patcher.sh | 26 - .../tests/tests_upgrade/test_clone_env.py | 327 -- .../test_data_driven_upgrade_base.py | 468 --- .../test_data_driven_upgrade_ceph_ha.py | 291 -- ...ata_driven_upgrade_multirack_deployment.py | 386 -- .../test_data_driven_upgrade_net_tmpl.py | 363 -- .../test_data_driven_upgrade_no_cluster.py | 204 - .../test_data_driven_upgrade_plugin.py | 227 -- .../test_data_driven_upgrade_smoke.py | 397 -- .../tests_upgrade/test_node_reassignment.py | 269 -- .../tests/tests_upgrade/test_os_upgrade.py | 422 -- .../tests/tests_upgrade/upgrade_base.py | 450 --- .../tests/tests_upgrade/upgrader_tool.py | 189 - gates_tests/__init__.py | 0 gates_tests/devops_templates/default.yaml | 235 -- .../devops_templates/ironic_template.yaml | 208 - gates_tests/helpers/__init__.py | 0 gates_tests/helpers/exceptions.py | 50 - .../helpers/fuel_library_modules_mapping.yaml | 199 - .../openstack_puppet_projects_mapping.yaml | 40 - gates_tests/helpers/utils.py | 531 --- gates_tests/tests/__init__.py | 0 gates_tests/tests/test_nailgun_agent.py | 103 - gates_tests/tests/test_review_fuel_web.py | 93 - gates_tests/tests/test_review_in_astute.py | 115 - .../tests/test_review_in_fuel_agent.py | 134 - .../tests/test_review_in_fuel_client.py | 206 - gates_tests/tests/test_review_in_ostf.py | 143 - .../deb/deployment/ceph/ceph-deploy/test.yaml | 3 - .../deb/deployment/ceph/libleveldb1/test.yaml | 3 - 
.../deb/deployment/ceph/libsnappy1/test.yaml | 3 - .../ceph/libtcmalloc-minimal4/test.yaml | 3 - .../deb/deployment/ceph/libunwind8/test.yaml | 3 - .../deployment/ceph/python-blinker/test.yaml | 3 - .../deployment/ceph/python-flask/test.yaml | 3 - .../ceph/python-itsdangerous/test.yaml | 3 - .../ceph/python-pyinotify/test.yaml | 3 - .../deployment/ceph/python-werkzeug/test.yaml | 3 - packages_tests/deb/deployment/ceph/test.yaml | 3 - .../deb/deployment/ceph/xfsprogs/test.yaml | 3 - .../deployment/cinder/cinder-backup/test.yaml | 3 - .../deb/deployment/cinder/test.yaml | 3 - .../deb/deployment/general/test.yaml | 3 - .../deb/deployment/glance/test.yaml | 3 - .../deb/deployment/neutron/dkms/test.yaml | 3 - .../deployment/neutron/libipset3/test.yaml | 3 - .../neutron/neutron-common/test.yaml | 3 - .../deb/deployment/neutron/test.yaml | 3 - .../nova/nova-compute-kvm/test.yaml | 3 - .../nova/nova-compute-libvirt/test.yaml | 3 - .../nova/nova-compute-qemu/test.yaml | 3 - .../deployment/nova/nova-compute/test.yaml | 3 - .../deployment/nova/nova-network/test.yaml | 3 - packages_tests/deb/deployment/nova/test.yaml | 3 - packages_tests/deb/deployment/swift/test.yaml | 3 - packages_tests/deb/deployment/test.yaml | 3 - packages_tests/deb/master/test.yaml | 3 - packages_tests/deb/packages.yaml | 3141 --------------- packages_tests/deb/provisioning/test.yaml | 3 - .../rpm/deployment/ceph/at/test.yaml | 3 - .../rpm/deployment/ceph/bc/test.yaml | 3 - .../rpm/deployment/ceph/ceph-deploy/test.yaml | 3 - .../rpm/deployment/ceph/ceph/test.yaml | 3 - .../rpm/deployment/ceph/db4-devel/test.yaml | 3 - .../rpm/deployment/ceph/ed/test.yaml | 3 - .../rpm/deployment/ceph/gdbm-devel/test.yaml | 3 - .../deployment/ceph/gperftools-libs/test.yaml | 3 - .../rpm/deployment/ceph/leveldb/test.yaml | 3 - .../rpm/deployment/ceph/libunwind/test.yaml | 3 - .../rpm/deployment/ceph/mailx/test.yaml | 3 - .../rpm/deployment/ceph/patch/test.yaml | 3 - .../rpm/deployment/ceph/pax/test.yaml | 3 - .../deployment/ceph/python-flask/test.yaml | 3 - .../ceph/python-itsdangerous/test.yaml | 3 - .../deployment/ceph/python-werkzeug/test.yaml | 3 - .../deployment/ceph/redhat-lsb-core/test.yaml | 3 - .../rpm/deployment/ceph/snappy/test.yaml | 3 - packages_tests/rpm/deployment/ceph/test.yaml | 3 - .../rpm/deployment/ceph/time/test.yaml | 3 - .../rpm/deployment/cinder/libmnl/test.yaml | 3 - .../rpm/deployment/cinder/test.yaml | 3 - .../rpm/deployment/general/test.yaml | 3 - .../rpm/deployment/glance/test.yaml | 3 - .../rpm/deployment/neutron/dkms/test.yaml | 3 - .../rpm/deployment/neutron/ipset/test.yaml | 3 - .../deployment/neutron/libipset3/test.yaml | 3 - .../neutron/openstack-neutron/test.yaml | 3 - .../neutron/openvswitch-common/test.yaml | 3 - .../neutron/openvswitch-switch/test.yaml | 3 - .../neutron/python-jsonrpclib/test.yaml | 3 - .../rpm/deployment/neutron/test.yaml | 3 - .../nova/openstack-nova-compute/test.yaml | 3 - .../nova/openstack-nova-network/test.yaml | 3 - packages_tests/rpm/deployment/nova/test.yaml | 3 - .../perl/perl-Test-Simple/test.yaml | 3 - packages_tests/rpm/deployment/perl/test.yaml | 3 - packages_tests/rpm/deployment/swift/test.yaml | 3 - packages_tests/rpm/deployment/test.yaml | 3 - packages_tests/rpm/master/test.yaml | 3 - packages_tests/rpm/packages.yaml | 2614 ------------ packages_tests/rpm/provisioning/test.yaml | 3 - pytest.ini | 10 - run_system_test.py | 217 - run_tests.sh | 7 - system_test/__init__.py | 53 - system_test/actions/__init__.py | 26 - system_test/actions/base.py | 541 --- 
system_test/actions/fuelmaster_actions.py | 36 - system_test/actions/ostf_actions.py | 101 - system_test/actions/plugins_actions.py | 114 - system_test/actions/strength_actions.py | 378 -- system_test/core/__init__.py | 23 - system_test/core/config.py | 86 - system_test/core/decorators.py | 50 - system_test/core/discover.py | 154 - system_test/core/factory.py | 224 -- system_test/core/repository.py | 187 - system_test/helpers/__init__.py | 0 system_test/helpers/decorators.py | 84 - system_test/tests/__init__.py | 20 - system_test/tests/base.py | 25 - system_test/tests/plugins/__init__.py | 0 .../tests/plugins/plugin_example/__init__.py | 0 .../plugin_example/test_plugin_example.py | 107 - .../plugin_example/test_plugin_example_v3.py | 73 - system_test/tests/strength/__init__.py | 0 .../strength/test_destroy_controllers.py | 123 - .../tests/strength/test_filling_root.py | 77 - system_test/tests/test_create_deploy_ostf.py | 41 - system_test/tests/test_delete_after_deploy.py | 42 - system_test/tests/test_deploy_check_rados.py | 75 - .../tests/test_redeploy_after_reset.py | 46 - system_test/tests/test_redeploy_after_stop.py | 43 - .../cluster_configs/networks/neutron_gre.yaml | 4 - .../cluster_configs/networks/neutron_tun.yaml | 4 - .../networks/neutron_vlan.yaml | 4 - .../mixed/1ctrl_1comp_1cndr_mongo_2ceph.yaml | 14 - .../nodes/mixed/3ctrl_3comp_ceph.yaml | 9 - .../nodes/mixed/3ctrl_mongo_3comp_ceph.yaml | 10 - .../nodes/single/1ctrl_1comp.yaml | 7 - .../nodes/single/1ctrl_1comp_1cndr.yaml | 11 - .../nodes/single/1ctrl_1comp_1cndr_3ceph.yaml | 13 - .../single/1ctrl_1comp_3ceph_1mongo.yaml | 13 - .../nodes/single/1ctrl_2comp.yaml | 7 - .../1ctrl_2comp_1cndr_3ceph_1mongo.yaml | 16 - .../nodes/single/3ctrl_1comp_1cndr.yaml | 11 - .../nodes/single/3ctrl_2comp_1cndr.yaml | 11 - .../settings/cephVolImgRados.yaml | 5 - .../settings/cephVolImgRados_ceilometer.yaml | 5 - .../settings/cephVolImg_ceilometer.yaml | 5 - .../cephVolImg_ceilometer_sahara.yaml | 5 - .../settings/cephVolImg_wo_components.yaml | 5 - .../settings/cinder_ceilometer.yaml | 5 - .../cinder_cephImgRados_ceilometer.yaml | 5 - .../cinder_cephImgRados_wo_components.yaml | 5 - .../settings/cinder_cephImg_ceilometer.yaml | 5 - .../settings/cinder_sahara.yaml | 5 - .../settings/cinder_wo_components.yaml | 5 - .../settings/cinder_wo_componets.yaml | 5 - .../settings/components/ceilometer.yaml | 4 - .../components/ceilometer_murano.yaml | 4 - .../components/ceilometer_murano_sahara.yaml | 4 - .../components/ceilometer_sahara.yaml | 4 - .../settings/components/murano.yaml | 4 - .../settings/components/murano_sahara.yaml | 4 - .../settings/components/sahara.yaml | 4 - .../settings/components/wo_components.yaml | 4 - .../settings/storages/cephImgVol.yaml | 7 - .../settings/storages/cephImgVolRados.yaml | 7 - .../settings/storages/cinder_cephImg.yaml | 7 - .../storages/cinder_cephImgRados.yaml | 7 - .../settings/storages/cinder_only.yaml | 7 - .../devops_configs/centos_master.yaml | 190 - .../devops_configs/default.yaml | 190 - .../devops_configs/default30-2groups.yaml | 506 --- .../devops_configs/default30-2groups_.yaml | 321 -- .../devops_configs/default30-bond.yaml | 230 -- .../devops_configs/default30.yaml | 209 - .../devops_configs/external_haproxy.yaml | 454 --- .../devops_configs/ironic.yaml | 224 -- .../devops_configs/multipath.yaml | 172 - .../devops_configs/multirack.yaml | 449 --- .../devops_configs/public_api.yaml | 207 - .../devops_configs/security_scan.yaml | 202 - .../devops_configs/vcenter_ha_default.yaml | 208 - 
...entos_master_ceph_all_on_neutron_vlan.yaml | 40 - .../ceph_all_ceilo_on_neutron_tun.yaml | 13 - .../ceph_all_on_neutron_vlan.yaml | 38 - .../ceph_all_on_neutron_vlan_30-2groups.yaml | 38 - .../ceph_all_on_neutron_vlan_30-bond.yaml | 38 - .../ceph_all_on_neutron_vlan_30.yaml | 38 - .../example_test_environment.yaml | 40 - .../tests_configs/external_haproxy.yaml | 44 - ...ph_2ctrl_1comp_1comp_ceph_neutronVLAN.yaml | 13 - .../single/3ctrl_2comp_1cndr_neutronVLAN.yaml | 13 - .../tests_configs/multipath_3_nodes.yaml | 41 - .../tests_configs/multirack.yaml | 44 - .../tests_configs/public_api.yaml | 41 - ...rl_1comp_1cndr_neutronTUN_scale_3ctrl.yaml | 17 - ...h_neutronVLAN_CephImgRados_ceilometer.yaml | 14 - ..._1cndr_3ceph_neutronVLAN_cephImgRados.yaml | 13 - .../1ctrl_1comp_1cndr_neutronTUN_sahara.yaml | 13 - ...utronTUN_CephImgVol_ceilometer_sahara.yaml | 13 - .../simple/single/1ctrl_1comp_neutronTUN.yaml | 13 - .../single/1ctrl_1comp_neutronVLAN.yaml | 13 - ...1mongo_neutronVLAN_CephImg_ceilometer.yaml | 13 - .../single/1ctrl_2comp_neutronVLAN.yaml | 13 - tox.ini | 99 - utils/jenkins/conv_snapshot_file.py | 212 - utils/jenkins/fuel_logs.py | 890 ----- utils/jenkins/system_tests.sh | 395 -- 559 files changed, 10 insertions(+), 101975 deletions(-) delete mode 100644 .coveragerc delete mode 100644 .gitignore delete mode 100644 .pylintrc delete mode 100644 .pylintrc_gerrit delete mode 100644 MAINTAINERS delete mode 100644 README.md create mode 100644 README.rst delete mode 100644 core/__init__.py delete mode 100644 core/_tests/__init__.py delete mode 100644 core/_tests/helpers/__init__.py delete mode 100644 core/_tests/helpers/test_log_helpers.py delete mode 100644 core/_tests/helpers/test_setup_teardown.py delete mode 100644 core/_tests/models/__init__.py delete mode 100644 core/_tests/models/fuel_client/__init__.py delete mode 100644 core/_tests/models/fuel_client/test_adapter.py delete mode 100644 core/_tests/models/fuel_client/test_client.py delete mode 100644 core/_tests/models/fuel_client/test_ostf_client.py delete mode 100644 core/_tests/models/test_collector_client.py delete mode 100644 core/_tests/models/test_value_objects.py delete mode 100644 core/helpers/__init__.py delete mode 100644 core/helpers/log_helpers.py delete mode 100644 core/helpers/setup_teardown.py delete mode 100644 core/models/__init__.py delete mode 100644 core/models/collector_client.py delete mode 100644 core/models/fuel_client/__init__.py delete mode 100644 core/models/fuel_client/base_client.py delete mode 100644 core/models/fuel_client/client.py delete mode 100644 core/models/fuel_client/ostf_client.py delete mode 100644 core/models/value_objects.py delete mode 100644 core/pytest.ini delete mode 100644 doc/Makefile delete mode 100644 doc/base_tests.rst delete mode 100644 doc/conf.py delete mode 100644 doc/fuel_tests.rst delete mode 100644 doc/general.rst delete mode 100644 doc/helpers.rst delete mode 100644 doc/index.rst delete mode 100644 doc/models.rst delete mode 100644 doc/requirements.txt delete mode 100644 doc/system_tests.rst delete mode 100644 doc/testrail.rst delete mode 100644 fuel_tests/__init__.py delete mode 100644 fuel_tests/models/__init__.py delete mode 100644 fuel_tests/models/manager.py delete mode 100644 fuel_tests/tests/__init__.py delete mode 100644 fuel_tests/tests/conftest.py delete mode 100644 fuel_tests/tests/test_admin_node.py delete mode 100644 fuel_tests/tests/test_ceph.py delete mode 100644 fuel_tests/tests/test_discovery_slave.py delete mode 100644 
fuel_tests/tests/test_fuel_migration.py delete mode 100644 fuel_tests/tests/test_l2_network_config.py delete mode 100644 fuel_tests/tests/test_neutron.py delete mode 100644 fuel_tests/tests/test_neutron_ipv6.py delete mode 100644 fuel_tests/tests/test_restart.py delete mode 100644 fuelweb_test/__init__.py delete mode 100644 fuelweb_test/config_templates/create_primary_role.yaml delete mode 100644 fuelweb_test/config_templates/create_role.yaml delete mode 100644 fuelweb_test/config_templates/custom_graph_tasks.yaml delete mode 100644 fuelweb_test/config_templates/custom_yaql_tasks.yaml delete mode 100644 fuelweb_test/config_templates/keystone.yaml delete mode 100644 fuelweb_test/config_templates/keystone_ldap.yaml delete mode 100644 fuelweb_test/config_templates/master_node_tasks.yaml delete mode 100644 fuelweb_test/config_templates/neutron.yaml delete mode 100644 fuelweb_test/config_templates/new_fields_compute.yaml delete mode 100644 fuelweb_test/config_templates/new_fields_controller.yaml delete mode 100644 fuelweb_test/config_templates/nova_cpu.yaml delete mode 100644 fuelweb_test/config_templates/nova_cpu_old.yaml delete mode 100644 fuelweb_test/config_templates/nova_disk.yaml delete mode 100644 fuelweb_test/config_templates/nova_disk_cinder_role.yaml delete mode 100644 fuelweb_test/config_templates/nova_quota.yaml delete mode 100644 fuelweb_test/config_templates/prepare_release_image.py delete mode 100644 fuelweb_test/config_templates/release_custom_tasks.yaml delete mode 100644 fuelweb_test/helpers/__init__.py delete mode 100644 fuelweb_test/helpers/ceph.py delete mode 100644 fuelweb_test/helpers/checkers.py delete mode 100644 fuelweb_test/helpers/cic_maintenance_mode.py delete mode 100644 fuelweb_test/helpers/cloud_image.py delete mode 100644 fuelweb_test/helpers/common.py delete mode 100644 fuelweb_test/helpers/decorators.py delete mode 100644 fuelweb_test/helpers/eb_tables.py delete mode 100644 fuelweb_test/helpers/fuel_actions.py delete mode 100644 fuelweb_test/helpers/fuel_release_hacks.py delete mode 100644 fuelweb_test/helpers/gerrit/__init__.py delete mode 100644 fuelweb_test/helpers/gerrit/content_parser.py delete mode 100644 fuelweb_test/helpers/gerrit/gerrit_client.py delete mode 100644 fuelweb_test/helpers/gerrit/gerrit_info_provider.py delete mode 100644 fuelweb_test/helpers/gerrit/rules.py delete mode 100644 fuelweb_test/helpers/gerrit/utils.py delete mode 100644 fuelweb_test/helpers/granular_deployment_checkers.py delete mode 100644 fuelweb_test/helpers/instance_initial_scenario delete mode 100644 fuelweb_test/helpers/ironic_actions.py delete mode 100644 fuelweb_test/helpers/log_server.py delete mode 100644 fuelweb_test/helpers/metaclasses.py delete mode 100644 fuelweb_test/helpers/multiple_networks_hacks.py delete mode 100644 fuelweb_test/helpers/nessus.py delete mode 100644 fuelweb_test/helpers/os_actions.py delete mode 100644 fuelweb_test/helpers/ovs.py delete mode 100644 fuelweb_test/helpers/pacemaker.py delete mode 100644 fuelweb_test/helpers/patching.py delete mode 100644 fuelweb_test/helpers/rally.py delete mode 100644 fuelweb_test/helpers/regenerate_centos_repo delete mode 100644 fuelweb_test/helpers/regenerate_repo.py delete mode 100644 fuelweb_test/helpers/regenerate_ubuntu_repo delete mode 100644 fuelweb_test/helpers/replace_repos.py delete mode 100644 fuelweb_test/helpers/security.py delete mode 100644 fuelweb_test/helpers/ssh_manager.py delete mode 100644 fuelweb_test/helpers/ssl_helpers.py delete mode 100644 fuelweb_test/helpers/tempest.conf.template 
delete mode 100644 fuelweb_test/helpers/uca.py delete mode 100644 fuelweb_test/helpers/utils.py delete mode 100644 fuelweb_test/models/__init__.py delete mode 100644 fuelweb_test/models/collector_client.py delete mode 100644 fuelweb_test/models/environment.py delete mode 100644 fuelweb_test/models/fuel_web_client.py delete mode 100644 fuelweb_test/models/nailgun_client.py delete mode 100644 fuelweb_test/network_templates/ceph.yaml delete mode 100644 fuelweb_test/network_templates/cinder.yaml delete mode 100644 fuelweb_test/network_templates/cinder_add_nodes.yaml delete mode 100644 fuelweb_test/network_templates/default.yaml delete mode 100644 fuelweb_test/network_templates/default_no_mgmt_nwk.yaml delete mode 100644 fuelweb_test/network_templates/default_ovs.yaml delete mode 100644 fuelweb_test/network_templates/hardware.yaml delete mode 100644 fuelweb_test/network_templates/public_api.yaml delete mode 100644 fuelweb_test/network_templates/two_nodegroups.yaml delete mode 100644 fuelweb_test/network_templates/upgrades.yaml delete mode 100644 fuelweb_test/ostf_test_mapping.py delete mode 100644 fuelweb_test/rally/scenarios/nova.json delete mode 100644 fuelweb_test/rally/scenarios/scenarios.yaml delete mode 100644 fuelweb_test/requirements-devops-source.txt delete mode 100644 fuelweb_test/requirements-devops.txt delete mode 100644 fuelweb_test/requirements.txt delete mode 100644 fuelweb_test/settings.py delete mode 100644 fuelweb_test/testrail/__init__.py delete mode 100644 fuelweb_test/testrail/builds.py delete mode 100644 fuelweb_test/testrail/datetime_util.py delete mode 100644 fuelweb_test/testrail/generate_failure_group_statistics.py delete mode 100644 fuelweb_test/testrail/generate_statistics.py delete mode 100644 fuelweb_test/testrail/launchpad_client.py delete mode 100755 fuelweb_test/testrail/report.py delete mode 100644 fuelweb_test/testrail/report_pi.py delete mode 100644 fuelweb_test/testrail/report_tempest_results.py delete mode 100644 fuelweb_test/testrail/settings.py delete mode 100644 fuelweb_test/testrail/testrail.py delete mode 100644 fuelweb_test/testrail/testrail_client.py delete mode 100644 fuelweb_test/testrail/upload_cases_description.py delete mode 100644 fuelweb_test/testrail/upload_tempest_test_suite.py delete mode 100644 fuelweb_test/tests/__init__.py delete mode 100644 fuelweb_test/tests/base_test_case.py delete mode 100644 fuelweb_test/tests/cluster_configs.yaml delete mode 100644 fuelweb_test/tests/plugins/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_contrail/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_contrail/test_fuel_plugin_contrail.py delete mode 100644 fuelweb_test/tests/plugins/plugin_emc/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_emc/test_plugin_emc.py delete mode 100644 fuelweb_test/tests/plugins/plugin_etckeeper/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_etckeeper/test_plugin_etckeeper.py delete mode 100644 fuelweb_test/tests/plugins/plugin_example/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example.py delete mode 100644 fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example_postdeploy.py delete mode 100644 fuelweb_test/tests/plugins/plugin_glusterfs/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_glusterfs/test_plugin_glusterfs.py delete mode 100644 fuelweb_test/tests/plugins/plugin_lbaas/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_lbaas/test_plugin_lbaas.py delete mode 
100644 fuelweb_test/tests/plugins/plugin_murano/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_murano/test_plugin_murano.py delete mode 100644 fuelweb_test/tests/plugins/plugin_reboot/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_reboot/reboot_tasks.yaml delete mode 100644 fuelweb_test/tests/plugins/plugin_reboot/test_plugin_reboot_task.py delete mode 100644 fuelweb_test/tests/plugins/plugin_vip_reservation/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_vip_reservation/metadata.yaml delete mode 100644 fuelweb_test/tests/plugins/plugin_vip_reservation/network_roles.yaml delete mode 100644 fuelweb_test/tests/plugins/plugin_vip_reservation/tasks.yaml delete mode 100644 fuelweb_test/tests/plugins/plugin_vip_reservation/test_plugin_vip_reservation.py delete mode 100644 fuelweb_test/tests/plugins/plugin_zabbix/__init__.py delete mode 100644 fuelweb_test/tests/plugins/plugin_zabbix/test_plugin_zabbix.py delete mode 100644 fuelweb_test/tests/test_admin_node.py delete mode 100644 fuelweb_test/tests/test_backup_restore.py delete mode 100644 fuelweb_test/tests/test_bdd.py delete mode 100644 fuelweb_test/tests/test_bond_offloading.py delete mode 100644 fuelweb_test/tests/test_bonding.py delete mode 100644 fuelweb_test/tests/test_bonding_base.py delete mode 100644 fuelweb_test/tests/test_ceph.py delete mode 100644 fuelweb_test/tests/test_cgroups.py delete mode 100644 fuelweb_test/tests/test_cli.py delete mode 100644 fuelweb_test/tests/test_cli_base.py delete mode 100644 fuelweb_test/tests/test_cpu_pinning.py delete mode 100644 fuelweb_test/tests/test_custom_hostname.py delete mode 100644 fuelweb_test/tests/test_dpdk.py delete mode 100644 fuelweb_test/tests/test_environment_action.py delete mode 100644 fuelweb_test/tests/test_graph_extension.py delete mode 100644 fuelweb_test/tests/test_ha_one_controller.py delete mode 100644 fuelweb_test/tests/test_ha_one_controller_base.py delete mode 100644 fuelweb_test/tests/test_ironic_base.py delete mode 100644 fuelweb_test/tests/test_jumbo_frames.py delete mode 100644 fuelweb_test/tests/test_manual_vip_allocation.py delete mode 100644 fuelweb_test/tests/test_multipath_devices.py delete mode 100644 fuelweb_test/tests/test_multiple_networks.py delete mode 100644 fuelweb_test/tests/test_multiqueue.py delete mode 100644 fuelweb_test/tests/test_net_templates.py delete mode 100644 fuelweb_test/tests/test_net_templates_base.py delete mode 100644 fuelweb_test/tests/test_net_templates_multiple_networks.py delete mode 100644 fuelweb_test/tests/test_neutron.py delete mode 100644 fuelweb_test/tests/test_neutron_ipv6.py delete mode 100644 fuelweb_test/tests/test_neutron_public.py delete mode 100644 fuelweb_test/tests/test_neutron_tun.py delete mode 100644 fuelweb_test/tests/test_neutron_tun_base.py delete mode 100644 fuelweb_test/tests/test_node_reinstallation.py delete mode 100644 fuelweb_test/tests/test_offloading_types.py delete mode 100644 fuelweb_test/tests/test_ovs_firewall.py delete mode 100644 fuelweb_test/tests/test_public_api.py delete mode 100644 fuelweb_test/tests/test_pullrequest.py delete mode 100644 fuelweb_test/tests/test_reduced_footprint.py delete mode 100644 fuelweb_test/tests/test_services.py delete mode 100644 fuelweb_test/tests/test_services_reconfiguration.py delete mode 100644 fuelweb_test/tests/test_sriov.py delete mode 100644 fuelweb_test/tests/test_ssl.py delete mode 100644 fuelweb_test/tests/test_states_unlock_settings_tab.py delete mode 100644 fuelweb_test/tests/test_support_hugepages.py 
delete mode 100644 fuelweb_test/tests/test_ubuntu_bootstrap.py delete mode 100644 fuelweb_test/tests/test_unlock_settings_tab.py delete mode 100644 fuelweb_test/tests/tests_cli/__init__.py delete mode 100644 fuelweb_test/tests/tests_cli/test_cli_deploy.py delete mode 100644 fuelweb_test/tests/tests_cli/test_cli_deploy_ceph.py delete mode 100644 fuelweb_test/tests/tests_cli/test_cli_role.py delete mode 100644 fuelweb_test/tests/tests_configdb/__init__.py delete mode 100644 fuelweb_test/tests/tests_configdb/test_configdb_api.py delete mode 100644 fuelweb_test/tests/tests_configdb/test_configdb_cli.py delete mode 100644 fuelweb_test/tests/tests_custom_graph/__init__.py delete mode 100644 fuelweb_test/tests/tests_custom_graph/test_custom_graph.py delete mode 100644 fuelweb_test/tests/tests_deployments/__init__.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_tun/__init__.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_1.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_2.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_3.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/__init__.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_1.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_2.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_3.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_4.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_5.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_6.py delete mode 100644 fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_7.py delete mode 100644 fuelweb_test/tests/tests_extra_computes/__init__.py delete mode 100644 fuelweb_test/tests/tests_extra_computes/base_extra_computes.py delete mode 100644 fuelweb_test/tests/tests_extra_computes/test_ol_basic_actions.py delete mode 100644 fuelweb_test/tests/tests_extra_computes/test_ol_migration.py delete mode 100644 fuelweb_test/tests/tests_extra_computes/test_rh_basic_actions.py delete mode 100644 fuelweb_test/tests/tests_extra_computes/test_rh_migration.py delete mode 100644 fuelweb_test/tests/tests_ibp/__init__.py delete mode 100644 fuelweb_test/tests/tests_ibp/test_ibp.py delete mode 100644 fuelweb_test/tests/tests_lcm/__init__.py delete mode 100644 fuelweb_test/tests/tests_lcm/base_lcm_test.py delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cinder.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cluster_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/nodes_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/cinder.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/compute.yaml delete mode 100644 
fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/cluster_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/ironic.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/nodes_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/ironic.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/cluster_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/mongo.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/nodes_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/mongo.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/ceph-osd.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/cluster_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/nodes_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/ceph-osd.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/controller.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/ceph-osd_compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/cluster_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/controller_mongo.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/nodes_settings.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/primary-controller_mongo.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/ceph-osd_compute.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/controller_mongo.yaml delete mode 100644 
fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/primary-controller_mongo.yaml delete mode 100644 fuelweb_test/tests/tests_lcm/test_ensurability.py delete mode 100644 fuelweb_test/tests/tests_lcm/test_idempotency.py delete mode 100644 fuelweb_test/tests/tests_lcm/test_task_coverage.py delete mode 100644 fuelweb_test/tests/tests_mirrors/__init__.py delete mode 100644 fuelweb_test/tests/tests_mirrors/test_create_mirror.py delete mode 100644 fuelweb_test/tests/tests_mirrors/test_use_mirror.py delete mode 100644 fuelweb_test/tests/tests_multirole/__init__.py delete mode 100644 fuelweb_test/tests/tests_multirole/test_mongo_multirole.py delete mode 100644 fuelweb_test/tests/tests_multirole/test_multirole_group_1.py delete mode 100644 fuelweb_test/tests/tests_os_components/__init__.py delete mode 100644 fuelweb_test/tests/tests_os_components/test_mixed_os_components.py delete mode 100644 fuelweb_test/tests/tests_os_components/test_murano_os_component.py delete mode 100644 fuelweb_test/tests/tests_os_components/test_sahara_os_component.py delete mode 100644 fuelweb_test/tests/tests_patching/__init__.py delete mode 100644 fuelweb_test/tests/tests_patching/test_patching.py delete mode 100644 fuelweb_test/tests/tests_scale/__init__.py delete mode 100644 fuelweb_test/tests/tests_scale/test_scale_group_1.py delete mode 100644 fuelweb_test/tests/tests_scale/test_scale_group_2.py delete mode 100644 fuelweb_test/tests/tests_scale/test_scale_group_3.py delete mode 100644 fuelweb_test/tests/tests_scale/test_scale_group_4.py delete mode 100644 fuelweb_test/tests/tests_scale/test_scale_group_5.py delete mode 100644 fuelweb_test/tests/tests_scale/test_scale_group_6.py delete mode 100644 fuelweb_test/tests/tests_security/__init__.py delete mode 100644 fuelweb_test/tests/tests_security/test_lynis_audit.py delete mode 100644 fuelweb_test/tests/tests_security/test_run_nessus.py delete mode 100644 fuelweb_test/tests/tests_separate_services/__init__.py delete mode 100644 fuelweb_test/tests/tests_separate_services/test_deploy_platform_components.py delete mode 100644 fuelweb_test/tests/tests_separate_services/test_separate_haproxy.py delete mode 100644 fuelweb_test/tests/tests_separate_services/test_separate_horizon.py delete mode 100644 fuelweb_test/tests/tests_separate_services/test_separate_multiroles.py delete mode 100644 fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq.py delete mode 100644 fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq_ceph.py delete mode 100644 fuelweb_test/tests/tests_strength/__init__.py delete mode 100644 fuelweb_test/tests/tests_strength/test_cic_maintenance_mode.py delete mode 100644 fuelweb_test/tests/tests_strength/test_failover.py delete mode 100644 fuelweb_test/tests/tests_strength/test_failover_base.py delete mode 100644 fuelweb_test/tests/tests_strength/test_failover_group_1.py delete mode 100644 fuelweb_test/tests/tests_strength/test_failover_group_2.py delete mode 100644 fuelweb_test/tests/tests_strength/test_failover_group_3.py delete mode 100644 fuelweb_test/tests/tests_strength/test_failover_mongo.py delete mode 100644 fuelweb_test/tests/tests_strength/test_failover_with_ceph.py delete mode 100644 fuelweb_test/tests/tests_strength/test_huge_environments.py delete mode 100644 fuelweb_test/tests/tests_strength/test_image_based.py delete mode 100644 fuelweb_test/tests/tests_strength/test_load.py delete mode 100644 fuelweb_test/tests/tests_strength/test_load_base.py delete mode 100644 
fuelweb_test/tests/tests_strength/test_master_node_failover.py delete mode 100644 fuelweb_test/tests/tests_strength/test_network_outage.py delete mode 100644 fuelweb_test/tests/tests_strength/test_neutron.py delete mode 100644 fuelweb_test/tests/tests_strength/test_neutron_base.py delete mode 100644 fuelweb_test/tests/tests_strength/test_ostf_repeatable_tests.py delete mode 100644 fuelweb_test/tests/tests_strength/test_repetitive_restart.py delete mode 100644 fuelweb_test/tests/tests_strength/test_restart.py delete mode 100644 fuelweb_test/tests/tests_uca/__init__.py delete mode 100644 fuelweb_test/tests/tests_uca/test_uca.py delete mode 100644 fuelweb_test/tests/tests_upgrade/__init__.py delete mode 100644 fuelweb_test/tests/tests_upgrade/example_upgrade_scenario.yaml delete mode 100644 fuelweb_test/tests/tests_upgrade/octane_patcher.sh delete mode 100644 fuelweb_test/tests/tests_upgrade/test_clone_env.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_base.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_ceph_ha.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_multirack_deployment.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_net_tmpl.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_no_cluster.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_plugin.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_smoke.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_node_reassignment.py delete mode 100644 fuelweb_test/tests/tests_upgrade/test_os_upgrade.py delete mode 100644 fuelweb_test/tests/tests_upgrade/upgrade_base.py delete mode 100644 fuelweb_test/tests/tests_upgrade/upgrader_tool.py delete mode 100644 gates_tests/__init__.py delete mode 100644 gates_tests/devops_templates/default.yaml delete mode 100644 gates_tests/devops_templates/ironic_template.yaml delete mode 100644 gates_tests/helpers/__init__.py delete mode 100644 gates_tests/helpers/exceptions.py delete mode 100644 gates_tests/helpers/fuel_library_modules_mapping.yaml delete mode 100644 gates_tests/helpers/openstack_puppet_projects_mapping.yaml delete mode 100644 gates_tests/helpers/utils.py delete mode 100644 gates_tests/tests/__init__.py delete mode 100644 gates_tests/tests/test_nailgun_agent.py delete mode 100644 gates_tests/tests/test_review_fuel_web.py delete mode 100644 gates_tests/tests/test_review_in_astute.py delete mode 100644 gates_tests/tests/test_review_in_fuel_agent.py delete mode 100644 gates_tests/tests/test_review_in_fuel_client.py delete mode 100644 gates_tests/tests/test_review_in_ostf.py delete mode 100644 packages_tests/deb/deployment/ceph/ceph-deploy/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/libleveldb1/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/libsnappy1/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/libtcmalloc-minimal4/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/libunwind8/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/python-blinker/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/python-flask/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/python-itsdangerous/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/python-pyinotify/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/python-werkzeug/test.yaml delete mode 100644 
packages_tests/deb/deployment/ceph/test.yaml delete mode 100644 packages_tests/deb/deployment/ceph/xfsprogs/test.yaml delete mode 100644 packages_tests/deb/deployment/cinder/cinder-backup/test.yaml delete mode 100644 packages_tests/deb/deployment/cinder/test.yaml delete mode 100644 packages_tests/deb/deployment/general/test.yaml delete mode 100644 packages_tests/deb/deployment/glance/test.yaml delete mode 100644 packages_tests/deb/deployment/neutron/dkms/test.yaml delete mode 100644 packages_tests/deb/deployment/neutron/libipset3/test.yaml delete mode 100644 packages_tests/deb/deployment/neutron/neutron-common/test.yaml delete mode 100644 packages_tests/deb/deployment/neutron/test.yaml delete mode 100644 packages_tests/deb/deployment/nova/nova-compute-kvm/test.yaml delete mode 100644 packages_tests/deb/deployment/nova/nova-compute-libvirt/test.yaml delete mode 100644 packages_tests/deb/deployment/nova/nova-compute-qemu/test.yaml delete mode 100644 packages_tests/deb/deployment/nova/nova-compute/test.yaml delete mode 100644 packages_tests/deb/deployment/nova/nova-network/test.yaml delete mode 100644 packages_tests/deb/deployment/nova/test.yaml delete mode 100644 packages_tests/deb/deployment/swift/test.yaml delete mode 100644 packages_tests/deb/deployment/test.yaml delete mode 100644 packages_tests/deb/master/test.yaml delete mode 100644 packages_tests/deb/packages.yaml delete mode 100644 packages_tests/deb/provisioning/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/at/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/bc/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/ceph-deploy/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/ceph/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/db4-devel/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/ed/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/gdbm-devel/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/gperftools-libs/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/leveldb/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/libunwind/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/mailx/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/patch/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/pax/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/python-flask/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/python-itsdangerous/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/python-werkzeug/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/redhat-lsb-core/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/snappy/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/test.yaml delete mode 100644 packages_tests/rpm/deployment/ceph/time/test.yaml delete mode 100644 packages_tests/rpm/deployment/cinder/libmnl/test.yaml delete mode 100644 packages_tests/rpm/deployment/cinder/test.yaml delete mode 100644 packages_tests/rpm/deployment/general/test.yaml delete mode 100644 packages_tests/rpm/deployment/glance/test.yaml delete mode 100644 packages_tests/rpm/deployment/neutron/dkms/test.yaml delete mode 100644 packages_tests/rpm/deployment/neutron/ipset/test.yaml delete mode 100644 packages_tests/rpm/deployment/neutron/libipset3/test.yaml delete mode 100644 packages_tests/rpm/deployment/neutron/openstack-neutron/test.yaml delete mode 100644 
packages_tests/rpm/deployment/neutron/openvswitch-common/test.yaml delete mode 100644 packages_tests/rpm/deployment/neutron/openvswitch-switch/test.yaml delete mode 100644 packages_tests/rpm/deployment/neutron/python-jsonrpclib/test.yaml delete mode 100644 packages_tests/rpm/deployment/neutron/test.yaml delete mode 100644 packages_tests/rpm/deployment/nova/openstack-nova-compute/test.yaml delete mode 100644 packages_tests/rpm/deployment/nova/openstack-nova-network/test.yaml delete mode 100644 packages_tests/rpm/deployment/nova/test.yaml delete mode 100644 packages_tests/rpm/deployment/perl/perl-Test-Simple/test.yaml delete mode 100644 packages_tests/rpm/deployment/perl/test.yaml delete mode 100644 packages_tests/rpm/deployment/swift/test.yaml delete mode 100644 packages_tests/rpm/deployment/test.yaml delete mode 100644 packages_tests/rpm/master/test.yaml delete mode 100644 packages_tests/rpm/packages.yaml delete mode 100644 packages_tests/rpm/provisioning/test.yaml delete mode 100644 pytest.ini delete mode 100755 run_system_test.py delete mode 100755 run_tests.sh delete mode 100644 system_test/__init__.py delete mode 100644 system_test/actions/__init__.py delete mode 100644 system_test/actions/base.py delete mode 100644 system_test/actions/fuelmaster_actions.py delete mode 100644 system_test/actions/ostf_actions.py delete mode 100644 system_test/actions/plugins_actions.py delete mode 100644 system_test/actions/strength_actions.py delete mode 100644 system_test/core/__init__.py delete mode 100644 system_test/core/config.py delete mode 100644 system_test/core/decorators.py delete mode 100644 system_test/core/discover.py delete mode 100644 system_test/core/factory.py delete mode 100644 system_test/core/repository.py delete mode 100644 system_test/helpers/__init__.py delete mode 100644 system_test/helpers/decorators.py delete mode 100644 system_test/tests/__init__.py delete mode 100644 system_test/tests/base.py delete mode 100644 system_test/tests/plugins/__init__.py delete mode 100644 system_test/tests/plugins/plugin_example/__init__.py delete mode 100644 system_test/tests/plugins/plugin_example/test_plugin_example.py delete mode 100644 system_test/tests/plugins/plugin_example/test_plugin_example_v3.py delete mode 100644 system_test/tests/strength/__init__.py delete mode 100644 system_test/tests/strength/test_destroy_controllers.py delete mode 100644 system_test/tests/strength/test_filling_root.py delete mode 100644 system_test/tests/test_create_deploy_ostf.py delete mode 100644 system_test/tests/test_delete_after_deploy.py delete mode 100644 system_test/tests/test_deploy_check_rados.py delete mode 100644 system_test/tests/test_redeploy_after_reset.py delete mode 100644 system_test/tests/test_redeploy_after_stop.py delete mode 100644 system_test/tests_templates/cluster_configs/networks/neutron_gre.yaml delete mode 100644 system_test/tests_templates/cluster_configs/networks/neutron_tun.yaml delete mode 100644 system_test/tests_templates/cluster_configs/networks/neutron_vlan.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/mixed/1ctrl_1comp_1cndr_mongo_2ceph.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_3comp_ceph.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_mongo_3comp_ceph.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr.yaml delete mode 100644 
system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr_3ceph.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_3ceph_1mongo.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp_1cndr_3ceph_1mongo.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/single/3ctrl_1comp_1cndr.yaml delete mode 100644 system_test/tests_templates/cluster_configs/nodes/single/3ctrl_2comp_1cndr.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cephVolImgRados.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cephVolImgRados_ceilometer.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer_sahara.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cephVolImg_wo_components.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cinder_ceilometer.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_ceilometer.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_wo_components.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cinder_cephImg_ceilometer.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cinder_sahara.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cinder_wo_components.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/cinder_wo_componets.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/ceilometer.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano_sahara.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/ceilometer_sahara.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/murano.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/murano_sahara.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/sahara.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/components/wo_components.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/storages/cephImgVol.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/storages/cephImgVolRados.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImg.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImgRados.yaml delete mode 100644 system_test/tests_templates/cluster_configs/settings/storages/cinder_only.yaml delete mode 100644 system_test/tests_templates/devops_configs/centos_master.yaml delete mode 100644 system_test/tests_templates/devops_configs/default.yaml delete mode 100644 system_test/tests_templates/devops_configs/default30-2groups.yaml delete mode 100644 system_test/tests_templates/devops_configs/default30-2groups_.yaml delete mode 100644 system_test/tests_templates/devops_configs/default30-bond.yaml delete mode 100644 
system_test/tests_templates/devops_configs/default30.yaml delete mode 100644 system_test/tests_templates/devops_configs/external_haproxy.yaml delete mode 100644 system_test/tests_templates/devops_configs/ironic.yaml delete mode 100644 system_test/tests_templates/devops_configs/multipath.yaml delete mode 100644 system_test/tests_templates/devops_configs/multirack.yaml delete mode 100644 system_test/tests_templates/devops_configs/public_api.yaml delete mode 100644 system_test/tests_templates/devops_configs/security_scan.yaml delete mode 100644 system_test/tests_templates/devops_configs/vcenter_ha_default.yaml delete mode 100644 system_test/tests_templates/tests_configs/centos_master_ceph_all_on_neutron_vlan.yaml delete mode 100644 system_test/tests_templates/tests_configs/ceph_all_ceilo_on_neutron_tun.yaml delete mode 100644 system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan.yaml delete mode 100644 system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-2groups.yaml delete mode 100644 system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-bond.yaml delete mode 100644 system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30.yaml delete mode 100644 system_test/tests_templates/tests_configs/example_test_environment.yaml delete mode 100644 system_test/tests_templates/tests_configs/external_haproxy.yaml delete mode 100644 system_test/tests_templates/tests_configs/ha/pairwise/1ctrl_ceph_2ctrl_1comp_1comp_ceph_neutronVLAN.yaml delete mode 100644 system_test/tests_templates/tests_configs/ha/single/3ctrl_2comp_1cndr_neutronVLAN.yaml delete mode 100644 system_test/tests_templates/tests_configs/multipath_3_nodes.yaml delete mode 100644 system_test/tests_templates/tests_configs/multirack.yaml delete mode 100644 system_test/tests_templates/tests_configs/public_api.yaml delete mode 100644 system_test/tests_templates/tests_configs/scale/1ctrl_1comp_1cndr_neutronTUN_scale_3ctrl.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/pairwise/1ctrl_1comp_1cndr_mongo_2ceph_neutronVLAN_CephImgRados_ceilometer.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_3ceph_neutronVLAN_cephImgRados.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_neutronTUN_sahara.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_3ceph_1mongo_neutronTUN_CephImgVol_ceilometer_sahara.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronTUN.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronVLAN.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_1cndr_3ceph_1mongo_neutronVLAN_CephImg_ceilometer.yaml delete mode 100644 system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_neutronVLAN.yaml delete mode 100644 tox.ini delete mode 100755 utils/jenkins/conv_snapshot_file.py delete mode 100755 utils/jenkins/fuel_logs.py delete mode 100755 utils/jenkins/system_tests.sh diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 23d4372c7..000000000 --- a/.coveragerc +++ /dev/null @@ -1,5 +0,0 @@ -[run] -source = - core -omit = - core/_tests/* diff --git a/.gitignore b/.gitignore deleted file mode 100644 index ba53cadba..000000000 --- a/.gitignore +++ /dev/null @@ -1,63 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -eggs -parts -bin -var 
-sdist -develop-eggs -.installed.cfg -lib -lib64 -MANIFEST -TAGS -.venv - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -nosetests.xml -unit.xml -/htmlcov/ - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject -.idea - -# Local example -example_local.py - -# Local settings -local_settings.py - -# Documentation -doc/_build/ - -# Logs -/logs -*.log - -# Certs -/ca.crt -/ca.pem - -# Cache -/.cache -/core/.cache -__pycache__ diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 6cac8d7e9..000000000 --- a/.pylintrc +++ /dev/null @@ -1,481 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS, tox, logs - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. -optimize-ast=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. 
- -# old-style-class (C1001) -# return-arg-in-generator (E0106) -# slots-on-old-class (E1001) -# super-on-old-class (E1002) -# missing-super-argument (E1004) -# print-statement (E1601) -# parameter-unpacking (E1602) -# unpacking-in-except (E1603) -# old-raise-syntax (E1604) -# backtick (E1605) -# long-suffix (E1606) -# old-ne-operator (E1607) -# old-octal-literal (E1608) -# import-star-module-level (E1609) -# lowercase-l-suffix (W0332) -# deprecated-module (W0402) -# invalid-encoded-data (W0512) -# property-on-old-class (W1001) -# boolean-datetime (W1502) -# deprecated-method (W1505) -# apply-builtin (W1601) -# basestring-builtin (W1602) -# buffer-builtin (W1603) -# cmp-builtin (W1604) -# coerce-builtin (W1605) -# execfile-builtin (W1606) -# file-builtin (W1607) -# long-builtin (W1608) -# raw_input-builtin (W1609) -# reduce-builtin (W1610) -# standarderror-builtin (W1611) -# unicode-builtin (W1612) -# xrange-builtin (W1613) -# coerce-method (W1614) -# delslice-method (W1615) -# getslice-method (W1616) -# setslice-method (W1617) -# old-division (W1619) -# dict-iter-method (W1620) -# dict-view-method (W1621) -# next-method-called (W1622) -# metaclass-assignment (W1623) -# indexing-exception (W1624) -# raising-string (W1625) -# reload-builtin (W1626) -# oct-method (W1627) -# hex-method (W1628) -# nonzero-method (W1629) -# cmp-method (W1630) -# input-builtin (W1632) -# round-builtin (W1633) -# intern-builtin (W1634) -# unichr-builtin (W1635) -# map-builtin-not-iterating (W1636) -# zip-builtin-not-iterating (W1637) -# range-builtin-not-iterating (W1638) -# filter-builtin-not-iterating (W1639) -# filter-builtin-not-iterating (W1639) -# using-cmp-argument (W1640) - -enable = E0106,C1001,E1001,E1002,E1004,E1601,E1602,E1603,E1604,E1605,E1606,E1607,E1608,E1609,W0332,W0402,W0512,W1001,W1502,W1505,W1601,W1602,W1603,W1604,W1605,W1606,W1607,W1608,W1609,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1619,W1620,W1621,W1622,W1623,W1624,W1625,W1626,W1627,W1628,W1629,W1630,W1632,W1633,W1634,W1635,W1636,W1637,W1638,W1639,W1640, - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". 
If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" - -# Disabling pointless reports: -# RP0401: External dependencies -# RP0402: Modules dependencies graph -# RP0801: Duplication -# R0801: Duplication -# cyclic-import (R0401) - produces false-negative results - -# Disabling messages: -# pointless-string-statement (W0105) -# unnecessary-lambda (W0108) -# deprecated-lambda (W0110) -# bad-builtin (W0141) -# fixme (W0511) -# unused-argument (W0613) -# redefined-outer-name (W0621) -# cell-var-from-loop (W0640) -# bare-except (W0702) -# broad-except (W0703) -# logging-format-interpolation (W1202) -# anomalous-backslash-in-string (W1401) - DO NOT ENABLE, INCORRECTLY PARSES REGEX -# no-absolute-import (W1618): -# import missing `from __future__ import absolute_import` Used when an import is not accompanied by from __future__ import absolute_import (default behaviour in Python 3) - -# invalid-name (C0103) -# missing-docstring (C0111) -# misplaced-comparison-constant (C0122) -# too-many-lines (C0302) -# bad-continuation (C0330) - -# too-many-ancestors (R0901) -# too-many-public-methods (R0904) -# too-few-public-methods (R0903) -# too-many-return-statements (R0911) -# too-many-branches (R0912) -# too-many-arguments (R0913) -# too-many-locals (R0914) -# too-many-statements (R0915) - -# locally-disabled (I0011) -# locally-enabled (I0012) - -disable=RP0401,RP0402,RP0801,R0801,W0141,W1618,W0621,W1202,W1401,W0703,W0702,C0111,W0640,C0122,W0511, W0613, C0103, R0903, C0330, C0302, R0915, R0914, R0912, W0105, R0904, R0911, W0108, W0110, R0913, R0901, R0401, I0011, I0012 - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=colorized -#output-format=parseable - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). 
-ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). This supports can work -# with qualified names. -ignored-classes= - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=10 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1500 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[BASIC] - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,input - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_,x,e,ip - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. 
-name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=10 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=15 - -# Minimum number of public methods for a class (see R0903). 
-min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/.pylintrc_gerrit b/.pylintrc_gerrit deleted file mode 100644 index 390938443..000000000 --- a/.pylintrc_gerrit +++ /dev/null @@ -1,482 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS, tox, logs - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. -optimize-ast=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. 
- -# old-style-class (C1001) -# return-arg-in-generator (E0106) -# slots-on-old-class (E1001) -# super-on-old-class (E1002) -# missing-super-argument (E1004) -# print-statement (E1601) -# parameter-unpacking (E1602) -# unpacking-in-except (E1603) -# old-raise-syntax (E1604) -# backtick (E1605) -# long-suffix (E1606) -# old-ne-operator (E1607) -# old-octal-literal (E1608) -# import-star-module-level (E1609) -# lowercase-l-suffix (W0332) -# deprecated-module (W0402) -# invalid-encoded-data (W0512) -# property-on-old-class (W1001) -# boolean-datetime (W1502) -# deprecated-method (W1505) -# apply-builtin (W1601) -# basestring-builtin (W1602) -# buffer-builtin (W1603) -# cmp-builtin (W1604) -# coerce-builtin (W1605) -# execfile-builtin (W1606) -# file-builtin (W1607) -# long-builtin (W1608) -# raw_input-builtin (W1609) -# reduce-builtin (W1610) -# standarderror-builtin (W1611) -# unicode-builtin (W1612) -# xrange-builtin (W1613) -# coerce-method (W1614) -# delslice-method (W1615) -# getslice-method (W1616) -# setslice-method (W1617) -# old-division (W1619) -# dict-iter-method (W1620) -# dict-view-method (W1621) -# next-method-called (W1622) -# metaclass-assignment (W1623) -# indexing-exception (W1624) -# raising-string (W1625) -# reload-builtin (W1626) -# oct-method (W1627) -# hex-method (W1628) -# nonzero-method (W1629) -# cmp-method (W1630) -# input-builtin (W1632) -# round-builtin (W1633) -# intern-builtin (W1634) -# unichr-builtin (W1635) -# map-builtin-not-iterating (W1636) -# zip-builtin-not-iterating (W1637) -# range-builtin-not-iterating (W1638) -# filter-builtin-not-iterating (W1639) -# filter-builtin-not-iterating (W1639) -# using-cmp-argument (W1640) - -enable = E0106,C1001,E1001,E1002,E1004,E1601,E1602,E1603,E1604,E1605,E1606,E1607,E1608,E1609,W0332,W0402,W0512,W1001,W1502,W1505,W1601,W1602,W1603,W1604,W1605,W1606,W1607,W1608,W1609,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1619,W1620,W1621,W1622,W1623,W1624,W1625,W1626,W1627,W1628,W1629,W1630,W1632,W1633,W1634,W1635,W1636,W1637,W1638,W1639,W1640, - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". 
If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" - -# Disabling pointless reports: -# RP0401: External dependencies -# RP0402: Modules dependencies graph -# RP0801: Duplication -# R0801: Duplication -# cyclic-import (R0401) - produces false-negative results - -# Disabling messages: -# pointless-string-statement (W0105) -# unnecessary-lambda (W0108) -# deprecated-lambda (W0110) -# bad-builtin (W0141) -# protected-access (W0212) -# fixme (W0511) -# unused-argument (W0613) -# redefined-outer-name (W0621) -# cell-var-from-loop (W0640) -# bare-except (W0702) -# broad-except (W0703) -# logging-format-interpolation (W1202) -# anomalous-backslash-in-string (W1401) - DO NOT ENABLE, INCORRECTLY PARSES REGEX -# no-absolute-import (W1618): -# import missing `from __future__ import absolute_import` Used when an import is not accompanied by from __future__ import absolute_import (default behaviour in Python 3) - -# invalid-name (C0103) -# missing-docstring (C0111) -# misplaced-comparison-constant (C0122) -# too-many-lines (C0302) -# bad-continuation (C0330) - -# too-many-ancestors (R0901) -# too-many-public-methods (R0904) -# too-few-public-methods (R0903) -# too-many-return-statements (R0911) -# too-many-branches (R0912) -# too-many-arguments (R0913) -# too-many-locals (R0914) -# too-many-statements (R0915) - -# locally-disabled (I0011) -# locally-enabled (I0012) - -disable=RP0401,RP0402,RP0801,R0801, W0141,W1618,W0621,W1202,W1401,W0703,W0702,C0111,W0640,C0122,W0511, W0613, C0103, R0903, C0330, W0212, C0302, R0915, R0914, R0912, W0105, R0904, R0911, W0108, W0110, R0913, R0901, R0401, I0011, I0012 - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text -#output-format=parseable - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. 
A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). This supports can work -# with qualified names. -ignored-classes= - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=10 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1500 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[BASIC] - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,input - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_,x,e,ip - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. 
-name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=10 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=15 - -# Minimum number of public methods for a class (see R0903). 
-min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/MAINTAINERS b/MAINTAINERS deleted file mode 100644 index a03ec8d6d..000000000 --- a/MAINTAINERS +++ /dev/null @@ -1,72 +0,0 @@ ---- -description: - For Fuel team structure and contribution policy, see [1]. - - This is the repository-level MAINTAINERS file. All contributions to this - repository must be approved by one or more Core Reviewers [2]. - If you are contributing to files (or creating new directories) in the - root folder of this repository, please contact the Core Reviewers for - review and merge requests. - - If you are contributing to subfolders of this repository, please - check the 'maintainers' section of this file to find the maintainers - of those specific modules. - - It is mandatory to get a +1 from one or more maintainers before asking - Core Reviewers for review/merge, in order to decrease the load on Core Reviewers [3]. - Exceptions are when the maintainers are themselves cores, or when the maintainers - are not available for some reason (e.g. on vacation). - - [1] https://specs.openstack.org/openstack/fuel-specs/policy/team-structure - [2] https://review.openstack.org/#/admin/groups/662,members - [3] http://lists.openstack.org/pipermail/openstack-dev/2015-August/072406.html - - Please keep this file in YAML format so that helper scripts - can read it as configuration data.
- -maintainers: - - - ./: - - name: Vladimir Khlyunev - email: vkhlyunev@mirantis.com - IRC: vkhlyunev - - - name: Alexandr Kostrikov - email: akostrikov@mirantis.com - IRC: akostrikov_mirantis - - - name: Artem Grechanichenko - email: agrechanichenko@mirantis.com - IRC: agrechanicheko - - - name: Maksym Strukov - email: mstrukov@mirantis.com - IRC: mstrukov - - - name: Aleksandr Kurenyshev - email: akurenyshev@mirantis.com - IRC: akurenyshev - - - fuelweb_test/tests/plugins/plugin_zabbix/: - - - name: Swann Croiset - email: scroiset@mirantis.com - IRC: swann - - - name: Simon Pasquier - email: spasquier@mirantis.com - IRC: pasquier-s - - - name: Maciej Relewicz - email: mrelewicz@mirantis.com - IRC: rlu - - - name: Bartosz Kupidura - email: bkupidura@mirantis.com - IRC: zynzel - - - fuelweb_test/tests/tests_extra_computes/: - - - name: Victor Ryzhenkin - email: vryzhenkin@mirantis.com - IRC: freerunner diff --git a/README.md b/README.md deleted file mode 100644 index b1fdeb79c..000000000 --- a/README.md +++ /dev/null @@ -1,25 +0,0 @@ -Team and repository tags -======================== - -[![Team and repository tags](http://governance.openstack.org/badges/fuel-qa.svg)](http://governance.openstack.org/reference/tags/index.html) - - - -Tests documentation -------------------- - -[Devops documentation](http://docs.fuel-infra.org/fuel-dev/devops.html) - -Code-generated documentation ----------------------------- - -You need to run `make doc-html` to generate them. - -Output is stored in `doc/_build/html/index.html`. - - -For 'make iso' --------------- - -[Building ISO documentation](http://docs.fuel-infra.org/fuel-dev/develop/env.html#building-the-fuel-iso) - diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..86e34d67c --- /dev/null +++ b/README.rst @@ -0,0 +1,10 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/core/__init__.py b/core/__init__.py deleted file mode 100644 index 74b32bcc3..000000000 --- a/core/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -logger = logging.getLogger('fuel-qa.{}'.format(__name__)) diff --git a/core/_tests/__init__.py b/core/_tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/_tests/helpers/__init__.py b/core/_tests/helpers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/_tests/helpers/test_log_helpers.py b/core/_tests/helpers/test_log_helpers.py deleted file mode 100644 index 30b3f4f45..000000000 --- a/core/_tests/helpers/test_log_helpers.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright 2016 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import absolute_import -from __future__ import unicode_literals - -import logging -import unittest - -# pylint: disable=import-error -import mock -from mock import call -from mock import Mock -from mock import patch -# pylint: enable=import-error - -from core.helpers import log_helpers - -# pylint: disable=no-self-use - - -@mock.patch('core.helpers.log_helpers.logger', autospec=True) -class TestLogWrap(unittest.TestCase): - def test_no_args(self, logger): - @log_helpers.logwrap - def func(): - return 'No args' - - result = func() - self.assertEqual(result, 'No args') - logger.assert_has_calls(( - mock.call.log( - level=logging.DEBUG, - msg="Calling: \n'func'()" - ), - mock.call.log( - level=logging.DEBUG, - msg="Done: 'func' with result:\n{}".format( - log_helpers.pretty_repr(result)) - ), - )) - - def test_args_simple(self, logger): - arg = 'test arg' - - @log_helpers.logwrap - def func(tst): - return tst - - result = func(arg) - self.assertEqual(result, arg) - logger.assert_has_calls(( - mock.call.log( - level=logging.DEBUG, - msg="Calling: \n'func'(\n 'tst'={},\n)".format( - log_helpers.pretty_repr( - arg, indent=8, no_indent_start=True) - ) - ), - mock.call.log( - level=logging.DEBUG, - msg="Done: 'func' with result:\n{}".format( - log_helpers.pretty_repr(result)) - ), - )) - - def test_args_defaults(self, logger): - arg = 'test arg' - - @log_helpers.logwrap - def func(tst=arg): - return tst - - result = func() - self.assertEqual(result, arg) - logger.assert_has_calls(( - mock.call.log( - level=logging.DEBUG, - msg="Calling: \n'func'(\n 'tst'={},\n)".format( - log_helpers.pretty_repr( - arg, indent=8, no_indent_start=True)) - ), - mock.call.log( - level=logging.DEBUG, - msg="Done: 'func' with result:\n{}".format( - log_helpers.pretty_repr(result)) - ), - )) - - def test_args_complex(self, logger): - string = 'string' - dictionary = {'key': 'dictionary'} - - @log_helpers.logwrap - def func(param_string, param_dictionary): - return param_string, param_dictionary - - result = func(string, dictionary) - self.assertEqual(result, (string, dictionary)) - # raise ValueError(logger.mock_calls) - logger.assert_has_calls(( - mock.call.log( - level=logging.DEBUG, - msg="Calling: \n'func'(" - "\n 'param_string'={string}," - "\n 'param_dictionary'={dictionary},\n)".format( - string=log_helpers.pretty_repr( - string, - indent=8, no_indent_start=True), - dictionary=log_helpers.pretty_repr( - dictionary, - indent=8, no_indent_start=True) - ) - ), - mock.call.log( - level=logging.DEBUG, - msg="Done: 'func' with result:\n{}".format( - log_helpers.pretty_repr(result)) - ), - )) - - def test_args_kwargs(self, logger): - targs = ['string1', 'string2'] - tkwargs = {'key': 'tkwargs'} - - @log_helpers.logwrap - def func(*args, **kwargs): - return tuple(args), kwargs - - result = func(*targs, **tkwargs) - self.assertEqual(result, (tuple(targs), tkwargs)) - # raise ValueError(logger.mock_calls) - logger.assert_has_calls(( - mock.call.log( - 
level=logging.DEBUG, - msg="Calling: \n'func'(" - "\n 'args'={args}," - "\n 'kwargs'={kwargs},\n)".format( - args=log_helpers.pretty_repr( - tuple(targs), - indent=8, no_indent_start=True), - kwargs=log_helpers.pretty_repr( - tkwargs, - indent=8, no_indent_start=True) - ) - ), - mock.call.log( - level=logging.DEBUG, - msg="Done: 'func' with result:\n{}".format( - log_helpers.pretty_repr(result)) - ), - )) - - def test_negative(self, logger): - @log_helpers.logwrap - def func(): - raise ValueError('as expected') - - with self.assertRaises(ValueError): - func() - - logger.assert_has_calls(( - mock.call.log( - level=logging.DEBUG, - msg="Calling: \n'func'()" - ), - mock.call.log( - level=logging.ERROR, - msg="Failed: \n'func'()", - exc_info=True - ), - )) - - def test_negative_substitutions(self, logger): - new_logger = mock.Mock(spec=logging.Logger, name='logger') - log = mock.Mock(name='log') - new_logger.attach_mock(log, 'log') - - @log_helpers.logwrap( - log=new_logger, - log_level=logging.INFO, - exc_level=logging.WARNING - ) - def func(): - raise ValueError('as expected') - - with self.assertRaises(ValueError): - func() - - self.assertEqual(len(logger.mock_calls), 0) - log.assert_has_calls(( - mock.call( - level=logging.INFO, - msg="Calling: \n'func'()" - ), - mock.call( - level=logging.WARNING, - msg="Failed: \n'func'()", - exc_info=True - ), - )) - - -@patch('logging.StreamHandler') -@patch('core.helpers.log_helpers.logger', autospec=True) -class TestQuietLogger(unittest.TestCase): - def test_default(self, logger_obj, handler_cls): - handler = Mock() - handler.configure_mock(level=logging.INFO) - handler_cls.return_value = handler - - with log_helpers.QuietLogger(): - log_helpers.logger.warning('Test') - - handler.assert_has_calls(( - call.setLevel(logging.INFO + 1), - call.setLevel(logging.INFO) - )) - - logger_obj.assert_has_calls((call.warning('Test'), )) - - def test_upper_level(self, logger_obj, handler_cls): - handler = Mock() - handler.configure_mock(level=logging.INFO) - handler_cls.return_value = handler - - with log_helpers.QuietLogger(logging.WARNING): - log_helpers.logger.warning('Test') - - handler.assert_has_calls(( - call.setLevel(logging.WARNING + 1), - call.setLevel(logging.INFO) - )) - - logger_obj.assert_has_calls((call.warning('Test'), )) - - def test_lower_level(self, logger_obj, handler_cls): - handler = Mock() - handler.configure_mock(level=logging.INFO) - handler_cls.return_value = handler - - with log_helpers.QuietLogger(logging.DEBUG): - log_helpers.logger.warning('Test') - - handler.assert_has_calls(( - call.setLevel(logging.INFO), - )) - - logger_obj.assert_has_calls(( - call.debug( - 'QuietLogger requested lower level, than is already set. ' - 'Not changing level'), - call.warning('Test'), - )) diff --git a/core/_tests/helpers/test_setup_teardown.py b/core/_tests/helpers/test_setup_teardown.py deleted file mode 100644 index d465487ab..000000000 --- a/core/_tests/helpers/test_setup_teardown.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import absolute_import -from __future__ import print_function - -import unittest - -# pylint: disable=import-error -from mock import call -from mock import patch -# pylint: enable=import-error - -from core.helpers import setup_teardown - - -# Get helpers names (python will try to mangle it inside classes) -get_arg_names = setup_teardown.__get_arg_names -getcallargs = setup_teardown.__getcallargs -call_in_context = setup_teardown.__call_in_context - - -class TestWrappers(unittest.TestCase): - def test_get_arg_names(self): - def func_no_args(): - pass - - def func_arg(single): - pass - - def func_args(first, last): - pass - - self.assertEqual( - get_arg_names(func_no_args), - [] - ) - - self.assertEqual( - get_arg_names(func_arg), - ['single'] - ) - - self.assertEqual( - get_arg_names(func_args), - ['first', 'last'] - ) - - def test_getcallargs(self): - def func_no_def(arg1, arg2): - pass - - def func_def(arg1, arg2='arg2'): - pass - - self.assertEqual( - dict(getcallargs(func_no_def, *['arg1', 'arg2'], **{})), - {'arg1': 'arg1', 'arg2': 'arg2'} - ) - - self.assertEqual( - dict(getcallargs(func_no_def, *['arg1'], **{'arg2': 'arg2'})), - {'arg1': 'arg1', 'arg2': 'arg2'} - ) - - self.assertEqual( - dict(getcallargs( - func_no_def, *[], **{'arg1': 'arg1', 'arg2': 'arg2'})), - {'arg1': 'arg1', 'arg2': 'arg2'} - ) - - self.assertEqual( - dict(getcallargs(func_def, *['arg1'], **{})), - {'arg1': 'arg1', 'arg2': 'arg2'} - ) - - self.assertEqual( - dict(getcallargs(func_def, *[], **{'arg1': 'arg1'})), - {'arg1': 'arg1', 'arg2': 'arg2'} - ) - - self.assertEqual( - dict(getcallargs( - func_def, *[], **{'arg1': 'arg1', 'arg2': 2})), - {'arg1': 'arg1', 'arg2': 2} - ) - - def test_call_in_context(self): - def func_no_args(): - return None - - def func_args(first='first', last='last'): - return first, last - - def func_self_arg(self): - return self - - def func_cls_arg(cls): - return cls - - class Tst(object): - @classmethod - def tst(cls): - return cls - - self.assertIsNone( - call_in_context( - func=func_no_args, - context_args={} - ) - ) - - self.assertIsNone( - call_in_context( - func=func_no_args, - context_args={'test': 'val'} - ) - ) - - self.assertEqual( - call_in_context( - func=func_args, - context_args={'first': 0, 'last': -1} - ), - (0, -1) - ) - - with self.assertRaises(ValueError): - call_in_context( - func=func_args, - context_args={} - ) - - self.assertEqual( - call_in_context( - func=func_self_arg, - context_args={'self': self} - ), - self - ) - - self.assertEqual( - call_in_context( - func=func_cls_arg, - context_args={'cls': self.__class__} - ), - self.__class__ - ) - - self.assertEqual( - call_in_context( - func=func_cls_arg, - context_args={'self': self} - ), - self.__class__ - ) - - self.assertEqual( - call_in_context( - func=Tst.tst, - context_args={'cls': self.__class__} - ), - Tst, - 'cls was not filtered from @classmethod!' 
- ) - - # Allow to replace function by None in special cases - self.assertIsNone( - call_in_context(None, {'test_arg': 'test_val'}) - ) - - -@patch('core.helpers.setup_teardown.__getcallargs', return_value={'arg': True}) -@patch('core.helpers.setup_teardown.__call_in_context') -class TestSetupTeardown(unittest.TestCase): - def test_basic(self, call_in, getargs): - arg = True - - @setup_teardown.setup_teardown() - def positive_example(arg): - return arg - - self.assertEqual(positive_example(arg), arg) - - # Real function is under decorator, so we could not make full check - getargs.assert_called_once() - - call_in.assert_has_calls(( - call(None, {'arg': arg}), - call(None, {'arg': arg}), - )) - - def test_applied(self, call_in, getargs): - arg = True - - def setup_func(): - pass - - def teardown_func(): - pass - - @setup_teardown.setup_teardown( - setup=setup_func, - teardown=teardown_func - ) - def positive_example(arg): - return arg - - self.assertEqual(positive_example(arg), arg) - - # Real function is under decorator, so we could not make full check - getargs.assert_called_once() - - call_in.assert_has_calls(( - call(setup_func, {'arg': arg}), - call(teardown_func, {'arg': arg}), - )) - - def test_exception_applied(self, call_in, getargs): - arg = True - - def setup_func(): - pass - - def teardown_func(): - pass - - @setup_teardown.setup_teardown( - setup=setup_func, - teardown=teardown_func - ) - def positive_example(arg): - raise ValueError(arg) - - with self.assertRaises(ValueError): - positive_example(arg) - - # Real function is under decorator, so we could not make full check - getargs.assert_called_once() - - call_in.assert_has_calls(( - call(setup_func, {'arg': arg}), - call(teardown_func, {'arg': arg}), - )) diff --git a/core/_tests/models/__init__.py b/core/_tests/models/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/_tests/models/fuel_client/__init__.py b/core/_tests/models/fuel_client/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/_tests/models/fuel_client/test_adapter.py b/core/_tests/models/fuel_client/test_adapter.py deleted file mode 100644 index 2b810a395..000000000 --- a/core/_tests/models/fuel_client/test_adapter.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
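The decorator exercised by TestSetupTeardown above can be reconstructed from its assertions: the setup and teardown hooks receive arguments resolved from the wrapped function's real call arguments, and teardown runs even when the wrapped function raises. A minimal sketch of that behaviour follows; the helper name _call_in_context and the name-based argument filtering are illustrative, and the self/cls translation that the __call_in_context tests also cover is omitted here for brevity.

    import functools
    import inspect


    def setup_teardown(setup=None, teardown=None):
        """Run ``setup`` before and ``teardown`` after the decorated function."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # Resolve the real call arguments, defaults included.
                context = inspect.getcallargs(func, *args, **kwargs)
                _call_in_context(setup, context)
                try:
                    return func(*args, **kwargs)
                finally:
                    # Runs on success and on exception alike.
                    _call_in_context(teardown, context)
            return wrapper
        return decorator


    def _call_in_context(hook, context_args):
        if hook is None:  # explicitly allowed: no hook configured
            return None
        code = hook.__code__
        wanted = code.co_varnames[:code.co_argcount]
        missing = set(wanted) - set(context_args)
        if missing:
            raise ValueError('Cannot resolve arguments: {}'.format(missing))
        return hook(**{name: context_args[name] for name in wanted})

Usage mirrors the tests: decorate with @setup_teardown(setup=setup_func, teardown=teardown_func), and each hook is invoked with only the arguments it names.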
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import unittest - -# pylint: disable=import-error -from mock import Mock -# pylint: enable=import-error - -from core.models.fuel_client import base_client - -# pylint: disable=no-self-use - - -class TestAdapter(unittest.TestCase): - def test_init_default(self): - session = Mock(spec='keystoneauth1.session.Session') - obj = base_client.Adapter(session=session) - - self.assertEqual(obj.service_type, 'fuel') - self.assertEqual(obj.session, session) - - self.assertEqual( - repr(obj), - ( - "{cls}(" - "session=," - "service_type={svc}" - ") id={id}".format( - cls=base_client.Adapter.__name__, - sess_id=hex(id(session)), - svc=obj.service_type, - id=hex(id(obj)) - )) - ) - - def test_init_svc(self): - session = Mock(spec='keystoneauth1.session.Session') - - service_type = 'ostf' - obj = base_client.Adapter(session=session, service_type=service_type) - - self.assertEqual(obj.service_type, service_type) - self.assertEqual(obj.session, session) - - self.assertEqual( - repr(obj), - ( - "{cls}(" - "session=," - "service_type={svc}" - ") id={id}".format( - cls=base_client.Adapter.__name__, - sess_id=hex(id(session)), - svc=obj.service_type, - id=hex(id(obj)) - )) - ) - - def test_methods(self): - session = Mock(spec='keystoneauth1.session.Session') - get = Mock(name='get') - post = Mock(name='post') - put = Mock(name='put') - delete = Mock(name='delete') - - session.attach_mock(get, 'get') - session.attach_mock(post, 'post') - session.attach_mock(put, 'put') - session.attach_mock(delete, 'delete') - - url = 'test' - - obj = base_client.Adapter(session=session) - - obj.get(url=url) - obj.post(url=url) - obj.put(url=url) - obj.delete(url=url) - - get.assert_called_once_with( - connect_retries=1, - endpoint_filter={'service_type': obj.service_type}, - url=url) - - post.assert_called_once_with( - connect_retries=1, - endpoint_filter={'service_type': obj.service_type}, - url=url) - - put.assert_called_once_with( - connect_retries=1, - endpoint_filter={'service_type': obj.service_type}, - url=url) - - delete.assert_called_once_with( - connect_retries=1, - endpoint_filter={'service_type': obj.service_type}, - url=url) diff --git a/core/_tests/models/fuel_client/test_client.py b/core/_tests/models/fuel_client/test_client.py deleted file mode 100644 index 5d92d21be..000000000 --- a/core/_tests/models/fuel_client/test_client.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
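The Adapter asserted against above is small enough to infer from the expected calls: it binds a service_type to a shared keystoneauth1 session, defaulting to 'fuel', and routes every HTTP verb through that session with an endpoint filter and a single connection retry. A sketch under those assumptions; the _request helper is an illustrative factoring, not necessarily how the deleted module was written.

    class Adapter(object):
        """Bind a shared keystoneauth1 session to one service endpoint."""

        def __init__(self, session, service_type='fuel'):
            self.session = session
            self.service_type = service_type

        def _request(self, method, url, **kwargs):
            # Every call goes through the shared session with the service
            # catalog filter and one connection retry, as the tests expect.
            kwargs.setdefault('connect_retries', 1)
            kwargs.setdefault(
                'endpoint_filter', {'service_type': self.service_type})
            return getattr(self.session, method)(url=url, **kwargs)

        def get(self, url, **kwargs):
            return self._request('get', url, **kwargs)

        def post(self, url, **kwargs):
            return self._request('post', url, **kwargs)

        def put(self, url, **kwargs):
            return self._request('put', url, **kwargs)

        def delete(self, url, **kwargs):
            return self._request('delete', url, **kwargs)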
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import unittest - -# pylint: disable=import-error -from mock import call -from mock import Mock -from mock import patch -# pylint: enable=import-error - -from core.models.fuel_client import client - -# pylint: disable=no-self-use - - -@patch('core.models.fuel_client.client.logger', autospec=True) -@patch('core.models.fuel_client.base_client.Adapter', autospec=True) -class TestClient(unittest.TestCase): - def test_init(self, adapter, logger): - session = Mock(spec='keystoneauth1.session.Session') - session.attach_mock(Mock(), 'auth') - session.auth.auth_url = 'http://127.0.0.1' - - obj = client.Client(session=session) - - self.assertIn( - call(service_type=u'ostf', session=session), - adapter.mock_calls - ) - - logger.assert_has_calls(( - call.info( - 'Initialization of NailgunClient using shared session \n' - '(auth_url={})'.format(session.auth.auth_url)), - )) - - self.assertIn('ostf', dir(obj)) diff --git a/core/_tests/models/fuel_client/test_ostf_client.py b/core/_tests/models/fuel_client/test_ostf_client.py deleted file mode 100644 index 914cfbdd1..000000000 --- a/core/_tests/models/fuel_client/test_ostf_client.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
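A short sketch of how the facade verified above is typically consumed; `session` is assumed to be an authenticated `keystoneauth1` session as in the previous example:

```python
from core.models.fuel_client import Client

client = Client(session=session)  # logs the session's auth_url on construction
# The only wired sub-client is OSTF:
client.ostf.run_tests(cluster_id=1, test_sets=['smoke'])
```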
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import unittest - -# pylint: disable=import-error -from mock import Mock -from mock import patch -# pylint: enable=import-error - -from core.models.fuel_client.ostf_client import OSTFClient - -# pylint: disable=no-self-use - - -@patch('core.models.fuel_client.ostf_client.logwrap', autospec=True) -class TestOSTFClient(unittest.TestCase): - @staticmethod - def prepare_session(): - session = Mock(spec='keystoneauth1.session.Session') - session.attach_mock(Mock(), 'auth') - session.auth.auth_url = 'http://127.0.0.1' - get = Mock(name='get') - post = Mock(name='post') - put = Mock(name='put') - delete = Mock(name='delete') - - session.attach_mock(get, 'get') - session.attach_mock(post, 'post') - session.attach_mock(put, 'put') - session.attach_mock(delete, 'delete') - - return session - - def test_basic(self, logwrap): - session = self.prepare_session() - client = OSTFClient(session) - - cluster_id = 0 - - client.get_test_sets(cluster_id=cluster_id) - - session.get.assert_called_once_with( - url="/testsets/{}".format(cluster_id)) - - session.reset_mock() - - client.get_tests(cluster_id=cluster_id) - - session.get.assert_called_once_with( - url="/tests/{}".format(cluster_id)) - - session.reset_mock() - - client.get_test_runs() - - session.get.assert_called_once_with(url="/testruns") - - def test_test_runs(self, logwrap): - session = self.prepare_session() - client = OSTFClient(session) - - cluster_id = 0 - testrun_id = 0xff - - client.get_test_runs(testrun_id=testrun_id) - session.get.assert_called_once_with( - url="/testruns/{}".format(testrun_id)) - - session.reset_mock() - - client.get_test_runs(testrun_id=testrun_id, cluster_id=cluster_id) - - session.get.assert_called_once_with( - url="/testruns/{}/{}".format(testrun_id, cluster_id)) - - session.reset_mock() - - client.get_test_runs(cluster_id=cluster_id) - - session.get.assert_called_once_with( - url="/testruns/last/{}".format(cluster_id)) - - def test_run_tests(self, logwrap): - session = self.prepare_session() - client = OSTFClient(session) - - cluster_id = 0 - - test_sets = ['smoke'] - - test_name = 'test' - - client.run_tests(cluster_id=cluster_id, test_sets=test_sets) - - json = [ - {'metadata': {'cluster_id': str(cluster_id), 'config': {}}, - 'testset': test_sets[0]}] - - session.post.assert_called_once_with( - "/testruns", json=json - ) - - session.reset_mock() - - # noinspection PyTypeChecker - client.run_tests( - cluster_id=cluster_id, test_sets=test_sets, test_name=test_name) - - json[0]['tests'] = [test_name] - - session.post.assert_called_once_with( - "/testruns", json=json - ) diff --git a/core/_tests/models/test_collector_client.py b/core/_tests/models/test_collector_client.py deleted file mode 100644 index 16f348e78..000000000 --- a/core/_tests/models/test_collector_client.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
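The four URL shapes asserted above come from `get_test_runs` branching on its optional arguments. Restated as a pure function (an illustrative re-statement, not code from this repository):

```python
def testruns_url(testrun_id=None, cluster_id=None):
    # Mirrors the branching verified by the tests above.
    url = '/testruns'
    if testrun_id is not None:
        url += '/{}'.format(testrun_id)
        if cluster_id is not None:
            url += '/{}'.format(cluster_id)
    elif cluster_id is not None:
        url += '/last/{}'.format(cluster_id)
    return url

assert testruns_url() == '/testruns'
assert testruns_url(testrun_id=255) == '/testruns/255'
assert testruns_url(testrun_id=255, cluster_id=0) == '/testruns/255/0'
assert testruns_url(cluster_id=0) == '/testruns/last/0'
```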
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import unittest - -# pylint: disable=import-error -from mock import call -from mock import patch -# pylint: enable=import-error - -from core.models.collector_client import CollectorClient - -ip = '127.0.0.1' -endpoint = 'fake' -url = "http://{0}/{1}".format(ip, endpoint) - - -@patch('requests.get') -class TestCollectorClient(unittest.TestCase): - def setUp(self): - self.client = CollectorClient(collector_ip=ip, endpoint=endpoint) - - def test_init(self, get): - self.assertEqual(self.client.url, url) - get.assert_not_called() - - def test_get(self, get): - tgt = '/tst' - self.client._get(tgt) - get.assert_called_once_with(url=url + tgt) - - def test_get_oswls(self, get): - master_node_uid = '0' - self.client.get_oswls(master_node_uid=master_node_uid) - get.assert_has_calls(( - call(url=url + '/oswls/{0}'.format(master_node_uid)), - call().json(), - )) - - def test_get_installation_info(self, get): - master_node_uid = '0' - self.client.get_installation_info(master_node_uid=master_node_uid) - get.assert_has_calls(( - call(url=url + '/installation_info/{0}'.format( - master_node_uid)), - call().json(), - )) - - def test_get_action_logs(self, get): - master_node_uid = '0' - self.client.get_action_logs(master_node_uid=master_node_uid) - get.assert_has_calls(( - call(url=url + '/action_logs/{0}'.format(master_node_uid)), - call().json(), - )) - - def test_get_oswls_by_resource(self, get): - master_node_uid = '0' - resource = '1' - self.client.get_oswls_by_resource( - master_node_uid=master_node_uid, - resource=resource - ) - get.assert_has_calls(( - call(url=url + "/oswls/{0}/{1}".format(master_node_uid, resource)), - call().json(), - )) - - @patch( - 'core.models.collector_client.CollectorClient.get_oswls_by_resource', - return_value={ - 'objs': [ - {'resource_data': 'test0'}, - {'resource_data': 'test1'}, - ] - } - ) - def test_get_oswls_by_resource_data(self, get_oswls, get): - master_node_uid = '0' - resource = '1' - result = self.client.get_oswls_by_resource_data( - master_node_uid=master_node_uid, - resource=resource - ) - get_oswls.assert_called_once_with( - master_node_uid, - resource - ) - self.assertEqual(result, 'test0') - - @patch( - 'core.models.collector_client.CollectorClient.get_action_logs', - return_value=[ - {'id': 0, 'body': {'additional_info': 'test0'}}, - {'id': 1, 'body': {'additional_info': 'test1'}}, - {'id': 2, 'body': {'additional_info': 'test2'}}, - ] - ) - def test_get_action_logs_ids(self, logs, get): - master_node_uid = 0 - result = self.client.get_action_logs_ids(master_node_uid) - logs.assert_called_once_with(master_node_uid) - self.assertEqual(result, [0, 1, 2]) - - @patch( - 'core.models.collector_client.CollectorClient.get_action_logs', - return_value=[ - {'id': 0, 'body': {'additional_info': 'test0'}}, - {'id': 1, 'body': {'additional_info': 'test1'}}, - {'id': 2, 'body': {'additional_info': 'test2'}}, - ] - ) - def test_get_action_logs_additional_info_by_id(self, logs, get): - master_node_uid = 0 - action_id = 1 - result = self.client.get_action_logs_additional_info_by_id( - master_node_uid, action_id) - logs.assert_called_once_with(master_node_uid) - self.assertEqual(result, ['test1']) - - @patch( - 'core.models.collector_client.CollectorClient.get_action_logs_ids', - return_value=[0, 1, 2] - ) - def test_get_action_logs_count(self, get_ids, get): - master_node_uid = 0 - result = self.client.get_action_logs_count(master_node_uid) - 
get_ids.assert_called_once_with(master_node_uid)
- self.assertEqual(result, 3)
-
- @patch(
- 'core.models.collector_client.CollectorClient.get_installation_info',
- return_value={'structure': 'test_result'}
- )
- def test_get_installation_info_data(self, get_inst_info, get):
- master_node_uid = 0
- result = self.client.get_installation_info_data(master_node_uid)
- get_inst_info.assert_called_once_with(master_node_uid)
- self.assertEqual(result, 'test_result')
diff --git a/core/_tests/models/test_value_objects.py b/core/_tests/models/test_value_objects.py
deleted file mode 100644
index 69692eed3..000000000
--- a/core/_tests/models/test_value_objects.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from copy import deepcopy
-import unittest
-
-from core.models.value_objects import FuelAccessParams
-
-EXAMPLE_YAML_DICT = {
- 'OS_USERNAME': 'root',
- 'OS_TENANT_NAME': 'project',
- 'OS_PASSWORD': 'password',
- 'SERVER_ADDRESS': '127.0.0.1',
- 'SERVER_PORT': '8000',
- 'KEYSTONE_PORT': '5000'
-}
-
-EXPECTED_OPENRC_CONTENT = 'export OS_USERNAME="root"\n' \
- 'export OS_PASSWORD="password"\n' \
- 'export OS_TENANT_NAME="project"\n' \
- 'export SERVICE_URL="https://127.0.0.1:8000"\n' \
- 'export OS_AUTH_URL="https://127.0.0.1:5000"\n'
-
-
-class TestFuelAccessParams(unittest.TestCase):
- def test_simple_init(self):
- fuel_access = FuelAccessParams()
-
- fuel_access.username = 'root'
- self.assertEqual(fuel_access.username, 'root')
-
- fuel_access.password = 'password'
- self.assertEqual(fuel_access.password, 'password')
-
- fuel_access.project = 'tenant'
- self.assertEqual(fuel_access.project, 'tenant')
-
- fuel_access.service_address = '127.0.0.1'
- self.assertEqual(fuel_access.service_address, '127.0.0.1')
-
- fuel_access.service_port = '777'
- self.assertEqual(fuel_access.service_port, '777')
-
- fuel_access.keystone_address = '127.0.0.1'
- self.assertEqual(fuel_access.keystone_address, '127.0.0.1')
-
- fuel_access.keystone_port = '5000'
- self.assertEqual(fuel_access.keystone_port, '5000')
-
- def test_tls_init(self):
- fuel_access = FuelAccessParams(tls_keystone_enabled=True,
- tls_service_enabled=False)
- fuel_access.service_address = '127.0.0.1'
- fuel_access.service_port = '777'
-
- fuel_access.keystone_address = '127.0.0.1'
- fuel_access.keystone_port = '5000'
-
- self.assertEqual(fuel_access.service_url, 'http://127.0.0.1:777')
- self.assertEqual(fuel_access.os_auth_url, 'https://127.0.0.1:5000')
-
- def test_init_from_yaml_content(self):
- fuel_access = FuelAccessParams.from_yaml_params(EXAMPLE_YAML_DICT)
- self.assertEqual(fuel_access.service_address, '127.0.0.1')
- self.assertEqual(fuel_access.os_auth_url, 'http://127.0.0.1:5000')
-
- def test_init_from_yaml_content_with_tls(self):
- fuel_access = FuelAccessParams.from_yaml_params(
- EXAMPLE_YAML_DICT,
- tls_service_enabled=True,
- tls_keystone_enabled=True
- )
- self.assertEqual(fuel_access.service_address, '127.0.0.1')
- self.assertEqual(fuel_access.os_auth_url, 'https://127.0.0.1:5000')
- self.assertEqual(fuel_access.service_url, 'https://127.0.0.1:8000')
-
- def test_failed_from_yaml_content_when_key_absents(self):
- yaml_from_content = deepcopy(EXAMPLE_YAML_DICT)
- yaml_from_content.pop('OS_PASSWORD', None)
- with self.assertRaises(KeyError):
- FuelAccessParams.from_yaml_params(yaml_from_content)
-
- def test_export_to_openrc(self):
- openrc_content = FuelAccessParams.from_yaml_params(
- EXAMPLE_YAML_DICT,
- tls_service_enabled=True,
- tls_keystone_enabled=True
- ).to_openrc_content()
- self.assertEqual(EXPECTED_OPENRC_CONTENT, openrc_content)
diff
--git a/core/helpers/__init__.py b/core/helpers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/core/helpers/log_helpers.py b/core/helpers/log_helpers.py
deleted file mode 100644
index a58c40ad0..000000000
--- a/core/helpers/log_helpers.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import unicode_literals
-
-import collections
-import functools
-import inspect
-import logging
-import sys
-import warnings
-
-import six
-
-from core import logger
-
-
-# pylint: disable=no-member
-def _get_arg_names(func):
- """get argument names for function
-
- :param func: func
- :return: list of function argnames
- :rtype: list
-
- >>> def tst_1():
- ... pass
-
- >>> _get_arg_names(tst_1)
- []
-
- >>> def tst_2(arg):
- ... pass
-
- >>> _get_arg_names(tst_2)
- ['arg']
- """
- # noinspection PyUnresolvedReferences
- return (
- [arg for arg in inspect.getargspec(func=func).args] if six.PY2 else
- list(inspect.signature(obj=func).parameters.keys())
- )
-
-
-def _getcallargs(func, *positional, **named):
- """get real function call arguments without calling function
-
- :rtype: dict
- """
- # noinspection PyUnresolvedReferences
- if sys.version_info[0:2] < (3, 5): # apply_defaults is py35 feature
- orig_args = inspect.getcallargs(func, *positional, **named)
- # Construct OrderedDict as Py3
- arguments = collections.OrderedDict(
- [(key, orig_args[key]) for key in _get_arg_names(func)]
- )
- if six.PY2:
- # args and kwargs are not bound in py27
- # Note: py27 inspect is not unicode
- missed = (
- (key, val)
- for key, val in orig_args.items()
- if key not in arguments)
- args, kwargs = (), ()
- for record in missed:
- if isinstance(record[1], (list, tuple)):
- args = record
- elif isinstance(record[1], dict):
- kwargs = record
-
- if args:
- arguments[args[0]] = args[1]
- if kwargs:
- arguments[kwargs[0]] = kwargs[1]
- return arguments
- sig = inspect.signature(func).bind(*positional, **named)
- sig.apply_defaults() # bind() does not apply defaults by itself
- return sig.arguments
-# pylint:enable=no-member
-
-
-def _simple(item):
- """Check for nested iterables: True if item is not a container"""
- return not isinstance(item, (list, set, tuple, dict))
-
-
-_formatters = {
- 'simple': "{spc:<{indent}}{val!r}".format,
- 'text': "{spc:<{indent}}{prefix}'''{string}'''".format,
- 'dict': "\n{spc:<{indent}}{key!r:{size}}: {val},".format,
-}
-
-
-def pretty_repr(src, indent=0, no_indent_start=False, max_indent=20):
- """Make human readable repr of object
-
- :param src: object to process
- :type src: object
- :param indent: start indentation; each nested level adds 4
- :type indent: int
- :param no_indent_start: do not indent open bracket and simple parameters
- :type no_indent_start: bool
- :param max_indent: maximal indent before classic repr() call
- :type max_indent: int
- :return: formatted string
- """
- if _simple(src) or indent >= max_indent:
- indent = 0 if no_indent_start else indent
- if isinstance(src, (six.binary_type,
six.text_type)):
- if isinstance(src, six.binary_type):
- string = src.decode(
- encoding='utf-8',
- errors='backslashreplace'
- )
- prefix = 'b'
- else:
- string = src
- prefix = 'u'
- return _formatters['text'](
- spc='',
- indent=indent,
- prefix=prefix,
- string=string
- )
- return _formatters['simple'](
- spc='',
- indent=indent,
- val=src
- )
- if isinstance(src, dict):
- prefix, suffix = '{', '}'
- result = ''
- max_len = max(len(repr(key)) for key in src) if src else 0
- for key, val in src.items():
- result += _formatters['dict'](
- spc='',
- indent=indent + 4,
- size=max_len,
- key=key,
- val=pretty_repr(val, indent + 8, no_indent_start=True)
- )
- return (
- '\n{start:>{indent}}'.format(
- start=prefix,
- indent=indent + 1
- ) +
- result +
- '\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)
- )
- if isinstance(src, list):
- prefix, suffix = '[', ']'
- elif isinstance(src, tuple):
- prefix, suffix = '(', ')'
- else:
- prefix, suffix = '{', '}'
- result = ''
- for elem in src:
- if _simple(elem):
- result += '\n'
- result += pretty_repr(elem, indent + 4) + ','
- return (
- '\n{start:>{indent}}'.format(
- start=prefix,
- indent=indent + 1) +
- result +
- '\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)
- )
-
-
-def logwrap(log=logger, log_level=logging.DEBUG, exc_level=logging.ERROR):
- """Log function calls
-
- :type log: logging.Logger
- :type log_level: int
- :type exc_level: int
- :rtype: callable
- """
- warnings.warn(
- 'logwrap has moved to fuel-devops 3.0.3;'
- ' please update imports after switching',
- DeprecationWarning)
-
- def real_decorator(func):
- @functools.wraps(func)
- def wrapped(*args, **kwargs):
- call_args = _getcallargs(func, *args, **kwargs)
- args_repr = ""
- if len(call_args) > 0:
- args_repr = "\n " + "\n ".join((
- "{key!r}={val},".format(
- key=key,
- val=pretty_repr(val, indent=8, no_indent_start=True)
- )
- for key, val in call_args.items())
- ) + '\n'
- log.log(
- level=log_level,
- msg="Calling: \n{name!r}({arguments})".format(
- name=func.__name__,
- arguments=args_repr
- )
- )
- try:
- result = func(*args, **kwargs)
- log.log(
- level=log_level,
- msg="Done: {name!r} with result:\n{result}".format(
- name=func.__name__,
- result=pretty_repr(result))
- )
- except BaseException:
- log.log(
- level=exc_level,
- msg="Failed: \n{name!r}({arguments})".format(
- name=func.__name__,
- arguments=args_repr,
- ),
- exc_info=True
- )
- raise
- return result
- return wrapped
-
- if not isinstance(log, logging.Logger):
- func, log = log, logger
- return real_decorator(func)
-
- return real_decorator
-
-
-class QuietLogger(object):
- """Reduce logging level while context is executed."""
-
- def __init__(self, upper_log_level=None):
- """Reduce logging level while context is executed.
-
- :param upper_log_level: log level to ignore
- :type upper_log_level: int
- """
- self.log_level = upper_log_level
- self.level = None
-
- def __enter__(self):
- console = logging.StreamHandler()
- self.level = console.level
- if self.log_level is None:
- self.log_level = self.level
- elif self.log_level < self.level:
- logger.debug(
- 'QuietLogger requested a lower level than is already set. 
'
- 'Not changing level')
- return
- console.setLevel(self.log_level + 1)
-
- def __exit__(self, exc_type, exc_value, exc_tb):
- logging.StreamHandler().setLevel(self.level)
-
-
-__all__ = ['logwrap', 'QuietLogger', 'logger']
diff --git a/core/helpers/setup_teardown.py b/core/helpers/setup_teardown.py
deleted file mode 100644
index 1ea6780be..000000000
--- a/core/helpers/setup_teardown.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import unicode_literals
-
-import functools
-import inspect
-
-import six
-
-# Setup/Teardown decorators, which are missing in Proboscis.
-# Usage: like in Nose.
-
-
-# pylint: disable=no-member
-def __getcallargs(func, *positional, **named):
- """get real function call arguments without calling function
-
- :rtype: dict
- """
- # noinspection PyUnresolvedReferences
- if six.PY2:
- return inspect.getcallargs(func, *positional, **named)
- sig = inspect.signature(func).bind(*positional, **named)
- sig.apply_defaults() # bind() does not apply defaults by itself
- return sig.arguments
-
-
-def __get_arg_names(func):
- """get argument names for function
-
- :param func: func
- :return: list of function argnames
- :rtype: list
-
- >>> def tst_1():
- ... pass
-
- >>> __get_arg_names(tst_1)
- []
-
- >>> def tst_2(arg):
- ... pass
-
- >>> __get_arg_names(tst_2)
- ['arg']
- """
- # noinspection PyUnresolvedReferences
- return (
- [arg for arg in inspect.getargspec(func=func).args] if six.PY2 else
- list(inspect.signature(obj=func).parameters.keys())
- )
-# pylint:enable=no-member
-
-
-def __call_in_context(func, context_args):
- """call function with arguments substituted from a dict
-
- :param func: function or None
- :param context_args: dict
- :type context_args: dict
- :return: function call results
-
- >>> __call_in_context(None, {})
-
- >>> def print_print():
- ... print ('print')
-
- >>> __call_in_context(print_print, {})
- print
-
- >>> __call_in_context(print_print, {'val': 1})
- print
-
- >>> def print_val(val):
- ... print(val)
-
- >>> __call_in_context(print_val, {'val': 1})
- 1
- """
- if func is None:
- return
-
- func_args = __get_arg_names(func)
- if not func_args:
- return func()
-
- if inspect.ismethod(func) and 'cls' in func_args:
- func_args.remove('cls')
- # cls is used in @classmethod and cannot be passed
- # via args or kwargs, so classmethod decorators always have access
- # to their own class only, except via a direct class argument
- elif 'self' in context_args:
- context_args.setdefault('cls', context_args['self'].__class__)
- try:
- arg_values = [context_args[k] for k in func_args]
- except KeyError as e:
- raise ValueError("Argument '{}' is missing".format(str(e)))
-
- return func(*arg_values)
-
-
-def setup_teardown(setup=None, teardown=None):
- """Add setup and teardown for functions and methods.
-
- :param setup: function
- :param teardown: function
- :return:
-
- >>> def setup_func():
- ... print('setup_func called')
-
- >>> def teardown_func():
- ... 
print('teardown_func called') - - >>> @setup_teardown(setup=setup_func, teardown=teardown_func) - ... def positive_example(arg): - ... print(arg) - - >>> positive_example(arg=1) - setup_func called - 1 - teardown_func called - - >>> def print_call(text): - ... print (text) - - >>> @setup_teardown( - ... setup=lambda: print_call('setup lambda'), - ... teardown=lambda: print_call('teardown lambda')) - ... def positive_example_lambda(arg): - ... print(arg) - - >>> positive_example_lambda(arg=1) - setup lambda - 1 - teardown lambda - - >>> def setup_with_self(self): - ... print( - ... 'setup_with_self: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=self.cls_val, val=self.val)) - - >>> def teardown_with_self(self): - ... print( - ... 'teardown_with_self: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=self.cls_val, val=self.val)) - - >>> def setup_with_cls(cls): - ... print( - ... 'setup_with_cls: cls.cls_val = {cls_val!s}'.format( - ... cls_val=cls.cls_val)) - - >>> def teardown_with_cls(cls): - ... print('teardown_with_cls: cls.cls_val = {cls_val!s}'.format( - ... cls_val=cls.cls_val)) - - >>> class HelpersBase(object): - ... cls_val = None - ... def __init__(self): - ... self.val = None - ... @classmethod - ... def cls_setup(cls): - ... print( - ... 'cls_setup: cls.cls_val = {cls_val!s}'.format( - ... cls_val=cls.cls_val)) - ... @classmethod - ... def cls_teardown(cls): - ... print( - ... 'cls_teardown: cls.cls_val = {cls_val!s}'.format( - ... cls_val=cls.cls_val)) - ... def self_setup(self): - ... print( - ... 'self_setup: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=self.cls_val, val=self.val)) - ... def self_teardown(self): - ... print( - ... 'self_teardown: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=self.cls_val, val=self.val)) - - >>> class Test(HelpersBase): - ... @setup_teardown( - ... setup=HelpersBase.self_setup, - ... teardown=HelpersBase.self_teardown) - ... def test_self_self(self, cls_val=0, val=0): - ... print( - ... 'test_self_self: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=cls_val, val=val)) - ... self.val = val - ... self.cls_val = cls_val - ... @setup_teardown( - ... setup=HelpersBase.cls_setup, - ... teardown=HelpersBase.cls_teardown) - ... def test_self_cls(self, cls_val=1, val=1): - ... print( - ... 'test_self_cls: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=cls_val, val=val)) - ... self.val = val - ... self.cls_val = cls_val - ... @setup_teardown( - ... setup=setup_func, - ... teardown=teardown_func) - ... def test_self_none(self, cls_val=2, val=2): - ... print( - ... 'test_self_cls: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=cls_val, val=val)) - ... self.val = val - ... self.cls_val = cls_val - ... @setup_teardown( - ... setup=setup_with_self, - ... teardown=teardown_with_self) - ... def test_self_ext_self(self, cls_val=-1, val=-1): - ... print( - ... 'test_self_ext_self: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=cls_val, val=val)) - ... self.val = val - ... self.cls_val = cls_val - ... @setup_teardown( - ... setup=setup_with_cls, - ... teardown=teardown_with_cls) - ... def test_self_ext_cls(self, cls_val=-2, val=-2): - ... print( - ... 'test_self_ext_cls: ' - ... 'self.cls_val = {cls_val!s}, self.val = {val!s}'.format( - ... cls_val=cls_val, val=val)) - ... self.val = val - ... 
self.cls_val = cls_val - ... @classmethod - ... @setup_teardown( - ... setup=HelpersBase.cls_setup, - ... teardown=HelpersBase.cls_teardown) - ... def test_cls_cls(cls, cls_val=3): - ... print( - ... 'test_cls_cls: cls.cls_val = {cls_val!s}'.format( - ... cls_val=cls_val)) - ... cls.cls_val = cls_val - ... @classmethod - ... @setup_teardown( - ... setup=setup_func, - ... teardown=teardown_func) - ... def test_cls_none(cls, cls_val=4): - ... print( - ... 'test_cls_none: cls.cls_val = {cls_val!s}'.format( - ... cls_val=cls_val)) - ... cls.cls_val = cls_val - ... @classmethod - ... @setup_teardown( - ... setup=setup_with_cls, - ... teardown=teardown_with_cls) - ... def test_cls_ext_cls(cls, cls_val=-3): - ... print( - ... 'test_self_ext_cls: cls.cls_val = {cls_val!s}'.format( - ... cls_val=cls_val)) - ... cls.cls_val = cls_val - ... @staticmethod - ... @setup_teardown(setup=setup_func, teardown=teardown_func) - ... def test_none_none(): - ... print('test') - - >>> test = Test() - - >>> test.test_self_self() - self_setup: self.cls_val = None, self.val = None - test_self_self: self.cls_val = 0, self.val = 0 - self_teardown: self.cls_val = 0, self.val = 0 - - >>> test.test_self_cls() - cls_setup: cls.cls_val = None - test_self_cls: self.cls_val = 1, self.val = 1 - cls_teardown: cls.cls_val = None - - >>> test.test_self_none() - setup_func called - test_self_cls: self.cls_val = 2, self.val = 2 - teardown_func called - - >>> test.test_self_ext_self() - setup_with_self: self.cls_val = 2, self.val = 2 - test_self_ext_self: self.cls_val = -1, self.val = -1 - teardown_with_self: self.cls_val = -1, self.val = -1 - - >>> test.test_self_ext_cls() - setup_with_cls: cls.cls_val = None - test_self_ext_cls: self.cls_val = -2, self.val = -2 - teardown_with_cls: cls.cls_val = None - - >>> test.test_cls_cls() - cls_setup: cls.cls_val = None - test_cls_cls: cls.cls_val = 3 - cls_teardown: cls.cls_val = None - - >>> test.test_cls_none() - setup_func called - test_cls_none: cls.cls_val = 4 - teardown_func called - - >>> test.test_cls_ext_cls() - setup_with_cls: cls.cls_val = 4 - test_self_ext_cls: cls.cls_val = -3 - teardown_with_cls: cls.cls_val = -3 - - >>> test.test_none_none() - setup_func called - test - teardown_func called - """ - def real_decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - real_args = __getcallargs(func, *args, **kwargs) - __call_in_context(setup, real_args) - try: - result = func(*args, **kwargs) - finally: - __call_in_context(teardown, real_args) - return result - return wrapper - return real_decorator diff --git a/core/models/__init__.py b/core/models/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/models/collector_client.py b/core/models/collector_client.py deleted file mode 100644 index 31e2e5f72..000000000 --- a/core/models/collector_client.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
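Ahead of the collector client implementation that follows: it is a thin wrapper over `requests`, so driving it only takes an IP and an endpoint prefix. A usage sketch (IP, endpoint, and UID are placeholders, not values from this repository):

```python
from core.models.collector_client import CollectorClient

client = CollectorClient(collector_ip='10.20.0.2', endpoint='api/v1/json')
uid = 'master-uid'  # placeholder master node UID
print(client.get_action_logs_count(uid))
print(client.get_installation_info_data(uid))
```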
- -from __future__ import unicode_literals - -import requests - -from core.helpers.log_helpers import logwrap - - -class CollectorClient(object): - """CollectorClient.""" # TODO documentation - - def __init__(self, collector_ip, endpoint): - url = "http://{0}/{1}".format(collector_ip, endpoint) - self.__url = url - super(CollectorClient, self).__init__() - - @property - def url(self): - return self.__url - - def _get(self, endpoint): - return requests.get(url=self.url + endpoint) - - @logwrap - def get_oswls(self, master_node_uid): - return self._get("/oswls/{0}".format(master_node_uid)).json() - - @logwrap - def get_installation_info(self, master_node_uid): - return self._get("/installation_info/{0}".format( - master_node_uid)).json() - - @logwrap - def get_action_logs(self, master_node_uid): - return self._get("/action_logs/{0}".format( - master_node_uid)).json() - - @logwrap - def get_oswls_by_resource(self, master_node_uid, resource): - return self._get("/oswls/{0}/{1}".format(master_node_uid, - resource)).json() - - @logwrap - def get_oswls_by_resource_data(self, master_node_uid, resource): - return self.get_oswls_by_resource(master_node_uid, - resource)['objs'][0]['resource_data'] - - @logwrap - def get_action_logs_ids(self, master_node_uid): - return [actions['id'] - for actions in self.get_action_logs(master_node_uid)] - - @logwrap - def get_action_logs_count(self, master_node_uid): - return len(self.get_action_logs_ids(master_node_uid)) - - @logwrap - def get_action_logs_additional_info_by_id( - self, master_node_uid, action_id): - return [actions['body']['additional_info'] - for actions in self.get_action_logs(master_node_uid) - if actions['id'] == action_id] - - @logwrap - def get_installation_info_data(self, master_node_uid): - return self.get_installation_info(master_node_uid)['structure'] diff --git a/core/models/fuel_client/__init__.py b/core/models/fuel_client/__init__.py deleted file mode 100644 index 17c2cdb6b..000000000 --- a/core/models/fuel_client/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from core.models.fuel_client.client import Client - -__all__ = ['Client'] diff --git a/core/models/fuel_client/base_client.py b/core/models/fuel_client/base_client.py deleted file mode 100644 index 03f59472b..000000000 --- a/core/models/fuel_client/base_client.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from __future__ import unicode_literals
-
-
-class Adapter(object):
- def __init__(self, session, service_type='fuel'):
- self.session = session
- self.service_type = service_type
-
- def __repr__(self):
- return (
- "{cls}("
- "session=<Session(id={sess_id})>,"
- "service_type={svc}"
- ") id={id}".format(
- cls=self.__class__.__name__,
- sess_id=hex(id(self.session)),
- svc=self.service_type,
- id=hex(id(self))
- ))
-
- def get(self, url, **kwargs):
- kwargs.setdefault(
- 'endpoint_filter', {'service_type': self.service_type})
- return self.session.get(url=url, connect_retries=1, **kwargs)
-
- def delete(self, url, **kwargs):
- kwargs.setdefault(
- 'endpoint_filter', {'service_type': self.service_type})
- return self.session.delete(url=url, connect_retries=1, **kwargs)
-
- def post(self, url, **kwargs):
- kwargs.setdefault(
- 'endpoint_filter', {'service_type': self.service_type})
- return self.session.post(url=url, connect_retries=1, **kwargs)
-
- def put(self, url, **kwargs):
- kwargs.setdefault(
- 'endpoint_filter', {'service_type': self.service_type})
- return self.session.put(url=url, connect_retries=1, **kwargs)
-
-
-class BaseClient(object):
- def __init__(self, client):
- self._client = client
diff --git a/core/models/fuel_client/client.py b/core/models/fuel_client/client.py
deleted file mode 100644
index b7ec92935..000000000
--- a/core/models/fuel_client/client.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import unicode_literals
-
-from core import logger
-from core.models.fuel_client import base_client
-from core.models.fuel_client import ostf_client
-
-
-class Client(object):
- def __init__(self, session):
- logger.info(
- 'Initialization of NailgunClient using shared session \n'
- '(auth_url={})'.format(session.auth.auth_url))
-
- ostf_clnt = base_client.Adapter(session=session, service_type='ostf')
- # TODO(astepanov): use for FUEL functionality:
- # clnt = base_client.Adapter(session=session)
-
- self.ostf = ostf_client.OSTFClient(ostf_clnt)
-
-
-__all__ = ['Client']
diff --git a/core/models/fuel_client/ostf_client.py b/core/models/fuel_client/ostf_client.py
deleted file mode 100644
index 2fa9f031b..000000000
--- a/core/models/fuel_client/ostf_client.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
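For reference, the request body that `run_tests` in the OSTF client below posts to `/testruns` has this shape (test set and test name here are example values, not fixtures from this repository):

```python
# One record per requested test set; 'tests' appears only when a single
# test is explicitly named via test_name.
payload = [{
    'metadata': {'cluster_id': '1', 'config': {}},
    'testset': 'smoke',
    'tests': ['fuel_health.tests.smoke.test_example'],  # optional
}]
```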
-
-from __future__ import unicode_literals
-
-from core.helpers.log_helpers import logwrap
-from core.models.fuel_client import base_client
-
-
-class OSTFClient(base_client.BaseClient):
- @logwrap
- def get_test_sets(self, cluster_id):
- """get all test sets for a cluster
-
- :type cluster_id: int
- """
- return self._client.get(
- url="/testsets/{}".format(cluster_id),
- ).json()
-
- @logwrap
- def get_tests(self, cluster_id):
- """get all tests for a cluster
-
- :type cluster_id: int
- """
- return self._client.get(
- url="/tests/{}".format(cluster_id),
- ).json()
-
- @logwrap
- def get_test_runs(self, testrun_id=None, cluster_id=None):
- """get test run results
-
- :type testrun_id: int
- :type cluster_id: int
- """
- url = '/testruns'
- if testrun_id is not None:
- url += '/{}'.format(testrun_id)
- if cluster_id is not None:
- url += '/{}'.format(cluster_id)
- elif cluster_id is not None:
- url += '/last/{}'.format(cluster_id)
- return self._client.get(url=url).json()
-
- @logwrap
- def run_tests(self, cluster_id, test_sets, test_name=None):
- """run tests on specified cluster
-
- :type cluster_id: int
- :type test_sets: list
- :type test_name: str
- """
- # get tests first, otherwise a 500 error will be thrown
- self.get_tests(cluster_id)
- json = []
- for test_set in test_sets:
- record = {
- 'metadata': {'cluster_id': str(cluster_id), 'config': {}},
- 'testset': test_set
- }
- if test_name is not None:
- record['tests'] = [test_name]
-
- json.append(record)
-
- return self._client.post("/testruns", json=json).json()
diff --git a/core/models/value_objects.py b/core/models/value_objects.py
deleted file mode 100644
index cb26838ca..000000000
--- a/core/models/value_objects.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# pylint: disable=too-many-instance-attributes
-class FuelAccessParams(object):
- """Value object to represent and map YAML file values of Fuel master node
- access to an openrc file.
- Should not use any API."""
-
- def __init__(self,
- tls_service_enabled=False,
- tls_keystone_enabled=False):
- self.__username = None # type: str
- self.__password = None # type: str
- self.__project = None # type: str
- self.__service_address = None # type: str
- self.__service_port = None # type: str
- self.__keystone_address = None # type: str
- self.__keystone_port = None # type: str
- self.__tls_service_enabled = tls_service_enabled # type: bool
- self.__tls_keystone_enabled = tls_keystone_enabled # type: bool
-
- @property
- def username(self):
- return self.__username
-
- @username.setter
- def username(self, value):
- """Set up username
-
- :type value: str
- """
- self.__username = value
-
- @property
- def password(self):
- return self.__password
-
- @password.setter
- def password(self, value):
- """Set up password
-
- :type value: str
- """
- self.__password = value
-
- @property
- def project(self):
- return self.__project
-
- @project.setter
- def project(self, value):
- """Set up project
-
- :type value: str
- """
- self.__project = value
-
- @property
- def service_address(self):
- return self.__service_address
-
- @service_address.setter
- def service_address(self, value):
- """Set up service address
-
- :type value: str
- """
- self.__service_address = value
-
- @property
- def service_port(self):
- return self.__service_port
-
- @service_port.setter
- def service_port(self, value):
- """Set up service port
-
- :type value: str
- """
- self.__service_port = value
-
- @property
- def keystone_address(self):
- address = self.service_address
- if self.__keystone_address:
- address = self.__keystone_address
- return address
-
- @keystone_address.setter
- def keystone_address(self, value):
- """Set up keystone address
-
- :type value: str
- """
- self.__keystone_address = value
-
- @property
- def keystone_port(self):
- return self.__keystone_port
-
- @keystone_port.setter
- def keystone_port(self, value):
- """Set up keystone port
-
- :type value: str
- """
- self.__keystone_port = value
-
- @property
- def os_auth_url(self):
- """Get url of authentication endpoint
-
- :rtype: str
- :return: The url of os auth endpoint
- """
- protocol = 'https' if self.__tls_keystone_enabled else 'http'
-
- return "{protocol}://{keystone_address}:{keystone_port}".format(
- protocol=protocol,
- keystone_address=self.keystone_address,
- keystone_port=self.keystone_port
- )
-
- @property
- def service_url(self):
- """Get url of nailgun service endpoint
-
- :rtype: str
- :return: The url of nailgun endpoint
- """
- protocol = 'https' if self.__tls_service_enabled else 'http'
-
- return "{protocol}://{service_address}:{service_port}".format(
- protocol=protocol,
- service_address=self.service_address,
- service_port=self.service_port
- )
-
- def to_openrc_content(self):
- """Method to represent access credentials in openrc format.
-
- :rtype: str
- :return: string content for openrc file
- """
- env_template = ('export OS_USERNAME="{username}"\n'
- 'export OS_PASSWORD="{password}"\n'
- 'export OS_TENANT_NAME="{project}"\n'
- 'export SERVICE_URL="{service_url}"\n'
- 'export OS_AUTH_URL="{os_auth_url}"\n')
-
- return env_template.format(
- username=self.username,
- password=self.password,
- project=self.project,
- service_url=self.service_url,
- os_auth_url=self.os_auth_url,
- )
-
- @classmethod
- def from_yaml_params(cls,
- yaml_content,
- tls_service_enabled=False,
- tls_keystone_enabled=False):
- """The method to initialize the value object from YAML parsed on the
- master node. 
-
- :type yaml_content: dict[str]
- :type tls_service_enabled: boolean
- :type tls_keystone_enabled: boolean
- :rtype: FuelAccessParams
- :return: a configured instance ready for use
- """
- access_params = cls(
- tls_service_enabled=tls_service_enabled,
- tls_keystone_enabled=tls_keystone_enabled)
- access_params.username = yaml_content['OS_USERNAME']
- access_params.password = yaml_content['OS_PASSWORD']
- access_params.project = yaml_content['OS_TENANT_NAME']
- access_params.service_address = yaml_content['SERVER_ADDRESS']
- access_params.service_port = yaml_content['SERVER_PORT']
- access_params.keystone_port = yaml_content['KEYSTONE_PORT']
-
- return access_params
-# pylint: enable=too-many-instance-attributes
diff --git a/core/pytest.ini b/core/pytest.ini
deleted file mode 100644
index e967bfdc9..000000000
--- a/core/pytest.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[pytest]
-addopts = -vvv -s -p no:django -p no:ipdb
-testpaths = _tests
diff --git a/doc/Makefile b/doc/Makefile
deleted file mode 100644
index 5cf10122c..000000000
--- a/doc/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-.PHONY: clean-doc doc-html
-
-SPHINXBUILD = sphinx-build
-DOC_BUILDDIR = _build
-SPHINXOPTS = -d $(DOC_BUILDDIR)/doctrees .
-
-help:
- @echo 'Build directives (can be overridden by environment variables'
- @echo 'or by command line parameters):'
- @echo ' DOC_BUILDDIR: $(DOC_BUILDDIR)'
- @echo
- @echo 'Available targets:'
- @echo ' doc-html - build html documentation based on source code of product'
- @echo ' clean-doc - clean generated docs'
- @echo
-
-
-
-doc-html:
- $(SPHINXBUILD) -b html $(SPHINXOPTS) $(DOC_BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(DOC_BUILDDIR)/html."
-
-clean-doc:
- rm -rf $(DOC_BUILDDIR)
- @echo
- @echo "Cleaned generated documentation output."
diff --git a/doc/base_tests.rst b/doc/base_tests.rst
deleted file mode 100644
index 38cf13550..000000000
--- a/doc/base_tests.rst
+++ /dev/null
@@ -1,768 +0,0 @@
-.. index:: Base tests
-
-General OpenStack/Fuel Tests
-****************************
-
-General tests
-=============
-
-Base Test Case
---------------
-.. automodule:: fuelweb_test.tests.base_test_case
- :members:
-
-Admin Node Tests
-----------------
-.. automodule:: fuelweb_test.tests.test_admin_node
- :members:
-
-Test Admin Node Backup-Restore
-------------------------------
-.. automodule:: fuelweb_test.tests.test_backup_restore
- :members:
-
-Test Bonding base
------------------
-.. automodule:: fuelweb_test.tests.test_bonding_base
- :members:
-
-Test Bonding
-------------
-.. automodule:: fuelweb_test.tests.test_bonding
- :members:
-
-Test Bond offloading types
---------------------------
-.. automodule:: fuelweb_test.tests.test_bond_offloading
- :members:
-
-Test Ceph
----------
-.. automodule:: fuelweb_test.tests.test_ceph
- :members:
-
-Test Cli
---------
-.. automodule:: fuelweb_test.tests.test_cli
- :members:
-
-Test Cli Base
--------------
-.. automodule:: fuelweb_test.tests.test_cli_base
- :members:
-
-Test Cli role component (create/update/delete role)
----------------------------------------------------
-.. automodule:: fuelweb_test.tests.tests_cli.test_cli_role
- :members:
-
-Test Cli deploy (deploy neutron tun)
------------------------------------
-.. automodule:: fuelweb_test.tests.tests_cli.test_cli_deploy
- :members:
-
-Test Cli deploy ceph neutron tun
--------------------------------
-.. automodule:: fuelweb_test.tests.tests_cli.test_cli_deploy_ceph
- :members:
-
-Test custom hostname
--------------------
-.. 
automodule:: fuelweb_test.tests.test_custom_hostname - :members: - -Test custom graph ------------------ -.. automodule:: fuelweb_test.tests.tests_custom_graph.test_custom_graph - :members: - - -Prepare target image file -------------------------- -.. automodule:: fuelweb_test.config_templates.prepare_release_image - :members: - -Test DPDK ---------- -.. automodule:: fuelweb_test.tests.test_dpdk - :members: - -Test Environment Action ------------------------ -.. automodule:: fuelweb_test.tests.test_environment_action - :members: - -Test ha NeutronTUN deployment group 1 (controller+baseos multirole and ceph for images/objects) ------------------------------------------------------------------------------------------------ -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_tun.test_ha_tun_group_1 - :members: - -Test ha NeutronTUN deployment group 2 (ceph for all, baseos node and ceph for all, untag networks and changed OS credentials) ------------------------------------------------------------------------------------------------------------------------------ -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_tun.test_ha_tun_group_2 - :members: - -Test ha NeutronTUN deployment group 3 (5 controllers, ceph for images/ephemeral and no volumes, ceph for images/ephemeral) --------------------------------------------------------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_tun.test_ha_tun_group_3 - :members: - -Test ha neutron vlan deployment group 1 (cinder/ceph for images and ceph for volumes/swift) -------------------------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_1 - :members: - -Test ha neutron vlan deployment group 2 (cinder/ceph for ephemeral and cinder/ceph for images/ephemeral) --------------------------------------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_2 - :members: - -Test ha neutron vlan deployment group 3(no volumes storage/ceph volumes, ephemeral) ------------------------------------------------------------------------------------ -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_3 - :members: - -Test ha neutron vlan deployment group 4(cinder volumes, ceph images and rados gw/ default storage) --------------------------------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_4 - :members: - -Test ha neutron vlan deployment group 5 (ceph for volumes/images/ephemeral/rados and cinder/ceph for images/ephemeral/rados) ----------------------------------------------------------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_5 - :members: - -Test ha neutron vlan deployment group 6 (no volumes and ceph for images/ephemeral/rados and ceph for volumes/images/ephemeral) ------------------------------------------------------------------------------------------------------------------------------- -.. 
automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_6 - :members: - -Test ha neutron vlan deployment group 7 (no volumes/ceph for images and cinder/swift/base os) ---------------------------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_7 - :members: - -Test Sahara OS component with vlan and ceph -------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_os_components.test_sahara_os_component - :members: - -Test Murano OS component with vlan ----------------------------------- -.. automodule:: fuelweb_test.tests.tests_os_components.test_murano_os_component - :members: - -Test mixed OS components ------------------------- -.. automodule:: fuelweb_test.tests.tests_os_components.test_mixed_os_components - :members: - -Test failover group 1 ---------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_failover_group_1 - :members: - -Test failover group 2 ---------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_failover_group_2 - :members: - -Test failover group 3 ---------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_failover_group_3 - :members: - -Test failover mongo -------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_failover_mongo - :members: - -Test Mongo Multirole --------------------- -.. automodule:: fuelweb_test.tests.tests_multirole.test_mongo_multirole - :members: - -Test scale neutron vlan deployment add/delete compute/cinder+cinder+ceph ------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_5 - :members: - -Test scale neutron tun deployment add/delete compute+cinder+ceph+ephemeral --------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_6 - :members: - -Test High Availability on one controller ----------------------------------------- -.. automodule:: fuelweb_test.tests.test_ha_one_controller - :members: - -Test High Availability on one controller base ---------------------------------------------- -.. automodule:: fuelweb_test.tests.test_ha_one_controller_base - :members: - -Test jumbo frames ------------------ -.. automodule:: fuelweb_test.tests.test_jumbo_frames - :members: - -Test manual VIP allocation --------------------------- -.. automodule:: fuelweb_test.tests.test_manual_vip_allocation - :members: - -Test Multiple Networks ----------------------- -.. automodule:: fuelweb_test.tests.test_multiple_networks - :members: - -Test multirole group 1 (controller+ceph/compute+cinder and controller+ceph+cinder/compute+ceph+cinder) ------------------------------------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_multirole.test_multirole_group_1 - :members: - -Test network templates base ---------------------------- -.. automodule:: fuelweb_test.tests.test_net_templates_base - :members: - -Test network templates ----------------------- -.. automodule:: fuelweb_test.tests.test_net_templates - :members: - -Test multiple networks templates --------------------------------- -.. automodule:: fuelweb_test.tests.test_net_templates_multiple_networks - :members: - -Test Neutron ------------- -.. automodule:: fuelweb_test.tests.test_neutron - :members: - -Test Neutron Public -------------------- -.. 
automodule:: fuelweb_test.tests.test_neutron_public - :members: - -Test Neutron VXLAN ------------------- -.. automodule:: fuelweb_test.tests.test_neutron_tun - :members: - -Test Neutron VXLAN base ------------------------ -.. automodule:: fuelweb_test.tests.test_neutron_tun_base - :members: - -Test Neutron IPv6 base functionality ------------------------------------- -.. automodule:: fuelweb_test.tests.test_neutron_ipv6 - :members: - -Test Node reinstallation ------------------------- -.. automodule:: fuelweb_test.tests.test_node_reinstallation - :members: - -Test offloading types ---------------------- -.. automodule:: fuelweb_test.tests.test_offloading_types - :members: - -Test public api ---------------- -.. automodule:: fuelweb_test.tests.test_public_api - :members: - -Test Pull Requests ------------------- -.. automodule:: fuelweb_test.tests.test_pullrequest - :members: - -Test Reduced Footprint ----------------------- -.. automodule:: fuelweb_test.tests.test_reduced_footprint - :members: - -Test scale group 1 (add controllers with stop and add ceph nodes with stop) ---------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_1 - :members: - -Test scale group 2 (replace primary controller and remove 2 controllers) ------------------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_2 - :members: - -Test scale group 3 (add/delete compute and add/delete cinder) -------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_3 - :members: - -Test scale group 4 (add/delete ceph and add/delete cinder+ceph) ---------------------------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_4 - :members: - -Test Services -------------- -.. automodule:: fuelweb_test.tests.test_services - :members: - -Test Ubuntu bootstrap ---------------------- -.. automodule:: fuelweb_test.tests.test_ubuntu_bootstrap - :members: - -Test Ubuntu Cloud Archive -------------------------- -.. automodule:: fuelweb_test.tests.tests_uca.test_uca - :members: - -Test Ironic ------------ -.. automodule:: fuelweb_test.tests.test_ironic_base - :members: - -Test Services reconfiguration ------------------------------ -.. automodule:: fuelweb_test.tests.test_services_reconfiguration - :members: - -Test Support HugePages ----------------------- -.. automodule:: fuelweb_test.tests.test_support_hugepages - :members: - -Test CPU pinning ----------------- -.. automodule:: fuelweb_test.tests.test_cpu_pinning - :members: - -Test extra computes -------------------- -.. automodule:: fuelweb_test.tests.tests_extra_computes.base_extra_computes -.. automodule:: fuelweb_test.tests.tests_extra_computes.test_rh_basic_actions -.. automodule:: fuelweb_test.tests.tests_extra_computes.test_rh_migration -.. automodule:: fuelweb_test.tests.tests_extra_computes.test_ol_basic_actions -.. automodule:: fuelweb_test.tests.tests_extra_computes.test_ol_migration - :members: - -Test Daemon Resource Allocation Control ---------------------------------------- -.. automodule:: fuelweb_test.tests.test_cgroups - :members: - -Test LCM base -------------- -.. automodule:: fuelweb_test.tests.tests_lcm.base_lcm_test - :members: - -Test task idempotency ---------------------- -.. automodule:: fuelweb_test.tests.tests_lcm.test_idempotency - :members: - -Test task ensurability ----------------------- -.. 
automodule:: fuelweb_test.tests.tests_lcm.test_ensurability
- :members:
-
-Test task coverage by LCM tests
--------------------------------
-.. automodule:: fuelweb_test.tests.tests_lcm.test_task_coverage
- :members:
-
-Test unlock settings tab
-------------------------
-.. automodule:: fuelweb_test.tests.test_unlock_settings_tab
- :members:
-
-Test for unlock settings tab from different cluster states
-----------------------------------------------------------
-.. automodule:: fuelweb_test.tests.test_states_unlock_settings_tab
- :members:
-
-Gating tests
-============
-
-Test Fuel agent
----------------
-.. automodule:: gates_tests.tests.test_review_in_fuel_agent
- :members:
-
-Test Fuel cli
--------------
-.. automodule:: gates_tests.tests.test_review_in_fuel_client
- :members:
-
-Test Fuel astute
-----------------
-.. automodule:: gates_tests.tests.test_review_in_astute
- :members:
-
-Test Fuel nailgun agent
------------------------
-.. automodule:: gates_tests.tests.test_nailgun_agent
- :members:
-
-Test Fuel web
--------------
-.. automodule:: gates_tests.tests.test_review_fuel_web
- :members:
-
-Fuel mirror verification
-========================
-
-Tests to check that mirror is created in various scenarios
-----------------------------------------------------------
-Fuel create mirror simplifies mirror creation for customers
-who do not have internet access on-site. It was rewritten from bash
-to Python.
-
-Fuel create mirror features:
-
-1) Minimize the size of packages in a mirror;
-
-2) Download packages in parallel.
-
-These features can cause some problems:
-
-1) While resolving packages to minimize mirror size, we found the following issues:
-
-1.1) Incorrect versions. With multiple mirrors, a version can be
-skipped due to name duplication, even though it is still needed by bootstrap/deploy.
-
-1.2) Mirror/version collisions. Sometimes a package is present in a number of mirrors,
-and the correct version does not always correspond to the correct site.
-
-1.3) There is a special mirror on the Fuel ISO, which differs from
-http://mirror.fuel-infra.org/ .
-
-2) Concurrent package fetching introduces complications:
-
-2.1) Some mirrors cannot handle downloads in multiple threads and fail
-or reject concurrent connections. In such cases we abandon concurrent
-downloads for those mirrors.
-
-2.2) Common concurrency pitfalls: race conditions on shared resources such as the lists of packages to
-process.
-
-2.3) Problems with offset-based downloads. Some packages were broken, and this
-was discovered only during package installation.
-
-.. automodule:: fuelweb_test.tests.tests_mirrors.test_create_mirror
- :members:
-
-Tests to verify installation from packages mirrors
--------------------------------------------------
-After the mirror is created, we should be able to deploy an environment with it.
-
-Fuel-mirror updates the default repo URLs for deployment, so we do not have to
-set them up for new environments. But be careful: if you want to deploy
-environments with the vanilla mirrors from the ISO, you should update the settings in
-the environment. Currently there is no option to update the default mirrors from the
-UI/CLI.
-
-Fuel-mirror updates the repo list with internal structures:
-https://github.com/bgaifullin/packetary/blob/packetary3/contrib/fuel_mirror/fuel_mirror/commands/create.py#L224-L243
-
-The repository should be able to do two things:
-
-1) Create a bootstrap ISO for provisioning;
-
-2) Provide packages for deployment. Packages from dependencies in http://mirror.fuel-infra.org/ do not cover all the needed packages. 
-.. automodule:: fuelweb_test.tests.tests_mirrors.test_create_mirror - :members: - -Tests to verify installation from packages mirrors --------------------------------------------------- -After a mirror is created, we should be able to deploy an environment with it. - -Fuel-mirror updates the default repo URLs for deployment, so we do not have to -set them up for new environments. But be careful: if you want to deploy -environments with the vanilla mirrors from the ISO, you should update the -settings in the environment. Currently there is no option to update the -default mirrors from the UI/CLI. - -Fuel-mirror updates the repo list with internal structures: -https://github.com/bgaifullin/packetary/blob/packetary3/contrib/fuel_mirror/fuel_mirror/commands/create.py#L224-L243 - -The repository should be able to do two things: - -1) Create a bootstrap ISO for provisioning; - -2) Provide packages for deployment. The dependency packages in http://mirror.fuel-infra.org/ do not cover all the needed packages. -So we need to mix in a list of required packages: -https://github.com/bgaifullin/packetary/blob/packetary3/contrib/fuel_mirror/etc/config.yaml#L46-L96 - -Problems: - -1) We need to install not only 'depends' but also 'recommends' packages: -https://wiki.ubuntu.com/LucidLynx/ReleaseNotes/#Recommended_packages_installed_by_default -http://askubuntu.com/questions/18545/installing-suggested-recommended-packages - -2) We have a problem with support for a custom package list. -It is tracked only via system test failures, with no team assigned to the job. -Also, debootstrap and other tools are not informative about package errors: -they may fail with 'unable to mount', '/proc not mounted' or 'file not found' -even when the underlying problem is a missing package. - -.. automodule:: fuelweb_test.tests.tests_mirrors.test_use_mirror - :members: - - -Plugins tests -============= - -Contrail tests --------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_contrail.test_fuel_plugin_contrail - :members: - -Emc tests ---------- -.. automodule:: fuelweb_test.tests.plugins.plugin_emc.test_plugin_emc - :members: - -Example tests -------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_example.test_fuel_plugin_example - :members: - -Example tests for plugin installation after cluster create ----------------------------------------------------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_example.test_fuel_plugin_example_postdeploy - :members: - -Glusterfs tests ---------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_glusterfs.test_plugin_glusterfs - :members: - -Lbaas tests ------------ -.. automodule:: fuelweb_test.tests.plugins.plugin_lbaas.test_plugin_lbaas - :members: - -Reboot tests ------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_reboot.test_plugin_reboot_task - :members: - -Vip reservation tests ---------------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_vip_reservation.test_plugin_vip_reservation - :members: - -Zabbix tests ------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_zabbix.test_plugin_zabbix - :members: - -Murano Tests ------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_murano.test_plugin_murano - :members: - -Patching tests -============== - -Patching tests --------------- -.. automodule:: fuelweb_test.tests.tests_patching.test_patching - :members: - - -Security tests -============== - -Nessus scan tests ------------------ -.. automodule:: fuelweb_test.tests.tests_security.test_run_nessus - :members: - -Lynis audit tests ------------------ -.. automodule:: fuelweb_test.tests.tests_security.test_lynis_audit - :members: - -Strength tests -============== - -Cic maintenance mode tests --------------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_cic_maintenance_mode - :members: - -Failover tests --------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_failover - :members: - -Base failover tests -------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_failover_base - :members: - -Failover with CEPH tests ------------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_failover_with_ceph - :members: - -Huge environments tests ----------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_huge_environments - :members: - -Image based tests ----------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_image_based - :members: - -Base load tests ---------------- -..
automodule:: fuelweb_test.tests.tests_strength.test_load_base - :members: - -Load tests ----------- -.. automodule:: fuelweb_test.tests.tests_strength.test_load - :members: - -Master node failover tests --------------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_master_node_failover - :members: - -Neutron tests -------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_neutron - :members: - -Base Neutron tests ------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_neutron_base - :members: - -OSTF repeatable tests ---------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_ostf_repeatable_tests - :members: - -Repetitive restart tests ------------------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_repetitive_restart - :members: - -Restart tests -------------- -.. automodule:: fuelweb_test.tests.tests_strength.test_restart - :members: - -Upgrade tests -============= - -Test Data-Driven Upgrade ------------------------- -.. automodule:: fuelweb_test.tests.tests_upgrade.test_clone_env -.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base -.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_ceph_ha -.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_net_tmpl -.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_no_cluster -.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_plugin -.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_smoke -.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_multirack_deployment -.. automodule:: fuelweb_test.tests.tests_upgrade.test_node_reassignment -.. automodule:: fuelweb_test.tests.tests_upgrade.upgrader_tool - :members: - -OS upgrade tests -================ - -Test OpenStack Upgrades ------------------------ -.. automodule:: fuelweb_test.tests.tests_upgrade.upgrade_base - :members: - -.. automodule:: fuelweb_test.tests.tests_upgrade.test_os_upgrade - :members: - -Tests for separated services -============================ - -Test for separate haproxy service ---------------------------------- -.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_haproxy - :members: - -Test for separate horizon service ---------------------------------- -.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_horizon - :members: - -Test for separate multiroles ----------------------------- -.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_multiroles - :members: - -Test for separate rabbitmq service ----------------------------------- -.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_rabbitmq - :members: - -Test for separate rabbitmq service and ceph -------------------------------------------- -.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_rabbitmq_ceph - :members: - -Deployment with platform components ------------------------------------ -.. automodule:: fuelweb_test.tests.tests_separate_services.test_deploy_platform_components - :members: - -Test for ssl components ------------------------ -.. automodule:: fuelweb_test.tests.test_ssl - :members: - -Test for network outage ------------------------ -.. automodule:: fuelweb_test.tests.tests_strength.test_network_outage - :members: - -Test for separate master node deployment ----------------------------------------- -.. 
automodule:: system_test.tests.test_centos_master_deploy_ceph - :members: - -Test for multipath devices --------------------------- -.. automodule:: fuelweb_test.tests.test_multipath_devices - :members: - -Test for Image Based Provisioning ---------------------------------- -.. automodule:: fuelweb_test.tests.tests_ibp.test_ibp - :members: - -Tests for configDB api ----------------------- -.. automodule:: fuelweb_test.tests.tests_configdb.test_configdb_api - :members: - -Tests for cinder block device driver ------------------------------------- -.. automodule:: fuelweb_test.tests.test_bdd - -Tests for configDB cli ----------------------- -.. automodule:: fuelweb_test.tests.tests_configdb.test_configdb_cli - :members: - -Test for tracking /etc dir by etckeeper plugin ----------------------------------------------- -.. automodule:: fuelweb_test.tests.plugins.plugin_etckeeper.test_plugin_etckeeper - :members: - -Test SR-IOV ------------ -.. automodule:: fuelweb_test.tests.test_sriov - :members: - -Test graph extension --------------------- -.. automodule:: fuelweb_test.tests.test_graph_extension - :members: - -Test Multiqueue ---------------- -.. automodule:: fuelweb_test.tests.test_multiqueue - :members: - -Test OVS firewall driver ------------------------- -.. automodule:: fuelweb_test.tests.test_ovs_firewall - :members: diff --git a/doc/conf.py b/doc/conf.py deleted file mode 100644 index 5a4574eec..000000000 --- a/doc/conf.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys - -sys.path.insert(0, - os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) - -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', -] - -autodoc_default_flags = ['members', 'show-inheritance', 'inherited-members'] -autodoc_member_order = 'bysource' - -source_suffix = '.rst' - -master_doc = 'index' - -project = 'Fuel QA' -copyright = 'Copyright 2015 Mirantis, Inc.' \ - 'Licensed under the Apache License, Version 2.0' \ - ' (the "License"); you may not use this file except in' \ - ' compliance with the License. You may obtain a copy' \ - ' of the License at http://www.apache.org/licenses/LICENSE-2.0' - -exclude_patterns = ['_build'] - -pygments_style = 'sphinx' - -html_theme = 'sphinxdoc' -htmlhelp_basename = 'fuel-qadoc' - -intersphinx_mapping = {'http://docs.python.org/': None}
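(With this conf.py, the autodoc pages above could be rebuilt with stock Sphinx; assuming the repository root on sys.path as configured above, an invocation such as ``sphinx-build -b html doc doc/_build/html`` would regenerate them. The exact build targets lived in the doc/Makefile that this patch also removes.)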
diff --git a/doc/fuel_tests.rst b/doc/fuel_tests.rst deleted file mode 100644 index d3e091ade..000000000 --- a/doc/fuel_tests.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. index:: Fuel tests - -Fuel tests -********** - -PyTest test config -================== - -Conftest for Tests ------------------- -.. automodule:: fuel_tests.tests.conftest - :members: - -Models -====== - -Manager ----------- -.. automodule:: fuel_tests.models.manager - :members: - -Tests -===== - -Ceph Tests ---------- -.. automodule:: fuel_tests.tests.test_ceph - :members: - -Neutron Tests -------------- -.. automodule:: fuel_tests.tests.test_neutron - :members: - diff --git a/doc/general.rst b/doc/general.rst deleted file mode 100644 index caf77e2b2..000000000 --- a/doc/general.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. index:: General - -General -======= - -Proboscis test runner ---------------------- -.. automodule:: fuelweb_test.run_tests - :members: - -Settings --------- -.. automodule:: fuelweb_test.settings - :members: - -OSTF tests mapping ------------------- -.. automodule:: fuelweb_test.ostf_test_mapping - :members: diff --git a/doc/helpers.rst b/doc/helpers.rst deleted file mode 100644 index 75714924d..000000000 --- a/doc/helpers.rst +++ /dev/null @@ -1,170 +0,0 @@ -.. index:: Helpers - -Helpers -******* - -General Helpers -=============== - -Ceph ---- -.. automodule:: fuelweb_test.helpers.ceph - :members: - -Checkers --------- -.. automodule:: fuelweb_test.helpers.checkers - :members: - -CIC Maintenance Mode --------------------- -.. automodule:: fuelweb_test.helpers.cic_maintenance_mode - :members: - -Cloud Image ------------ -.. automodule:: fuelweb_test.helpers.cloud_image - :members: - -Common ------- -.. automodule:: fuelweb_test.helpers.common - :members: - -Decorators ----------- -.. automodule:: fuelweb_test.helpers.decorators - :members: - -Metaclasses ------------ -.. automodule:: fuelweb_test.helpers.metaclasses - :members: - -Eb tables ---------- -.. automodule:: fuelweb_test.helpers.eb_tables - :members: - -Fuel Actions ------------- -.. automodule:: fuelweb_test.helpers.fuel_actions - :members: - -Fuel Release Hacks ------------------- -.. automodule:: fuelweb_test.helpers.fuel_release_hacks - :members: - -Granular Deployment Checkers ---------------------------- -.. automodule:: fuelweb_test.helpers.granular_deployment_checkers - :members: - -Ironic Actions --------------- -.. automodule:: fuelweb_test.helpers.ironic_actions - :members: - -Log Server ---------- -.. automodule:: fuelweb_test.helpers.log_server - :members: - -Multiple Networks Hacks ----------------------- -.. automodule:: fuelweb_test.helpers.multiple_networks_hacks - :members: - -Nessus REST Client ------------------- -.. automodule:: fuelweb_test.helpers.nessus - :members: - -Os Actions ---------- -.. automodule:: fuelweb_test.helpers.os_actions - :members: - -Ovs helper ---------- -.. automodule:: fuelweb_test.helpers.ovs - :members: - -Pacemaker --------- -.. automodule:: fuelweb_test.helpers.pacemaker - :members: - -Patching -------- -.. automodule:: fuelweb_test.helpers.patching - :members: - -Rally ----- -.. automodule:: fuelweb_test.helpers.rally - :members: - -Regenerate Repo --------------- -.. automodule:: fuelweb_test.helpers.regenerate_repo - :members: - -Replace Repositories -------------------- -.. automodule:: fuelweb_test.helpers.replace_repos - :members: - -Security ------- -.. automodule:: fuelweb_test.helpers.security - :members: - -SSH Manager ----------- -.. automodule:: fuelweb_test.helpers.ssh_manager - :members: - -Ssl --- -.. automodule:: fuelweb_test.helpers.ssl_helpers - :members: - -UCA --- -.. automodule:: fuelweb_test.helpers.uca - :members: - -Utils ----- -.. automodule:: fuelweb_test.helpers.utils - :members: - -Gerrit -====== - -Client ------- -.. automodule:: fuelweb_test.helpers.gerrit.gerrit_client - :members: - -Info provider ------------- -.. automodule:: fuelweb_test.helpers.gerrit.gerrit_info_provider - :members: - -Utils ----- -..
automodule:: fuelweb_test.helpers.gerrit.utils - :members: - -Rules ------ -.. automodule:: fuelweb_test.helpers.gerrit.rules - :members: - -Content parser --------------- -.. automodule:: fuelweb_test.helpers.gerrit.content_parser - :members: \ No newline at end of file diff --git a/doc/index.rst b/doc/index.rst deleted file mode 100644 index 1f725e074..000000000 --- a/doc/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -Documentation for the QA test code repo -*************************************** - -.. toctree:: - :numbered: - :maxdepth: 3 - - general.rst - models.rst - helpers.rst - base_tests.rst - testrail.rst - system_tests.rst - fuel_tests.rst diff --git a/doc/models.rst b/doc/models.rst deleted file mode 100644 index 09a17c998..000000000 --- a/doc/models.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. index:: Models - -Models -====== - -Collector client ----------------- -.. automodule:: fuelweb_test.models.collector_client - :members: - -Environment ------------ -.. automodule:: fuelweb_test.models.environment - :members: - -Fuel Web Client ---------------- -.. automodule:: fuelweb_test.models.fuel_web_client - :members: - -Nailgun Client --------------- -.. automodule:: fuelweb_test.models.nailgun_client - :members: diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 09fc79134..000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -sphinx==1.3.1 diff --git a/doc/system_tests.rst b/doc/system_tests.rst deleted file mode 100644 index 0d07275a7..000000000 --- a/doc/system_tests.rst +++ /dev/null @@ -1,137 +0,0 @@ -.. index:: System tests - -System tests -************ - -Core -==== - -Repository ----------- -.. automodule:: system_test.core.repository - :members: - -Discover --------- -.. automodule:: system_test.core.discover - :members: - -Decorators ----------- -.. automodule:: system_test.core.decorators - :members: - -Factory -------- -.. automodule:: system_test.core.factory - :members: - -Config ------- -.. automodule:: system_test.core.config - :members: - -Actions -======= - -Base actions ------------- -.. automodule:: system_test.actions.base - :members: - -Fuelmaster actions ------------------- -.. automodule:: system_test.actions.fuelmaster_actions - :members: - -OSTF actions ------------- -.. automodule:: system_test.actions.ostf_actions - :members: - -Plugins actions ---------------- -.. automodule:: system_test.actions.plugins_actions - :members: - -Strength actions ----------------- -.. automodule:: system_test.actions.strength_actions - :members: - -General tests -============= - -ActionTest ----------- -.. automodule:: system_test.tests.base - :members: - -Case deploy Environment ------------------------ -.. automodule:: system_test.tests.test_create_deploy_ostf - :members: - -Deploy cluster and check RadosGW --------------------------------- -.. automodule:: system_test.tests.test_deploy_check_rados - :members: - -Delete cluster after deploy ---------------------------- -.. automodule:: system_test.tests.test_delete_after_deploy - :members: - -Redeploy cluster after stop ---------------------------- -.. automodule:: system_test.tests.test_redeploy_after_stop - :members: - -Redeploy cluster after reset ----------------------------- -.. automodule:: system_test.tests.test_redeploy_after_reset - :members: - -Fuel master migration ---------------------- -.. automodule:: system_test.tests.test_fuel_migration - :members: - -Strength tests -============== - -Destroy controllers -------------------- -.. 
automodule:: system_test.tests.strength.test_destroy_controllers - :members: - -Fill root and check pacemaker ------------------------------ -.. automodule:: system_test.tests.strength.test_filling_root - :members: - -Plugin tests -============ - -Example plugin Base -------------------- -.. automodule:: system_test.tests.plugins.plugin_example - :members: - -Example plugin --------------- -.. automodule:: system_test.tests.plugins.plugin_example.test_plugin_example - :members: - -Example plugin v3 ------------------ -.. automodule:: system_test.tests.plugins.plugin_example.test_plugin_example_v3 - :members: - -Helpers -======= - -Decorators ----------- -.. automodule:: system_test.helpers.decorators - :members: diff --git a/doc/testrail.rst b/doc/testrail.rst deleted file mode 100644 index a6ccc0bd7..000000000 --- a/doc/testrail.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. index:: Testrail - -Testrail -======== - -Builds ------- -.. automodule:: fuelweb_test.testrail.builds - :members: - -Launchpad client ---------------- -.. automodule:: fuelweb_test.testrail.launchpad_client - :members: - -Report ------- -.. automodule:: fuelweb_test.testrail.report - :members: - -Report partner integration -------------------------- -.. automodule:: fuelweb_test.testrail.report_pi - :members: - -Report tempest results ---------------------- -.. automodule:: fuelweb_test.testrail.report_tempest_results - :members: - -Settings -------- -.. automodule:: fuelweb_test.testrail.settings - :members: - -Testrail -------- -.. automodule:: fuelweb_test.testrail.testrail - :members: - -Testrail Client --------------- -.. automodule:: fuelweb_test.testrail.testrail_client - :members: - -Upload Cases Description ------------------------ -.. automodule:: fuelweb_test.testrail.upload_cases_description - :members: - -Upload tempest test suite ------------------------- -.. automodule:: fuelweb_test.testrail.upload_tempest_test_suite - :members: - -Generate bugs statistics for TestPlan ------------------------------------- -.. automodule:: fuelweb_test.testrail.generate_statistics - :members: - -Datetime utils for Testrail -------------------------- -.. automodule:: fuelweb_test.testrail.datetime_util - :members: - -Generate failure statistics for TestPlan ---------------------------------------- -.. automodule:: fuelweb_test.testrail.generate_failure_group_statistics - :members: diff --git a/fuel_tests/__init__.py b/fuel_tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuel_tests/models/__init__.py b/fuel_tests/models/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuel_tests/models/manager.py b/fuel_tests/models/manager.py deleted file mode 100644 index 1a094a6d2..000000000 --- a/fuel_tests/models/manager.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
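# Illustrative sketch (not part of the original module): Manager below is
# driven by the pytest fixtures in fuel_tests/tests/conftest.py, roughly as:
#
#     manager = Manager(config_file, request.cls)   # 'manager' fixture
#     manager.get_ready_cluster()                   # 'prepare' fixture, when
#                                                   # @pytest.mark.need_ready_cluster
#
# get_ready_cluster() reverts a ready snapshot when one exists and otherwise
# deploys from the test class's cluster_config or the YAML template, building
# on get_ready_slaves(), get_ready_release() and get_ready_setup() defined below.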
- -# pylint: disable=redefined-builtin -# noinspection PyUnresolvedReferences -from six.moves import xrange -# pylint: enable=redefined-builtin - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import create_diagnostic_snapshot -from fuelweb_test.helpers.utils import TimeStat -from fuelweb_test.tests.base_test_case import TestBasic as Basic - -from system_test.core.discover import load_yaml - - -class Manager(Basic): - """Manager class for tests.""" - - def __init__(self, config_file, cls): - super(Manager, self).__init__() - self.full_config = None - self.env_config = None - self.env_settings = None - self.config_name = None - self._devops_config = None - self._start_time = 0 - self.config_file = config_file - if config_file: - self._load_config() - self._context = cls - self.assigned_slaves = set() - - def _cluster_from_template(self): - """Create cluster from template file.""" - - slaves = int(self.full_config['template']['slaves']) - cluster_name = self.env_config['name'] - snapshot_name = "ready_cluster_{}".format(cluster_name) - if self.check_run(snapshot_name): - self.env.revert_snapshot(snapshot_name) - cluster_id = self.fuel_web.client.get_cluster_id(cluster_name) - self._context._storage['cluster_id'] = cluster_id - logger.info("Got deployed cluster from snapshot") - return True - elif self.get_ready_slaves(slaves): - self.env.sync_time() - logger.info("Create env {}".format( - self.env_config['name'])) - cluster_settings = { - "sahara": self.env_settings['components'].get( - 'sahara', False), - "ceilometer": self.env_settings['components'].get( - 'ceilometer', False), - "ironic": self.env_settings['components'].get( - 'ironic', False), - "user": self.env_config.get("user", "admin"), - "password": self.env_config.get("password", "admin"), - "tenant": self.env_config.get("tenant", "admin"), - "volumes_lvm": self.env_settings['storages'].get( - "volume-lvm", False), - "volumes_ceph": self.env_settings['storages'].get( - "volume-ceph", False), - "images_ceph": self.env_settings['storages'].get( - "image-ceph", False), - "ephemeral_ceph": self.env_settings['storages'].get( - "ephemeral-ceph", False), - "objects_ceph": self.env_settings['storages'].get( - "rados-ceph", False), - "osd_pool_size": str(self.env_settings['storages'].get( - "replica-ceph", 2)), - "net_provider": self.env_config['network'].get( - 'provider', 'neutron'), - "net_segment_type": self.env_config['network'].get( - 'segment-type', 'vlan'), - "assign_to_all_nodes": self.env_config['network'].get( - 'pubip-to-all', - False), - "neutron_l3_ha": self.env_config['network'].get( - 'neutron-l3-ha', False), - "neutron_dvr": self.env_config['network'].get( - 'neutron-dvr', False), - "neutron_l2_pop": self.env_config['network'].get( - 'neutron-l2-pop', False) - } - - cluster_id = self.fuel_web.create_cluster( - name=self.env_config['name'], - mode=settings.DEPLOYMENT_MODE, - release_name=self.env_config['release'], - settings=cluster_settings) - - self._context._storage['cluster_id'] = cluster_id - logger.info("Add nodes to env {}".format(cluster_id)) - names = "slave-{:02}" - num = iter(xrange(1, slaves + 1)) - nodes = {} - for new in self.env_config['nodes']: - for _ in xrange(new['count']): - name = names.format(next(num)) - while name in self.assigned_slaves: - name = names.format(next(num)) - - self.assigned_slaves.add(name) - nodes[name] = new['roles'] - logger.info("Set roles {} to node {}".format( - new['roles'], name)) - 
self.fuel_web.update_nodes(cluster_id, nodes) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.env.make_snapshot(snapshot_name, is_make=True) - self.env.resume_environment() - return True - else: - logger.error("Can't deploy cluster because snapshot" - " with bootstrapped nodes didn't revert") - raise RuntimeError("Can't deploy cluster because snapshot" - " with bootstrapped nodes didn't revert") - - def _cluster_from_config(self, config): - """Create cluster from predefined config.""" - - slaves = len(config.get('nodes')) - cluster_name = config.get('name', self._context.__name__) - snapshot_name = "ready_cluster_{}".format(cluster_name) - if self.check_run(snapshot_name): - self.env.revert_snapshot(snapshot_name) - cluster_id = self.fuel_web.client.get_cluster_id(cluster_name) - self._context._storage['cluster_id'] = cluster_id - logger.info("Getted deployed cluster from snapshot") - return True - elif self.get_ready_slaves(slaves): - self.env.sync_time() - logger.info("Create env {}".format(cluster_name)) - cluster_id = self.fuel_web.create_cluster( - name=cluster_name, - mode=config.get('mode', settings.DEPLOYMENT_MODE), - settings=config.get('settings', {}) - ) - self._context._storage['cluster_id'] = cluster_id - self.fuel_web.update_nodes( - cluster_id, - config.get('nodes') - ) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.env.make_snapshot(snapshot_name, is_make=True) - self.env.resume_environment() - return True - else: - logger.error("Can't deploy cluster because snapshot" - " with bootstrapped nodes didn't revert") - raise RuntimeError("Can't deploy cluster because snapshot" - " with bootstrapped nodes didn't revert") - - def check_run(self, snapshot_name): - """Checks if run of current test is required. - - :param snapshot_name: Name of the snapshot the function should make - :type snapshot_name: str - - """ - if snapshot_name: - return self.env.d_env.has_snapshot(snapshot_name) - - def _load_config(self): - """Read cluster config from yaml file.""" - - config = load_yaml(self.config_file) - self.full_config = config - self.env_config = config[ - 'template']['cluster_template'] - self.env_settings = config[ - 'template']['cluster_template']['settings'] - self.config_name = config['template']['name'] - - if 'devops_settings' in config['template']: - self._devops_config = config - - def get_ready_setup(self): - """Create virtual environment and install Fuel master node. 
- """ - - logger.info("Getting ready setup") - if self.check_run("empty"): - self.env.revert_snapshot("empty") - return True - else: - with TimeStat("setup_environment", is_uniq=True): - if list(self.env.d_env.get_nodes(role='fuel_master')): - self.env.setup_environment() - self.fuel_post_install_actions() - - elif list(self.env.d_env.get_nodes(role='centos_master')): - # need to use centos_master.yaml devops template - hostname = ''.join((settings.FUEL_MASTER_HOSTNAME, - settings.DNS_SUFFIX)) - self.centos_setup_fuel(hostname) - else: - raise RuntimeError( - "No Fuel master nodes found!") - - self.env.make_snapshot("empty", is_make=True) - self.env.resume_environment() - return True - - def get_ready_release(self): - """Make changes in release configuration.""" - - logger.info("Getting ready release") - if self.check_run("ready"): - self.env.revert_snapshot("ready") - logger.info("Getted ready release from snapshot") - return True - elif self.get_ready_setup(): - self.env.sync_time() - self.fuel_web.get_nailgun_version() - self.fuel_web.change_default_network_settings() - - if (settings.REPLACE_DEFAULT_REPOS and - settings.REPLACE_DEFAULT_REPOS_ONLY_ONCE): - self.fuel_web.replace_default_repos() - - self.env.make_snapshot("ready", is_make=True) - self.env.resume_environment() - return True - else: - logger.error("Can't config releases setup " - "snapshot didn't revert") - raise RuntimeError("Can't config releases setup " - "snapshot didn't revert") - - def get_ready_slaves(self, slaves=None): - """Bootstrap slave nodes.""" - - logger.info("Getting ready slaves") - if not slaves: - if hasattr(self._context, 'cluster_config'): - slaves = len(self._context.cluster_config.get('nodes')) - elif self.full_config: - slaves = int(self.full_config['template']['slaves']) - else: - logger.error("Unable to count slaves") - raise RuntimeError("Unable to count slaves") - snapshot_name = "ready_with_{}_slaves".format(slaves) - if self.check_run(snapshot_name): - self.env.revert_snapshot(snapshot_name) - logger.info("Getted ready slaves from snapshot") - return True - elif self.get_ready_release(): - self.env.sync_time() - logger.info("Bootstrap {} nodes".format(slaves)) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:slaves], - skip_timesync=True) - self.env.make_snapshot(snapshot_name, is_make=True) - self.env.resume_environment() - return True - logger.error( - "Can't bootstrap nodes because release snapshot didn't revert") - raise RuntimeError( - "Can't bootstrap nodes because release snapshot didn't revert") - - def get_ready_cluster(self, config=None): - """Create and deploy cluster.""" - - logger.info("Getting deployed cluster") - config = config or self._context.cluster_config or None - if config: - self._cluster_from_config(config=config) - else: - self._cluster_from_template() - - def show_step(self, step, details='', initialize=False): - """Show a description of the step taken from docstring - - :param int/str step: step number to show - :param str details: additional info for a step - """ - test_func = self._context._current_test - test_func_name = test_func.__name__ - - if initialize or step == 1: - self.current_log_step = step - else: - self.current_log_step += 1 - if self.current_log_step != step: - error_message = 'The step {} should be {} at {}' - error_message = error_message.format( - step, - self.current_log_step, - test_func_name - ) - logger.error(error_message) - - docstring = test_func.__doc__ - docstring = '\n'.join([s.strip() for s in docstring.split('\n')]) - steps = 
{s.split('. ')[0]: s for s in - docstring.split('\n') if s and s[0].isdigit()} - if details: - details_msg = ': {0} '.format(details) - else: - details_msg = '' - if str(step) in steps: - logger.info("\n" + " " * 55 + "<<< {0} {1}>>>" - .format(steps[str(step)], details_msg)) - else: - logger.info("\n" + " " * 55 + "<<< {0}. (no step description " - "in scenario) {1}>>>".format(str(step), details_msg)) - - def make_diagnostic_snapshot(self, status, name): - self.env.resume_environment() - create_diagnostic_snapshot(self.env, status, name) - - def save_env_snapshot(self, name): - self.env.make_snapshot(name, is_make=True) diff --git a/fuel_tests/tests/__init__.py b/fuel_tests/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuel_tests/tests/conftest.py b/fuel_tests/tests/conftest.py deleted file mode 100644 index 8c31113f4..000000000 --- a/fuel_tests/tests/conftest.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from __future__ import division - -import time - -import pytest - -from fuel_tests.models.manager import Manager - -from fuelweb_test import logger -from fuelweb_test import settings - -from system_test.core.discover import config_filter - - -# pylint: disable=no-member - - -@pytest.fixture(scope='session') -def config_file(request): - """Fixture which provides the config for a test.""" - template = settings.FUELQA_TEMPLATE - if template: - return config_filter([template])[template] - else: - return None - - -@pytest.fixture(scope='class', autouse=True) -def manager(request, config_file): - """Fixture which links a Manager instance to each test class.""" - manager = Manager(config_file, request.cls) - request.cls.manager = manager - request.cls._storage = dict() - request.cls._logger = logger - - def get_env(self): - return self.manager.env - - request.cls.env = property(get_env) - - -@pytest.fixture(scope='function', autouse=True) -def snapshot(request): - """Fixture which provides collection of artifacts after a test.
- - Markers: - get_logs - create snapshot with logs - fail_snapshot - create environment snapshot - - Example: - - @pytest.mark.get_logs - @pytest.mark.fail_snapshot - def test_ha_deploy(): - pass - """ - get_logs = request.keywords.get('get_logs', None) - fail_snapshot = request.keywords.get('fail_snapshot', None) - - def test_fin(): - if request.node.rep_setup.failed: - if get_logs: - request.instance.manager.make_diagnostic_snapshot( - status="prepare_failed", - name=request.node.function.__name__) - if fail_snapshot: - request.instance.manager.save_env_snapshot( - name="prep_fail_{}".format(request.node.function.__name__)) - elif request.node.rep_call.passed: - if get_logs: - request.instance.manager.make_diagnostic_snapshot( - status="test_pass", - name=request.node.function.__name__) - elif request.node.rep_call.failed: - if get_logs: - request.instance.manager.make_diagnostic_snapshot( - status="test_failed", - name=request.node.function.__name__) - if fail_snapshot: - request.instance.manager.save_env_snapshot( - name="fail_{}".format(request.node.function.__name__)) - - request.addfinalizer(test_fin) - - -@pytest.fixture(scope='function', autouse=True) -def prepare(request, snapshot): - """Fixture for preparing the environment for a test. - - Four markers control the preparation: - need_ready_cluster marker if the test needs an already deployed cluster - need_ready_slaves marker if the test needs already provisioned slaves - need_ready_release marker if the test needs an already configured release - need_ready_master marker if the test needs an already installed master node - - Example: - - @pytest.mark.need_ready_cluster - def test_ha_deploy(): - pass - - @pytest.mark.need_ready_slaves - def test_ha_deploy(): - pass - - """ - need_ready_cluster = request.keywords.get('need_ready_cluster', None) - need_ready_slaves = request.keywords.get('need_ready_slaves', None) - need_ready_release = request.keywords.get('need_ready_release', None) - need_ready_master = request.keywords.get('need_ready_master', None) - if need_ready_cluster: - request.instance.manager.get_ready_cluster() - elif need_ready_slaves: - request.instance.manager.get_ready_slaves() - elif need_ready_release: - request.instance.manager.get_ready_release() - elif need_ready_master: - request.instance.manager.get_ready_setup() - - -@pytest.hookimpl(tryfirst=True, hookwrapper=True) -def pytest_runtest_makereport(item, call): - """Attach the test result to each test object.""" - # execute all other hooks to obtain the report object - outcome = yield - rep = outcome.get_result() - - # set a report attribute for each phase of a call, which can - # be "setup", "call", "teardown" - - setattr(item, "rep_" + rep.when, rep) - - -test_names = set() -test_groups = [] - - -@pytest.hookimpl() -def pytest_collection_finish(session): - def _get_groups(kws): - return ( - kw for kw, val in kws.keywords.items() if hasattr(val, 'name')) - - # pylint: disable=global-statement - global test_names - global test_groups - # pylint: enable=global-statement - - test_groups = [{tuple(_get_groups(kws)): kws} for kws in session.items] - - test_names = {kw for kws in session.items for kw in _get_groups(kws)} - - -def pytest_runtest_setup(item): - """Hook which runs before a test starts.""" - item.cls._current_test = item.function - item._start_time = time.time() - head = "<" * 5 + "#" * 30 + "[ {} ]" + "#" * 30 + ">" * 5 - head = head.format(item.function.__name__) - steps = ''.join(item.function.__doc__) - start_step = "\n{head}\n{steps}".format(head=head, steps=steps) - logger.info(start_step) - -
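# Illustrative only (not part of the original conftest): a test class wired
# into the fixtures above would combine the markers with a cluster_config
# attribute, for example:
#
#     @pytest.mark.get_logs
#     @pytest.mark.fail_snapshot
#     @pytest.mark.need_ready_cluster
#     class TestExample(object):
#         cluster_config = {
#             'name': 'Example',
#             'nodes': {'slave-01': ['controller'],
#                       'slave-02': ['compute']},
#         }
#
#         def test_smoke(self):
#             # 'manager' attached Manager to the class; 'prepare' already
#             # deployed the cluster before this test body runs
#             self.manager.fuel_web.verify_network(
#                 self._storage['cluster_id'])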
-def pytest_runtest_teardown(item): - """Hook which runs after a test.""" - step_name = item.function.__name__ - if hasattr(item, '_start_time'): - spent_time = time.time() - item._start_time - else: - spent_time = 0 - minutes = spent_time // 60 - # pylint: disable=round-builtin - seconds = int(round(spent_time)) % 60 - # pylint: enable=round-builtin - finish_step = "FINISH {} TEST. TOOK {} min {} sec".format( - step_name, minutes, seconds) - foot = "\n" + "<" * 5 + "#" * 30 + "[ {} ]" + "#" * 30 + ">" * 5 - foot = foot.format(finish_step) - logger.info(foot) diff --git a/fuel_tests/tests/test_admin_node.py b/fuel_tests/tests/test_admin_node.py deleted file mode 100644 index c33edae1c..000000000 --- a/fuel_tests/tests/test_admin_node.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -import pytest - -from devops.helpers.helpers import http -from devops.helpers.helpers import wait - -from fuelweb_test import logger -from fuelweb_test.helpers.ssh_manager import SSHManager -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves.xmlrpc_client import ServerProxy -# pylint: enable=import-error - -# pylint: disable=no-member -# pylint: disable=no-self-use -ssh_manager = SSHManager() - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.need_ready_master -@pytest.mark.thread_1 -class TestAdminNode(object): - """TestAdminNode.""" # TODO documentation - - @pytest.mark.test_cobbler_alive - def test_cobbler_alive(self): - """Test that the current installation has Cobbler correctly set up - - API and cobbler HTTP server are alive - - Scenario: - 1. Revert snapshot "empty" - 2. Check the cobbler API and HTTP server by sending an HTTP request - - Duration 1m - - """ - wait( - lambda: http(host=self.env.get_admin_node_ip(), url='/cobbler_api', - waited_code=501), - timeout=60 - ) - server = ServerProxy( - 'http://%s/cobbler_api' % self.env.get_admin_node_ip()) - - config = self.env.admin_actions.get_fuel_settings() - username = config['cobbler']['user'] - password = config['cobbler']['password'] - - # raises an error if something isn't right - server.login(username, password) - - @pytest.mark.test_astuted_alive - def test_astuted_alive(self): - """Test astute master and worker processes are alive on master node - - Scenario: - 1. Revert snapshot "empty" - 2.
Search for master and child processes - - Duration 1m - - """ - ps_output = ssh_manager.execute( - ssh_manager.admin_ip, 'ps ax')['stdout'] - astute_master = [ - master for master in ps_output if 'astute master' in master] - logger.info("Found astute processes: {:s}".format(astute_master)) - assert len(astute_master) == 1 - astute_workers = [ - worker for worker in ps_output if 'astute worker' in worker] - logger.info( - "Found {length:d} astute worker processes: {workers!s}" - "".format(length=len(astute_workers), workers=astute_workers)) - assert len(astute_workers) > 1 diff --git a/fuel_tests/tests/test_ceph.py b/fuel_tests/tests/test_ceph.py deleted file mode 100644 index 8b261e559..000000000 --- a/fuel_tests/tests/test_ceph.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pytest - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.ssh_manager import SSHManager - - -# pylint: disable=no-member - - -ssh_manager = SSHManager() - - -# noinspection PyUnresolvedReferences -class TestCephRadosGW(object): - """Test class containing the tests for a cluster with Ceph and RadosGW""" - - # This cluster config is used for all tests in this class - cluster_config = { - 'name': "TestCephRadosGW", - 'mode': settings.DEPLOYMENT_MODE, - 'settings': { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'tenant': 'rados', - 'user': 'rados', - 'password': 'rados' - }, - 'nodes': { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - 'slave-06': ['compute', 'ceph-osd'] - } - } - - @pytest.mark.get_logs - @pytest.mark.fail_snapshot - @pytest.mark.need_ready_cluster - @pytest.mark.pytest_bvt_2 - def test_ceph_rados_gw(self): - """Deploy ceph HA with RadosGW for objects - - Scenario: - 1. Create cluster with Neutron - 2. Add 3 nodes with controller role - 3. Add 3 nodes with compute and ceph-osd role - 4. Deploy the cluster - 5. Network check - 6. Check HAProxy backends - 7. Check ceph status - 8. Run OSTF tests - 9. Check the radosgw daemon is started - - Duration 90m - - """ - - self.manager.show_step(1) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - self.manager.show_step(5) - - # HAProxy backend checking - self.manager.show_step(6) - fuel_web = self.manager.fuel_web - controller_nodes = fuel_web.get_nailgun_cluster_nodes_by_roles( - self._storage['cluster_id'], ['controller']) - - for node in controller_nodes: - logger.info("Check all HAProxy backends on {}".format( - node['meta']['system']['fqdn'])) - haproxy_status = checkers.check_haproxy_backend(node['ip']) - msg = "HAProxy backends are DOWN.
{0}".format(haproxy_status) - assert haproxy_status['exit_code'] == 1, msg - - self.manager.show_step(7) - fuel_web.check_ceph_status(self._storage['cluster_id']) - - self.manager.show_step(8) - # Run ostf - fuel_web.run_ostf(cluster_id=self._storage['cluster_id'], - test_sets=['ha', 'smoke', 'sanity']) - - self.manager.show_step(9) - # Check the radosgw daemon is started - for node in controller_nodes: - logger.info("Check radosgw daemon is started on {}".format( - node['meta']['system']['fqdn'])) - - cmd = "pkill -0 radosgw" - ip = node['ip'] - err_msg = "radosgw daemon not started on {}".format( - node['meta']['system']['fqdn']) - ssh_manager.execute_on_remote(ip=ip, cmd=cmd, err_msg=err_msg) diff --git a/fuel_tests/tests/test_discovery_slave.py b/fuel_tests/tests/test_discovery_slave.py deleted file mode 100644 index 8d7e969e8..000000000 --- a/fuel_tests/tests/test_discovery_slave.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -import re - -import pytest - -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.helpers.eb_tables import Ebtables - -# pylint: disable=no-member - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.thread_1 -class TestNodeDiskSizes(object): - """TestNodeDiskSizes.""" # TODO documentation - - cluster_config = { - 'name': "TestNodeDiskSizes", - 'mode': settings.DEPLOYMENT_MODE, - 'nodes': { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - } - - @pytest.mark.need_ready_slaves - @pytest.mark.check_nodes_notifications - def test_check_nodes_notifications(self): - """Verify nailgun notifications for discovered nodes - - Scenario: - 1. Setup master and bootstrap 3 slaves - 2. Verify hard drive sizes for discovered nodes in /api/nodes - 3. 
Verify hard drive sizes for discovered nodes in notifications - - Duration 5m - - """ - # self.env.revert_snapshot("ready_with_3_slaves") - fuel_web = self.manager.fuel_web - # assert /api/nodes - disk_size = settings.NODE_VOLUME_SIZE * 1024 ** 3 - nailgun_nodes = fuel_web.client.list_nodes() - for node in nailgun_nodes: - for disk in node['meta']['disks']: - assert disk['size'] == disk_size, 'Disk size' - - hdd_size = "{0:.3} TB HDD".format((disk_size * 3 / (10 ** 9)) / 1000) - notifications = fuel_web.client.get_notifications() - - for node in nailgun_nodes: - # assert /api/notifications - for notification in notifications: - discover = notification['topic'] == 'discover' - current_node = notification['node_id'] == node['id'] - if current_node and discover and \ - "discovered" in notification['message']: - assert hdd_size in notification['message'], ( - '"{size} not found in notification message ' - '"{note}" for node {node} ' - '(hostname {host})!'.format( - size=hdd_size, - note=notification['message'], - node=node['name'], - host=node['hostname'])) - - # assert disks - disks = fuel_web.client.get_node_disks(node['id']) - for disk in disks: - expected_size = settings.NODE_VOLUME_SIZE * 1024 - 500 - assert disk['size'] == expected_size, ( - 'Disk size {0} is not equals expected {1}'.format( - disk['size'], expected_size)) - - @pytest.mark.check_nodes_disks - @pytest.mark.need_ready_cluster - def test_check_nodes_disks(self): - """Verify hard drive sizes for deployed nodes - - Scenario: - 1. Create cluster - 2. Add 1 controller - 3. Add 1 compute - 4. Add 1 cinder - 5. Deploy cluster - 6. Verify hard drive sizes for deployed nodes - 7. Run network verify - 8. Run OSTF - - Duration 15m - """ - - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - self.manager.show_step(1) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - self.manager.show_step(5) - self.manager.show_step(6) - # assert node disks after deployment - for node_name in self.cluster_config['nodes']: - str_block_devices = fuel_web.get_cluster_block_devices( - node_name) - - logger.debug("Block device:\n{}".format(str_block_devices)) - - expected_regexp = re.compile( - "vda\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format( - settings.NODE_VOLUME_SIZE)) - assert expected_regexp.search(str_block_devices), ( - "Unable to find vda block device for {}G in: {}".format( - settings.NODE_VOLUME_SIZE, str_block_devices)) - - expected_regexp = re.compile( - "vdb\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format( - settings.NODE_VOLUME_SIZE)) - assert expected_regexp.search(str_block_devices), ( - "Unable to find vdb block device for {}G in: {}".format( - settings.NODE_VOLUME_SIZE, str_block_devices)) - - expected_regexp = re.compile( - "vdc\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format( - settings.NODE_VOLUME_SIZE)) - assert expected_regexp.search(str_block_devices), ( - "Unable to find vdc block device for {}G in: {}".format( - settings.NODE_VOLUME_SIZE, str_block_devices)) - - self.manager.show_step(7) - fuel_web.verify_network(cluster_id) - - self.manager.show_step(8) - fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.thread_1 -class TestMultinicBootstrap(object): - """MultinicBootstrap.""" # TODO documentation - - @pytest.mark.multinic_bootstrap_booting - @pytest.mark.need_ready_release - @pytest.mark.check_nodes_disks - def test_multinic_bootstrap_booting(self): - """Verify slaves booting with 
blocked mac address - - Scenario: - 1. Revert snapshot "ready" - 2. Block traffic for first slave node (by mac) - 3. Restore mac addresses and boot first slave - 4. Verify slave mac addresses is equal to unblocked - - Duration 2m - - """ - slave = self.env.d_env.get_node(name='slave-01') - mac_addresses = [interface.mac_address for interface in - slave.interfaces.filter(network__name='internal')] - try: - for mac in mac_addresses: - Ebtables.block_mac(mac) - for mac in mac_addresses: - Ebtables.restore_mac(mac) - slave.destroy() - self.env.d_env.get_node(name='admin').revert("ready") - nailgun_slave = self.env.bootstrap_nodes([slave])[0] - assert mac.upper() == nailgun_slave['mac'].upper() - Ebtables.block_mac(mac) - finally: - for mac in mac_addresses: - Ebtables.restore_mac(mac) diff --git a/fuel_tests/tests/test_fuel_migration.py b/fuel_tests/tests/test_fuel_migration.py deleted file mode 100644 index 141a5ab06..000000000 --- a/fuel_tests/tests/test_fuel_migration.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pytest - -from devops.helpers.helpers import get_admin_remote -from devops.helpers.helpers import icmp_ping -from devops.helpers.helpers import wait_pass -from devops.helpers.helpers import wait - -from fuelweb_test import logger -from fuelweb_test import settings - -# pylint: disable=no-member - - -@pytest.fixture(scope='function') -def fuel_master_migration(request): - """Fixture which migrate Fuel Master to a compute""" - - instance = request.node.instance - cluster_id = instance._storage['cluster_id'] - instance.start_fuel_migration() - instance.check_migration_status() - instance.run_checkers() - instance.manager.fuel_web.verify_network(cluster_id) - instance.manager.fuel_web.run_ostf(cluster_id=cluster_id) - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.fuel_master_migrate -class TestFuelMasterMigrate(object): - - compute = None - cluster_config = { - 'name': "FuelMasterMigrate", - 'mode': settings.DEPLOYMENT_MODE, - 'nodes': { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - } - } - - @pytest.mark.need_ready_cluster - @pytest.mark.usefixtures("fuel_master_migration") - @pytest.mark.test_fuel_master_migrate - def test_fuel_master_migrate(self): - """Fuel master migration to VM - - Scenario: - 1. Create environment with two computes and three controllers - 2. Run network checker - 3. Deploy environment - 4. Run network checker - 5. Migrate Fuel Master to the compute node - 6. Run network checker - 7. 
Run OSTF - """ - - self.manager.show_step(1) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - self.manager.show_step(5) - self.manager.show_step(6) - self.manager.show_step(7) - - @pytest.mark.need_ready_cluster - @pytest.mark.usefixtures("fuel_master_migration") - @pytest.mark.test_compute_hard_restart - def test_compute_hard_restart(self): - """Check Fuel Master node functionality after hard restart of the - compute where Fuel Master node is located - - Scenario: - 1. Deploy cluster with two computes and three controllers - 2. Migrate Fuel Master - 3. Hard restart for compute node where Fuel Master node was - migrated to - 4. Reconnect to Fuel Master - 5. Check status for master's services - 6. Run OSTF - """ - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - self.manager.show_step(1) - self.manager.show_step(2) - - self.manager.show_step(3) - self.compute_hard_restart() - self.node_rebooted(self.env.get_admin_node_ip()) - - self.manager.show_step(4) - self.run_checkers() - - self.manager.show_step(5) - fuel_web.verify_network(cluster_id) - - self.manager.show_step(6) - fuel_web.run_ostf(cluster_id=cluster_id) - - @pytest.mark.need_ready_cluster - @pytest.mark.usefixtures("fuel_master_migration") - @pytest.mark.test_compute_warm_restart - def test_compute_warm_restart(self): - """Check Fuel Master node functionality after warm restart of the - compute where Fuel Master node is located - - Scenario: - 1. Deploy cluster with two computes and three controllers - 2. Migrate Fuel Master - 3. Warm restart for compute node where Fuel Master node was - migrated to - 4. Reconnect to Fuel Master - 5. Check status for master's services - 6. Run OSTF - """ - - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - self.manager.show_step(1) - self.manager.show_step(2) - - self.manager.show_step(3) - self.compute_warm_restart() - self.node_rebooted(self.env.get_admin_node_ip()) - - self.manager.show_step(4) - self.run_checkers() - - self.manager.show_step(5) - fuel_web.verify_network(cluster_id) - - self.manager.show_step(6) - fuel_web.run_ostf(cluster_id=cluster_id) - - def start_fuel_migration(self): - """Migrate Fuel Master to a compute""" - - # Get a compute to migrate Fuel Master to - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - self.compute = fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - logger.info( - 'Fuel Master will be migrated to {0} ' - 'compute'.format(self.compute['name'])) - - # Start migrating Fuel Master - with self.env.d_env.get_admin_remote() as remote: - remote.execute('fuel-migrate {0} >/dev/null &'. 
- format(self.compute['ip'])) - - def check_migration_status(self): - """Check periodically the status of Fuel Master migration process""" - - logger.info( - 'Rebooting to begin the data sync process for fuel migrate') - self.node_rebooted(self.env.get_admin_node_ip()) - - logger.info('Fuel Master is migrating..') - self.node_rebooted(self.env.get_admin_node_ip(), interval=0.5, - timeout=60 * 45) - - logger.info('Waiting for appearance of /tmp/migration-done file..') - with get_admin_remote(self.env.d_env) as remote: - wait(lambda: remote.exists("/tmp/migration-done"), - timeout=60 * 5, - timeout_msg="File /tmp/migration-done wasn't appeared") - - @staticmethod - def node_rebooted(ip, interval=5, timeout=60 * 15): - wait(lambda: not icmp_ping(ip), interval=interval, timeout=timeout, - timeout_msg=("Node with ip: {} has not become offline after " - "starting reboot").format(ip)) - wait(lambda: icmp_ping(ip), interval=interval, timeout=timeout, - timeout_msg="Node with ip: {} has not become online " - "after reboot".format(ip)) - - def wait_nailgun_nodes(self): - """Wait for cluster nodes online state in nailgun""" - - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - fuel_web.wait_cluster_nodes_get_online_state(cluster_id) - - def wait_mcollective_nodes(self): - """Wait for mcollective online status of cluster nodes""" - - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - wait(lambda: fuel_web.mcollective_nodes_online(cluster_id), - timeout=60 * 5, timeout_msg="Cluster nodes don't become available" - " via mcollective in allotted time.") - - def wait_nailgun_available(self): - """Check status for Nailgun""" - - fuel_web = self.manager.fuel_web - - wait_pass(fuel_web.get_nailgun_version, - timeout=60 * 20) - - def compute_hard_restart(self): - """Hard restart compute with Fuel Master node""" - - fuel_web = self.manager.fuel_web - fuel_web.cold_restart_nodes( - [fuel_web.get_devops_node_by_nailgun_node(self.compute)], - wait_offline=False, wait_online=False, wait_after_destroy=5 - ) - - def compute_warm_restart(self): - """Warm restart of the compute with Fuel Master node""" - - logger.debug('Reboot (warm restart) ip {0}'.format(self.compute['ip'])) - with self.env.d_env.get_ssh_to_remote(self.compute['ip']) as remote: - remote.execute('/sbin/shutdown -r now') - - def run_checkers(self): - """Run set of checkers""" - - self.wait_nailgun_available() - self.wait_mcollective_nodes() - self.wait_nailgun_nodes() diff --git a/fuel_tests/tests/test_l2_network_config.py b/fuel_tests/tests/test_l2_network_config.py deleted file mode 100644 index ca38076f3..000000000 --- a/fuel_tests/tests/test_l2_network_config.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import pytest - -from fuelweb_test import settings -from fuelweb_test.settings import iface_alias - -# pylint: disable=no-member - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.thread_1 -class TestL2NetworkConfig(object): - """TestL2NetworkConfig.""" # TODO documentation - - cluster_config = { - 'name': "TestL2NetworkConfig", - 'mode': settings.DEPLOYMENT_MODE, - 'nodes': { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - } - - @pytest.mark.need_ready_slaves - @pytest.mark.deploy_node_multiple_interfaces - def test_deploy_node_multiple_interfaces(self): - """Deploy cluster with networks allocated on different interfaces - - Scenario: - 1. Create cluster in Ha mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Add 1 node with cinder role - 5. Split networks on existing physical interfaces - 6. Deploy the cluster - 7. Verify network configuration on each deployed node - 8. Run network verification - - Duration 25m - Snapshot: deploy_node_multiple_interfaces - - """ - # self.env.revert_snapshot("ready_with_3_slaves") - - fuel_web = self.manager.fuel_web - interfaces_dict = { - iface_alias('eth0'): ['fuelweb_admin'], - iface_alias('eth1'): ['public'], - iface_alias('eth2'): ['storage'], - iface_alias('eth3'): ['private'], - iface_alias('eth4'): ['management'], - } - self.manager.show_step(1) - cluster_id = fuel_web.create_cluster( - name=self.cluster_config['name'], - mode=self.cluster_config['mode'], - ) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - fuel_web.update_nodes( - cluster_id, - self.cluster_config['nodes'] - ) - self.manager.show_step(5) - nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - fuel_web.update_node_networks(node['id'], interfaces_dict) - - self.manager.show_step(6) - fuel_web.deploy_cluster_wait(cluster_id) - - self.manager.show_step(7) - fuel_web.verify_network(cluster_id) - - @pytest.mark.skip(reason="Disabled in fuelweb_test") - @pytest.mark.untagged_networks_negative - @pytest.mark.need_ready_slaves - def test_untagged_networks_negative(self): - """Verify network verification fails with untagged network on eth0 - - Scenario: - 1. Create cluster in ha mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Add 1 node with compute cinder - 5. Split networks on existing physical interfaces - 6. Remove VLAN tagging from networks which are on eth0 - 7. Run network verification (assert it fails) - 8. 
Start cluster deployment (assert it fails) - - Duration 30m - - """ - fuel_web = self.manager.fuel_web - vlan_turn_off = {'vlan_start': None} - interfaces = { - iface_alias('eth0'): ["fixed"], - iface_alias('eth1'): ["public"], - iface_alias('eth2'): ["management", "storage"], - iface_alias('eth3'): [] - } - - self.manager.show_step(1) - cluster_id = fuel_web.create_cluster( - name=self.cluster_config['name'], - mode=self.cluster_config['mode'], - ) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - fuel_web.update_nodes( - cluster_id, - self.cluster_config['nodes'] - ) - - self.manager.show_step(5) - nets = fuel_web.client.get_networks(cluster_id)['networks'] - nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - fuel_web.update_node_networks(node['id'], interfaces) - - self.manager.show_step(6) - # select networks that will be untagged: - for net in nets: - net.update(vlan_turn_off) - - # stop using VLANs: - fuel_web.client.update_network(cluster_id, networks=nets) - - self.manager.show_step(7) - # run network check: - fuel_web.verify_network(cluster_id, success=False) - - self.manager.show_step(8) - # deploy cluster: - task = fuel_web.deploy_cluster(cluster_id) - fuel_web.assert_task_failed(task) diff --git a/fuel_tests/tests/test_neutron.py b/fuel_tests/tests/test_neutron.py deleted file mode 100644 index ba562a9ff..000000000 --- a/fuel_tests/tests/test_neutron.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pytest - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.ssh_manager import SSHManager - -# pylint: disable=no-member -ssh_manager = SSHManager() - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.need_ready_cluster -@pytest.mark.ha_neutron -class TestNeutronTunHa(object): - """NeutronTunHa. - - Old groups: ha_neutron, neutron, ha, classic_provisioning - """ # TODO documentation - - cluster_config = { - "name": "NeutronTunHa", - "mode": settings.DEPLOYMENT_MODE, - "settings": { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'haTun', - 'user': 'haTun', - 'password': 'haTun' - }, - "nodes": { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - } - - @pytest.mark.deploy_neutron_gre_ha - @pytest.mark.ha_neutron_gre - def test_deploy_neutron_gre_ha(self): - """Deploy cluster in HA mode with Neutron TUN - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. 
Run OSTF - - Duration 80m - Snapshot deploy_neutron_gre_ha - - """ - self.manager.show_step(1) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - self.manager.show_step(5) - - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - cluster = fuel_web.client.get_cluster(cluster_id) - assert str(cluster['net_provider']) == settings.NEUTRON - - devops_node = fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - logger.debug("devops node name is {0}".format(devops_node.name)) - - _ip = fuel_web.get_nailgun_node_by_name(devops_node.name)['ip'] - for _ in range(5): - try: - checkers.check_swift_ring(_ip) - break - except AssertionError: - cmd = "/usr/local/bin/swift-rings-rebalance.sh" - result = ssh_manager.execute(ip=_ip, cmd=cmd) - logger.debug("command execution result is {0}" - .format(result['exit_code'])) - else: - checkers.check_swift_ring(_ip) - - self.manager.show_step(6) - fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.need_ready_cluster -@pytest.mark.ha_neutron -class TestNeutronVlanHa(object): - """NeutronVlanHa. - - - Old groups: neutron, ha, ha_neutron - - """ # TODO documentation - - cluster_config = { - "name": "NeutronVlanHa", - "mode": settings.DEPLOYMENT_MODE, - "settings": { - "net_provider": settings.NEUTRON, - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - 'tenant': 'haVlan', - 'user': 'haVlan', - 'password': 'haVlan' - }, - "nodes": { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - } - - @pytest.mark.deploy_neutron_vlan_ha - @pytest.mark.neutron_vlan_ha - def test_deploy_neutron_vlan_ha(self): - """Deploy cluster in HA mode with Neutron VLAN - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. 
Run OSTF - - Duration 80m - Snapshot deploy_neutron_vlan_ha - - """ - self.manager.show_step(1) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - self.manager.show_step(5) - - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - cluster = fuel_web.client.get_cluster(cluster_id) - assert str(cluster['net_provider']) == settings.NEUTRON - - os_conn = os_actions.OpenStackActions( - fuel_web.get_public_vip(cluster_id), - user=self.cluster_config['settings']['user'], - passwd=self.cluster_config['settings']['password'], - tenant=self.cluster_config['settings']['tenant']) - - fuel_web.check_fixed_network_cidr( - cluster_id, os_conn) - - fuel_web.verify_network(cluster_id) - devops_node = fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - logger.debug("devops node name is {0}".format(devops_node.name)) - - _ip = fuel_web.get_nailgun_node_by_name(devops_node.name)['ip'] - for _ in range(5): - try: - checkers.check_swift_ring(_ip) - break - except AssertionError: - cmd = "/usr/local/bin/swift-rings-rebalance.sh" - result = ssh_manager.execute(ip=_ip, cmd=cmd) - logger.debug("command execution result is {0}" - .format(result['exit_code'])) - else: - checkers.check_swift_ring(_ip) - - self.manager.show_step(6) - fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.need_ready_cluster -@pytest.mark.thread_1 -@pytest.mark.neutron -class TestNeutronVlan(object): - """NeutronVlan.""" # TODO documentation - - cluster_config = { - "name": "NeutronVlan", - "mode": settings.DEPLOYMENT_MODE, - "settings": { - "net_provider": settings.NEUTRON, - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - 'tenant': 'simpleVlan', - 'user': 'simpleVlan', - 'password': 'simpleVlan' - }, - "nodes": { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'] - } - } - - @pytest.mark.deploy_neutron_vlan - @pytest.mark.ha_one_controller_neutron_vlan - @pytest.mark.deployment - @pytest.mark.nova - @pytest.mark.nova_compute - def test_deploy_neutron_vlan(self): - """Deploy cluster in ha mode with 1 controller and Neutron VLAN - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 35m - Snapshot deploy_neutron_vlan - - """ - self.manager.show_step(1) - self.manager.show_step(2) - self.manager.show_step(3) - - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - cluster = fuel_web.client.get_cluster(cluster_id) - assert str(cluster['net_provider']) == settings.NEUTRON - - self.manager.show_step(4) - fuel_web.verify_network(cluster_id) - - self.manager.show_step(5) - fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_vlan", is_make=True) diff --git a/fuel_tests/tests/test_neutron_ipv6.py b/fuel_tests/tests/test_neutron_ipv6.py deleted file mode 100644 index 7aa6c5ea7..000000000 --- a/fuel_tests/tests/test_neutron_ipv6.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -import pytest -from paramiko import ChannelException -from devops.helpers.ssh_client import SSHAuth -from devops.helpers.helpers import wait - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) - -# pylint: disable=no-member - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.need_ready_cluster -@pytest.mark.neutron -@pytest.mark.thread_1 -class TestNeutronIPv6(object): - """NeutronIPv6.""" - - cluster_config = { - "name": "NeutronVlan", - "mode": settings.DEPLOYMENT_MODE, - "settings": { - "net_provider": settings.NEUTRON, - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - 'tenant': 'simpleVlan', - 'user': 'simpleVlan', - 'password': 'simpleVlan' - }, - "nodes": { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'] - } - } - - @pytest.mark.deploy_neutron_ip_v6 - @pytest.mark.nova - @pytest.mark.nova_compute - @pytest.mark.neutron_ipv6 - @pytest.mark.deploy_neutron_ip_v6 - def test_deploy_neutron_ip_v6(self): - """Check IPv6 only functionality for Neutron VLAN - - Scenario: - 1. Revert deploy_neutron_vlan snapshot - 2. Create two dualstack network IPv6 subnets - (should be in SLAAC mode, - address space should not intersect). - 3. Create virtual router and set gateway. - 4. Attach this subnets to the router. - 5. Create a Security Group, - that allows SSH and ICMP for both IPv4 and IPv6. - 6. Launch two instances, one for each network. - 7. Lease a floating IP. - 8. Attach Floating IP for main instance. - 9. SSH to the main instance and ping6 another instance. 
- - Duration 10m - Snapshot deploy_neutron_ip_v6 - - """ - self.manager.show_step(1) - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - public_vip = fuel_web.get_public_vip(cluster_id) - logger.info('Public vip is %s', public_vip) - - os_conn = os_actions.OpenStackActions( - controller_ip=public_vip, - user='simpleVlan', - passwd='simpleVlan', - tenant='simpleVlan' - ) - - tenant = os_conn.get_tenant('simpleVlan') - - self.manager.show_step(2) - net1 = os_conn.create_network( - network_name='net1', - tenant_id=tenant.id)['network'] - net2 = os_conn.create_network( - network_name='net2', - tenant_id=tenant.id)['network'] - - subnet_1_v4 = os_conn.create_subnet( - subnet_name='subnet_1_v4', - network_id=net1['id'], - cidr='192.168.100.0/24', - ip_version=4) - - subnet_1_v6 = os_conn.create_subnet( - subnet_name='subnet_1_v6', - network_id=net1['id'], - ip_version=6, - cidr="2001:db8:100::/64", - gateway_ip="2001:db8:100::1", - ipv6_ra_mode="slaac", - ipv6_address_mode="slaac") - - subnet_2_v4 = os_conn.create_subnet( - subnet_name='subnet_2_v4', - network_id=net2['id'], - cidr='192.168.200.0/24', - ip_version=4) - - subnet_2_v6 = os_conn.create_subnet( - subnet_name='subnet_2_v6', - network_id=net2['id'], - ip_version=6, - cidr="2001:db8:200::/64", - gateway_ip="2001:db8:200::1", - ipv6_ra_mode="slaac", - ipv6_address_mode="slaac") - - self.manager.show_step(3) - router = os_conn.create_router('test_router', tenant=tenant) - - self.manager.show_step(4) - os_conn.add_router_interface( - router_id=router["id"], - subnet_id=subnet_1_v4["id"]) - - os_conn.add_router_interface( - router_id=router["id"], - subnet_id=subnet_1_v6["id"]) - - os_conn.add_router_interface( - router_id=router["id"], - subnet_id=subnet_2_v4["id"]) - - os_conn.add_router_interface( - router_id=router["id"], - subnet_id=subnet_2_v6["id"]) - - self.manager.show_step(5) - security_group = os_conn.create_sec_group_for_ssh() - - self.manager.show_step(6) - instance1 = os_conn.create_server( - name='instance1', - security_groups=[security_group], - net_id=net1['id'], - ) - - instance2 = os_conn.create_server( - name='instance2', - security_groups=[security_group], - net_id=net2['id'], - ) - - self.manager.show_step(7) - self.manager.show_step(8) - floating_ip = os_conn.assign_floating_ip(instance1) - floating_ip2 = os_conn.assign_floating_ip(instance2) - - self.manager.show_step(9) - - instance1_ipv6 = [ - addr['addr'] for addr in instance1.addresses[net1['name']] - if addr['version'] == 6].pop() - - instance2_ipv6 = [ - addr['addr'] for addr in instance2.addresses[net2['name']] - if addr['version'] == 6].pop() - - logger.info( - '\ninstance1:\n' - '\tFloatingIP: {ip!s}\n' - '\tIPv6 address: {ipv6!s}'.format( - ip=floating_ip.ip, - ipv6=instance1_ipv6)) - logger.info( - '\ninstance2:\n' - '\tFloatingIP: {ip!s}\n' - '\tIPv6 address: {ipv6!s}'.format( - ip=floating_ip2.ip, - ipv6=instance2_ipv6)) - - with fuel_web.get_ssh_for_node("slave-01") as remote: - def ssh_ready(vm_host): - try: - remote.execute_through_host( - hostname=vm_host, - cmd="ls -la", - auth=cirros_auth - ) - return True - except ChannelException: - return False - - for vm_host, hostname in ( - (floating_ip.ip, instance1), - (floating_ip2.ip, instance2) - ): - wait(lambda: ssh_ready(vm_host), timeout=120, - timeout_msg='ssh is not ready on host ' - '{hostname:s} ({ip:s}) at timeout 120s' - ''.format(hostname=hostname, ip=vm_host)) - - res = remote.execute_through_host( - hostname=floating_ip.ip, - cmd="{ping:s} -q " - "-c{count:d} " - 
"-w{deadline:d} " - "-s{packetsize:d} " - "{dst_address:s}".format( - ping='ping6', - count=10, - deadline=20, - packetsize=1452, - dst_address=instance2_ipv6), - auth=cirros_auth - ) - - logger.info( - 'Ping results: \n\t{res:s}'.format(res=res['stdout_str'])) - - assert res['exit_code'] == 0, ( - 'Ping failed with error code: {code:d}\n' - '\tSTDOUT: {stdout:s}\n' - '\tSTDERR: {stderr:s}'.format( - code=res['exit_code'], - stdout=res['stdout_str'], - stderr=res['stderr_str'])) - - self.env.make_snapshot('deploy_neutron_ip_v6') diff --git a/fuel_tests/tests/test_restart.py b/fuel_tests/tests/test_restart.py deleted file mode 100644 index 33513f1f1..000000000 --- a/fuel_tests/tests/test_restart.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pytest - -from fuelweb_test import settings - -# pylint: disable=no-member - - -@pytest.mark.get_logs -@pytest.mark.fail_snapshot -@pytest.mark.thread_1 -class TestHAOneControllerNeutronRestart(object): - - cluster_config = { - 'name': "TestHAOneControllerNeutronRestart", - 'mode': settings.DEPLOYMENT_MODE, - 'nodes': { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - } - - @pytest.mark.need_ready_cluster - @pytest.mark.ha_one_controller_neutron_warm_restart - def test_ha_one_controller_neutron_warm_restart(self): - """Warm restart for ha one controller environment - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - 7. Warm restart - 8. Wait for HA services to be ready - 9. Wait for OS services to be ready - 10. Wait for Galera is up - 11. Verify firewall rules - 12. Run network verification - 13. Run OSTF - - Duration 30m - - """ - cluster_id = self._storage['cluster_id'] - fuel_web = self.manager.fuel_web - - self.manager.show_step(1) - self.manager.show_step(2) - self.manager.show_step(3) - self.manager.show_step(4) - - self.manager.show_step(5) - fuel_web.verify_network(cluster_id) - self.manager.show_step(6) - fuel_web.run_ostf(cluster_id=cluster_id) - - self.manager.show_step(7) - fuel_web.warm_restart_nodes( - self.env.d_env.get_nodes(name__in=['slave-01', 'slave-02'])) - - self.manager.show_step(8) - fuel_web.assert_ha_services_ready(cluster_id) - - self.manager.show_step(9) - fuel_web.assert_os_services_ready(cluster_id) - - self.manager.show_step(10) - fuel_web.wait_mysql_galera_is_up(['slave-01']) - - self.manager.show_step(11) - fuel_web.security.verify_firewall(cluster_id) - - self.manager.show_step(12) - fuel_web.verify_network(cluster_id) - - self.manager.show_step(13) - fuel_web.run_ostf(cluster_id=cluster_id) diff --git a/fuelweb_test/__init__.py b/fuelweb_test/__init__.py deleted file mode 100644 index 4162481e8..000000000 --- a/fuelweb_test/__init__.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2016 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -import logging.config -import os -import warnings - -from core.helpers.log_helpers import logwrap -from core.helpers.log_helpers import QuietLogger - -from fuelweb_test.settings import LOGS_DIR - -if not os.path.exists(LOGS_DIR): - os.makedirs(LOGS_DIR) - -_log_config = { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'default': { - 'format': '%(asctime)s - %(levelname)s %(filename)s:' - '%(lineno)d -- %(message)s', - 'datefmt': '%Y-%m-%d %H:%M:%S' - }, - }, - 'handlers': { - 'console': { - 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'default' - }, - 'tests_log': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'formatter': 'default', - 'filename': os.path.join(LOGS_DIR, 'sys_test.log'), - 'mode': 'w', - 'encoding': 'utf8', - }, - 'devops_log': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'formatter': 'default', - 'filename': os.path.join(LOGS_DIR, 'devops.log'), - 'mode': 'w', - 'encoding': 'utf8', - }, - 'null': { - 'level': 'CRITICAL', - 'class': 'logging.NullHandler', - }, - }, - 'loggers': { - # Log all to log file , but by default only warnings. - '': { - 'handlers': ['tests_log'], - 'level': 'WARNING', - }, - 'fuel-qa': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': True - }, - 'devops': { - 'handlers': ['console', 'devops_log'], - 'level': 'DEBUG', - 'propagate': True # Test log too - }, - # py.warnings is changed by Django -> do not propagate - 'py.warnings': { - 'handlers': ['console', 'tests_log'], - 'level': 'WARNING', - 'propagate': False - }, - 'paramiko': {'level': 'WARNING'}, - 'iso8601': {'level': 'WARNING'}, - 'keystoneauth': {'level': 'WARNING'}, - } -} - -logging.config.dictConfig(_log_config) -logging.captureWarnings(True) # Log warnings -# Filter deprecation warnings: log only when deletion announced -warnings.filterwarnings( - 'default', - message=r'.*(drop|remove)+.*', - category=DeprecationWarning) - -logger = logging.getLogger('fuel-qa.{}'.format(__name__)) - -__all__ = ['QuietLogger', 'logwrap', 'logger'] diff --git a/fuelweb_test/config_templates/create_primary_role.yaml b/fuelweb_test/config_templates/create_primary_role.yaml deleted file mode 100644 index 2009dac71..000000000 --- a/fuelweb_test/config_templates/create_primary_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -meta: - conflicts: - - controller - - compute - description: New role - has_primary: true - name: Test primary role - tags: - - base-os -name: test-primary-role -volumes_roles_mapping: -- allocate_size: min - id: os diff --git a/fuelweb_test/config_templates/create_role.yaml b/fuelweb_test/config_templates/create_role.yaml deleted file mode 100644 index 81a4fde36..000000000 --- a/fuelweb_test/config_templates/create_role.yaml +++ /dev/null @@ -1,12 +0,0 @@ -meta: - conflicts: - - controller - - compute - description: New role - name: Test role - tags: - - base-os -name: test-role -volumes_roles_mapping: -- allocate_size: 
min - id: os \ No newline at end of file diff --git a/fuelweb_test/config_templates/custom_graph_tasks.yaml b/fuelweb_test/config_templates/custom_graph_tasks.yaml deleted file mode 100644 index 8a5750f66..000000000 --- a/fuelweb_test/config_templates/custom_graph_tasks.yaml +++ /dev/null @@ -1,27 +0,0 @@ -- id: custom_task_on_controller - type: shell - version: 2.0.0 - role: ['/(primary-)?controller/'] - parameters: - cmd: 'echo "controller" >> /tmp/custom_task_log' - -- id: custom_task_on_compute - type: shell - version: 2.0.0 - role: ['compute'] - parameters: - cmd: 'echo "compute" >> /tmp/custom_task_log' - -- id: custom_task_on_cinder - type: shell - version: 2.0.0 - role: ['cinder'] - parameters: - cmd: 'echo "cinder" >> /tmp/custom_task_log' - -- id: custom_task_on_ceph-osd - type: shell - version: 2.0.0 - role: ['ceph-osd'] - parameters: - cmd: 'echo "ceph-osd" >> /tmp/custom_task_log' diff --git a/fuelweb_test/config_templates/custom_yaql_tasks.yaml b/fuelweb_test/config_templates/custom_yaql_tasks.yaml deleted file mode 100644 index ac0c52269..000000000 --- a/fuelweb_test/config_templates/custom_yaql_tasks.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- id: custom_task_on_all_nodes - type: shell - version: 2.0.0 - condition: - yaql_exp: '$.uid in added($.nodes).uid' - role: ['/.*/'] - requires: ['custom_task_on_controller'] - parameters: - cmd: 'echo "yaql_task_on_all_nodes" >> /tmp/yaql_task_on_all_nodes' diff --git a/fuelweb_test/config_templates/keystone.yaml b/fuelweb_test/config_templates/keystone.yaml deleted file mode 100644 index 99e466ecc..000000000 --- a/fuelweb_test/config_templates/keystone.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -keystone_config: - token/expiration: - value: 300 - notify: Service[apache2] -service: - apache2: - ensure: running \ No newline at end of file diff --git a/fuelweb_test/config_templates/keystone_ldap.yaml b/fuelweb_test/config_templates/keystone_ldap.yaml deleted file mode 100644 index a20c58ecc..000000000 --- a/fuelweb_test/config_templates/keystone_ldap.yaml +++ /dev/null @@ -1,62 +0,0 @@ ---- -keystone_config: - ldap/url: - value: ldap://dc.example.com - ldap/user: - value: CN=ldap,OU=Users,DC=example,DC=com - ldap/password: - value: ldap_test - ldap/suffix: - value: DC=example,DC=com - ldap/use_dumb_member: - value: "True" - ldap/dumb_member: - value: CN=ldap,OU=Users,DC=example,DC=com - ldap/user_tree_dn: - value: OU=Users,DC=example,DC=com - ldap/user_objectclass: - value: person - ldap/user_filter: - ensure: absent - ldap/user_id_attribute: - value: cn - ldap/user_name_attribute: - value: cn - ldap/user_mail_attribute: - value: mail - ldap/user_pass_attribute: - ensure: absent - ldap/user_enabled_attribute: - value: userAccountControl - ldap/user_enabled_mask: - value: "2" - ldap/user_enabled_default: - value: "512" - ldap/user_attribute_ignore: - value: password,tenant_id,tenants - ldap/user_allow_create: - value: "False" - ldap/user_allow_update: - value: "False" - ldap/user_allow_delete: - value: "False" - ldap/role_tree_dn: - value: OU=Roles,DC=example,DC=com - ldap/role_filter: - ensure: absent - ldap/role_objectclass: - value: organizationalRole - ldap/role_id_attribute: - value: cn - ldap/role_name_attribute: - value: ou - ldap/role_name_attribute: - value: roleOccupant - ldap/role_attribute_ignore: - ensure: absent - ldap/role_allow_create: - value: "True" - ldap/role_allow_create: - value: "True" - ldap/role_allow_create: - value: "True" diff --git a/fuelweb_test/config_templates/master_node_tasks.yaml 
b/fuelweb_test/config_templates/master_node_tasks.yaml deleted file mode 100644 index 5d59fe51d..000000000 --- a/fuelweb_test/config_templates/master_node_tasks.yaml +++ /dev/null @@ -1,22 +0,0 @@ -- id: task_on_master_1 - type: shell - version: 2.0.0 - role: ['master'] - required_for: ['task_on_master_2'] - parameters: - cmd: 'echo 1 > /tmp/master_task' - -- id: task_on_master_2 - type: shell - version: 2.0.0 - role: ['master'] - parameters: - cmd: 'echo 2 >> /tmp/master_task' - -- id: task_on_master_3 - type: shell - version: 2.0.0 - role: ['master'] - requires: ['task_on_master_2'] - parameters: - cmd: 'echo 3 >> /tmp/master_task' diff --git a/fuelweb_test/config_templates/neutron.yaml b/fuelweb_test/config_templates/neutron.yaml deleted file mode 100644 index 2da72f6a6..000000000 --- a/fuelweb_test/config_templates/neutron.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -neutron_plugin_ml2: - ml2_type_vlan/network_vlan_ranges: - value: "physnet2:900:901,physnet1" - notify: "Service[neutron-server]" -neutron_config: - DEFAULT/verbose: - ensure: "absent" - notify: "Service[neutron-server]" - DEFAULT/debug: - value: "True" - notify: "Service[neutron-server]" -service: - neutron-server: - ensure: running - diff --git a/fuelweb_test/config_templates/new_fields_compute.yaml b/fuelweb_test/config_templates/new_fields_compute.yaml deleted file mode 100644 index 97cf7f6ae..000000000 --- a/fuelweb_test/config_templates/new_fields_compute.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -nova_config: - fuel_qa/service_reconfiguration_8: - value: "InProgress" - notify: "Service[nova-compute]" -service: - nova-compute: - ensure: running diff --git a/fuelweb_test/config_templates/new_fields_controller.yaml b/fuelweb_test/config_templates/new_fields_controller.yaml deleted file mode 100644 index e8736b17b..000000000 --- a/fuelweb_test/config_templates/new_fields_controller.yaml +++ /dev/null @@ -1,62 +0,0 @@ ---- -neutron_config: - fuel_qa/service_reconfiguration_1: - value: "InProgress" - notify: "Service[neutron-server]" -neutron_plugin_ml2: - fuel_qa/service_reconfiguration_2: - value: "InProgress" - notify: "Service[neutron-server]" -neutron_dhcp_agent_config: - fuel_qa/service_reconfiguration_4: - value: "InProgress" - notify: "Service[neutron-dhcp-agent]" -neutron_l3_agent_config: - fuel_qa/service_reconfiguration_5: - value: "InProgress" - notify: "Service[neutron-l3-agent]" -neutron_metadata_agent_config: - fuel_qa/service_reconfiguration_6: - value: "InProgress" - notify: "Service[neutron-metadata-agent]" -neutron_api_config: - fuel_qa/service_reconfiguration_7: - value: "InProgress" - notify: "Service[neutron-server]" -keystone_config: - fuel_qa/service_reconfiguration_8: - value: "InProgress" -nova_config: - fuel_qa/service_reconfiguration_9: - value: "InProgress" - notify: - - "Service[nova-scheduler]" - - "Service[nova-novncproxy]" - - "Service[nova-conductor]" - - "Service[nova-api]" - - "Service[nova-consoleauth]" - - "Service[nova-cert]" -service: - neutron-server: - ensure: running - nova-scheduler: - ensure: running - nova-novncproxy: - ensure: running - nova-conductor: - ensure: running - nova-api: - ensure: running - nova-consoleauth: - ensure: running - nova-cert: - ensure: running - neutron-dhcp-agent: - ensure: running - provider: pacemaker - neutron-l3-agent: - ensure: running - provider: pacemaker - neutron-metadata-agent: - ensure: running - provider: pacemaker diff --git a/fuelweb_test/config_templates/nova_cpu.yaml b/fuelweb_test/config_templates/nova_cpu.yaml deleted file mode 100644 
index 2485f4327..000000000 --- a/fuelweb_test/config_templates/nova_cpu.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -nova_config: - DEFAULT/cpu_allocation_ratio: - value: "1.0" - notify: "Service[nova-scheduler]" -service: - nova-scheduler: - ensure: running diff --git a/fuelweb_test/config_templates/nova_cpu_old.yaml b/fuelweb_test/config_templates/nova_cpu_old.yaml deleted file mode 100644 index 30a6eefe5..000000000 --- a/fuelweb_test/config_templates/nova_cpu_old.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -nova_config: - DEFAULT/cpu_allocation_ratio: - ensure: absent - notify: "Service[nova-scheduler]" -service: - nova-scheduler: - ensure: running diff --git a/fuelweb_test/config_templates/nova_disk.yaml b/fuelweb_test/config_templates/nova_disk.yaml deleted file mode 100644 index 20dc09cda..000000000 --- a/fuelweb_test/config_templates/nova_disk.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -nova_config: - DEFAULT/default_ephemeral_format: - value: ext4 - notify: "Service[nova-compute]" -service: - nova-compute: - ensure: running \ No newline at end of file diff --git a/fuelweb_test/config_templates/nova_disk_cinder_role.yaml b/fuelweb_test/config_templates/nova_disk_cinder_role.yaml deleted file mode 100644 index 4feb921f7..000000000 --- a/fuelweb_test/config_templates/nova_disk_cinder_role.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -nova_config: - DEFAULT/default_ephemeral_format: - value: "ext3" - notify: "Service[nova-compute]" - DEFAULT/verbose: - value: "False" - notify: "Service[nova-compute]" -service: - nova-compute: - ensure: running diff --git a/fuelweb_test/config_templates/nova_quota.yaml b/fuelweb_test/config_templates/nova_quota.yaml deleted file mode 100644 index 8664fa738..000000000 --- a/fuelweb_test/config_templates/nova_quota.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -nova_config: - DEFAULT/quota_driver: - value: nova.quota.DbQuotaDriver - notify: - - "Service[nova-api]" - DEFAULT/quota_instances: - value: "1" - notify: - - "Service[nova-api]" -service: - nova-api: - ensure: running diff --git a/fuelweb_test/config_templates/prepare_release_image.py b/fuelweb_test/config_templates/prepare_release_image.py deleted file mode 100644 index 758a69d9a..000000000 --- a/fuelweb_test/config_templates/prepare_release_image.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
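
The script deleted below generated a build_image.sh for producing a target provisioning image: it fetched a provisioning task template from Nailgun and expanded the template's {...} placeholders with chained str.replace() calls. A minimal stand-alone sketch of that expansion step follows; the command template and values here are invented for illustration, since the real template comes from nailgun.orchestrator.tasks_templates.make_provisioning_images_task and the real values from the release object and Nailgun settings.

# Sketch only: the placeholder expansion performed by the deleted
# prepare_release_image.py, with an invented command template.
template_cmd = ('fuel-image-build --release {cluster.release.version} '
                '--env {cluster.release.environment_version} '
                '--master http://{settings.MASTER_IP}:8080 '
                '--out /var/www/nailgun/{cluster.id}_ubuntu.tar.gz')

substitutions = {
    '{cluster.release.environment_version}': '9.0',
    '{cluster.release.version}': 'mitaka-9.0',
    '{settings.MASTER_IP}': '10.20.0.2',
    '{cluster.id}': 'release_2',
}

cmd = template_cmd
for placeholder, value in substitutions.items():
    cmd = cmd.replace(placeholder, value)

# Write the expanded command out as a shell script, as the real helper did.
with open('build_image.sh', 'w') as cmd_file:
    cmd_file.write(cmd + '\n')
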
-"""Script to prepare shell script to generate target image""" - - -def execute(): - """Function to prepare shell script to generate target image""" - import sys - - import six - - from nailgun.settings import NailgunSettings - from nailgun.objects.release import Release - from nailgun import consts - from nailgun.orchestrator import tasks_templates - - settings = NailgunSettings() - master_ip = settings.config['MASTER_IP'] - release_id = sys.argv[1] - - rel = Release.get_by_uid(release_id) - - packages_str = \ - rel.attributes_metadata['editable']['provision']['packages']['value'] - packages = list( - six.moves.filter(bool, (s.strip() for s in packages_str.split('\n')))) - task = tasks_templates.make_provisioning_images_task( - [consts.MASTER_NODE_UID], - rel.attributes_metadata['editable']['repo_setup']['repos']['value'], - rel.attributes_metadata['generated']['provision'], - 'prepare_release_ubuntu', - packages) - - release_str = 'release_{release_id}'.format(release_id=release_id) - with open('build_image.sh', 'w') as cmd_file: - cmd_file.write(task['parameters']['cmd'].replace( - "{cluster.release.environment_version}", - rel.environment_version).replace( - '{cluster.release.version}', - rel.version).replace( - '{settings.MASTER_IP}', - master_ip).replace( - "{cluster.id}", - release_str)) - - -if __name__ == '__main__': - execute() diff --git a/fuelweb_test/config_templates/release_custom_tasks.yaml b/fuelweb_test/config_templates/release_custom_tasks.yaml deleted file mode 100644 index 78941d506..000000000 --- a/fuelweb_test/config_templates/release_custom_tasks.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- id: custom_task_on_all_nodes - type: shell - version: 2.0.0 - role: ['/.*/'] - requires: ['custom_task_on_controller'] - parameters: - cmd: 'echo "custom_task_on_all_nodes" > /tmp/custom_task_on_all_nodes' diff --git a/fuelweb_test/helpers/__init__.py b/fuelweb_test/helpers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/helpers/ceph.py b/fuelweb_test/helpers/ceph.py deleted file mode 100644 index b029a0701..000000000 --- a/fuelweb_test/helpers/ceph.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis.asserts import assert_equal - -from fuelweb_test import logger -from fuelweb_test.helpers.utils import check_distribution -from fuelweb_test.settings import DNS_SUFFIX -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import OPENSTACK_RELEASE_CENTOS -from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU -from fuelweb_test.settings import UBUNTU_SERVICE_PROVIDER - - -def start_monitor(remote): - """Starts ceph-mon service depending on Linux distribution. 
- - :param remote: devops.helpers.helpers.SSHClient - :return: None - :raise: DistributionNotSupported - """ - logger.debug("Starting Ceph monitor on {0}".format(remote.host)) - check_distribution() - if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE: - remote.check_call('start ceph-mon-all') - if OPENSTACK_RELEASE_CENTOS in OPENSTACK_RELEASE: - remote.check_call('/etc/init.d/ceph start') - - -def stop_monitor(remote): - """Stops ceph-mon service depending on Linux distribution. - - :param remote: devops.helpers.helpers.SSHClient - :return: None - :raise: DistributionNotSupported - """ - logger.debug("Stopping Ceph monitor on {0}".format(remote.host)) - check_distribution() - if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE: - remote.check_call('stop ceph-mon-all') - if OPENSTACK_RELEASE_CENTOS in OPENSTACK_RELEASE: - remote.check_call('/etc/init.d/ceph stop') - - -def restart_monitor(remote): - """Restarts ceph-mon service depending on Linux distribution. - - :param remote: devops.helpers.helpers.SSHClient - :return: None - :raise: DistributionNotSupported - """ - stop_monitor(remote) - start_monitor(remote) - - -def get_health(remote): - logger.debug("Checking Ceph cluster health on {0}".format(remote.host)) - cmd = 'ceph health -f json' - return remote.check_call(cmd).stdout_json - - -def get_monitor_node_fqdns(remote): - """Returns node FQDNs with Ceph monitor service is running. - - :param remote: devops.helpers.helpers.SSHClient - :return: list of FQDNs - """ - cmd = 'ceph mon_status -f json' - result = remote.check_call(cmd).stdout_json - fqdns = [i['name'] + DNS_SUFFIX for i in result['monmap']['mons']] - msg = "Ceph monitor service is running on {0}".format(', '.join(fqdns)) - logger.debug(msg) - return fqdns - - -def is_clock_skew(remote): - """Checks whether clock skews across the monitor nodes. - - :param remote: devops.helpers.helpers.SSHClient - :return: bool - """ - if is_health_warn(remote): - if 'clock skew' in ' '.join(health_detail(remote)): - return True - - return False - - -def get_node_fqdns_w_clock_skew(remote): - """Returns node FQDNs with a clock skew. - - :param remote: devops.helpers.helpers.SSHClient - :return: list of FQDNs - """ - fqdns = [] - if not is_clock_skew(remote): - return fqdns - - for i in get_health(remote)['timechecks']['mons']: - if abs(float(i['skew'])) >= 0.05: - fqdns.append(i['name'] + DNS_SUFFIX) - logger.debug("Clock skew is found on {0}".format(', '.join(fqdns))) - return fqdns - - -def check_disks(remote, nodes_ids): - nodes_names = ['node-{0}'.format(node_id) for node_id in nodes_ids] - disks_tree = get_osd_tree(remote) - osd_ids = get_osd_ids(remote) - logger.debug("Disks output information: \\n{0}".format(disks_tree)) - disks_ids = [] - for node in disks_tree['nodes']: - if node['type'] == 'host' and node['name'] in nodes_names: - disks_ids.extend(node['children']) - for node in disks_tree['nodes']: - if node['type'] == 'osd' and node['id'] in disks_ids: - assert_equal(node['status'], 'up', 'OSD node {0} is down'. - format(node['id'])) - for node in disks_tree['stray']: - if node['type'] == 'osd' and node['id'] in osd_ids: - logger.info("WARNING! Ceph OSD '{0}' has no parent host!". - format(node['name'])) - assert_equal(node['status'], 'up', 'OSD node {0} is down'. 
- format(node['id'])) - - -def check_service_ready(remote, exit_code=0): - cmds = [] - if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE: - if UBUNTU_SERVICE_PROVIDER == 'systemd': - # Gather services on remote node - cmd = 'systemctl show --property=Id ceph-mon*service '\ - 'ceph-osd*service ceph-radosgw*service' - result = remote.execute(cmd) - if result['exit_code'] != 0: - return False - - ceph_services = [] - for line in result['stdout']: - try: - _, value = line.strip().split('=', 1) - ceph_services.append(value) - except ValueError: - pass - for service in ceph_services: - cmds.append('systemctl is-active -q {}'.format(service)) - else: - cmds.append('service ceph-all status') - else: - cmds.append('service ceph status') - - if not cmds: - raise Exception("Don't know how to check ceph status. " - "Perhaps ceph packages are not installed") - - for cmd in cmds: - if remote.execute(cmd)['exit_code'] != exit_code: - return False - return True - - -def health_overall_status(remote): - """Returns Ceph health overall status. - - Can be one of: 'HEALTH_OK', 'HEALTH_WARN', 'HEALTH_ERR', ... - :param remote: devops.helpers.helpers.SSHClient - :return: str - - """ - health = get_health(remote) - return health['overall_status'] - - -def health_detail(remote): - """Returns 'detail' section of Ceph health. - - :param remote: devops.helpers.helpers.SSHClient - :return: JSON-like object - - """ - health = get_health(remote) - return health['detail'] - - -def is_health_ok(remote): - """Checks whether Ceph health overall status is OK. - - :param remote: devops.helpers.helpers.SSHClient - :return: bool - """ - - if health_overall_status(remote) == 'HEALTH_OK': - return True - if is_health_warn(remote): - health = get_health(remote) - if 'too many PGs' in health['summary'][0]['summary']: - return True - return False - - -def is_health_warn(remote): - """Checks whether Ceph health overall status is WARN. - - :param remote: devops.helpers.helpers.SSHClient - :return: bool - """ - return health_overall_status(remote) == 'HEALTH_WARN' - - -def is_pgs_recovering(remote): - """Checks whether Ceph PGs are being recovered. - - :param remote: devops.helpers.helpers.SSHClient - :return: bool - """ - keywords = ['degraded', 'recovery', 'osds', 'are', 'down'] - detail = ' '.join(health_detail(remote)) - if all(k in detail.split() for k in keywords): - return True - logger.debug('Ceph PGs are not being recovered. ' - 'Details: {0}'.format(detail)) - return False - - -def get_osd_tree(remote): - """Returns OSDs according to their position in the CRUSH map. - - :param remote: devops.helpers.helpers.SSHClient - :return: JSON-like object - """ - logger.debug("Fetching Ceph OSD tree") - cmd = 'ceph osd tree -f json' - return remote.check_call(cmd).stdout_json - - -def get_osd_ids(remote): - """Returns all OSD ids. - - :param remote: devops.helpers.helpers.SSHClient - :return: JSON-like object - """ - logger.debug("Fetching Ceph OSD ids") - cmd = 'ceph osd ls -f json' - return remote.check_call(cmd).stdout_json - - -def get_rbd_images_list(remote, pool): - """Returns all OSD ids. - - :param remote: devops.helpers.helpers.SSHClient - :param pool: string, can be: 'images', 'volumes', etc. 
- :return: JSON-like object - """ - cmd = 'rbd --pool {pool} --format json ls -l'.format(pool=pool) - return remote.check_call(cmd).stdout_json - - -def get_version(remote): - """Returns Ceph version - - :param remote: devops.helpers.helpers.SSHClient - :return: str - """ - cmd = 'ceph --version' - return remote.check_call(cmd).stdout[0].split(' ')[2] diff --git a/fuelweb_test/helpers/checkers.py b/fuelweb_test/helpers/checkers.py deleted file mode 100644 index 3e6ea29a3..000000000 --- a/fuelweb_test/helpers/checkers.py +++ /dev/null @@ -1,1555 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -import hashlib -import json -import os -import re -from time import sleep - -from devops.error import TimeoutError -from devops.helpers.helpers import wait_pass -from devops.helpers.helpers import wait -from devops.helpers.ssh_client import SSHAuth -from netaddr import IPAddress -from netaddr import IPNetwork -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true - -from keystoneauth1 import exceptions -# pylint: disable=redefined-builtin -# noinspection PyUnresolvedReferences -from six.moves import xrange -# pylint: enable=redefined-builtin -import yaml - -from core.helpers.log_helpers import logwrap - -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.helpers.utils import check_config -from fuelweb_test.helpers.utils import get_ini_config -from fuelweb_test.helpers.utils import get_mongo_partitions - - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) -ssh_manager = SSHManager() - - -@logwrap -def validate_minimal_amount_nodes( - nodes, expected_amount, - state='discover', online=True): - """Validate amount of nodes in state - - :type nodes: iterable - :type expected_amount: int - :type state: str - :type online: bool - :raises: Exception - """ - fnodes = [ - node for node in nodes - if node['online'] == online and node['status'] == state] - if len(fnodes) < expected_amount: - raise Exception( - 'Nodes in state {state} (online: {online}): ' - '{amount}, while expected: {expected}'.format( - state=state, - online=online, - amount=len(fnodes), - expected=expected_amount - ) - ) - - -@logwrap -def check_cinder_status(ip): - """Parse output and return False if any enabled service is down. - 'cinder service-list' stdout example: - | cinder-scheduler | node-1.test.domain.local | nova | enabled | up | - | cinder-scheduler | node-2.test.domain.local | nova | enabled | down | - """ - cmd = '. 
openrc; cinder service-list' - result = ssh_manager.execute_on_remote( - ip=ip, - cmd=cmd, - raise_on_assert=False - ) - cinder_services = result['stdout_str'] - logger.debug('>$ cinder service-list\n{}'.format(cinder_services)) - if result['exit_code'] == 0: - return all(' up ' in x.split('enabled')[1] - for x in cinder_services.split('\n') - if 'cinder' in x and 'enabled' in x and - len(x.split('enabled'))) - return False - - -@logwrap -def check_image(image, md5, path): - local_path = "{0}/{1}".format(path, image) - logger.debug('Check md5 {0} of image {1}/{2}'.format(md5, path, image)) - if not os.path.isfile(local_path): - logger.error('Image {0} not found in {1} directory'.format( - image, path)) - return False - with open(local_path, mode='rb') as fimage: - digits = hashlib.md5() - while True: - buf = fimage.read(4096) - if not buf: - break - digits.update(buf) - md5_local = digits.hexdigest() - if md5_local != md5: - logger.error('MD5 of {0}/{1} is not correct, aborting'.format( - path, image)) - return False - return True - - -@logwrap -def verify_service(ip, service_name, count=1, - ignore_count_of_proccesses=False): - ps_output = ssh_manager.check_call( - ip=ip, - command='ps ax' - )['stdout'] - api = [ps for ps in ps_output if service_name in ps] - logger.debug("{} \\n: {}".format(service_name, str(api))) - if not ignore_count_of_proccesses: - assert_equal(len(api), count, - "{0} count not equal to {1}".format(service_name, count)) - else: - assert_true(len(api), "Service '{0}' not found!".format(service_name)) - - -@logwrap -def verify_service_list_api(os_conn, service_count): - def _verify(): - ret = os_conn.get_nova_service_list() - logger.debug('Service list {0}'.format(ret)) - assert_equal(service_count, len(ret), - 'Expected service count is {0},' - ' but get {1} count, actual list {2}'.format( - service_count, len(ret), ret)) - for service in ret: - logger.debug('service is {0}'.format(service)) - assert_equal( - service.state, 'up', - 'Service {0} on host {1} has next ' - 'state {2}'.format(service.binary, - service.host, - service.state)) - try: - _verify() - except AssertionError: - logger.debug( - "Services still not read. Sleeping for 60 seconds and retrying") - sleep(60) - _verify() - - -@logwrap -def verify_glance_image_api(os_conn): - ret = os_conn.get_image_list() - assert_equal(1, len([i for i in ret if i.name == 'TestVM']), - "TestVM not found in glance image-list") - - -@logwrap -def verify_network_list_api(os_conn, net_count=None): - ret = os_conn.get_nova_network_list() - assert_equal(net_count, len(ret), - 'Unexpected count of networks detected, ' - 'expected: {0}, current {1} count,' - ' full list {2}'.format(net_count, len(ret), ret)) - - -@logwrap -def check_ceph_image_size(ip, expected_size, device='vdc'): - ret = ssh_manager.check_call( - ip=ip, - command="df -m /dev/{device}* | grep ceph | awk" - " {size}".format(device=device, - size=re.escape('{print $2}')) - ).stdout - - if not ret: - logger.error( - "Partition not present! 
{}: ".format( - ssh_manager.check_call(ip=ip, command="df -m").stdout_str)) - raise Exception() - logger.debug("Partitions: {part}".format(part=ret)) - assert_true(abs(float(ret[0].rstrip()) / expected_size - 1) < 0.1, - "size {0} is not equal" - " to {1}".format(ret[0].rstrip(), - expected_size)) - - -@logwrap -def check_cinder_image_size(ip, expected_size, device='vdc3'): - ret = get_mongo_partitions(ip, device)[0].rstrip().rstrip('G') - cinder_size = float(ret) * 1024 - assert_true(abs(cinder_size / expected_size - 1) < 0.1, - "size {0} is not equal" - " to {1}".format(ret[0].rstrip(), - expected_size)) - - -@logwrap -def check_unallocated_space(disks, contr_img_ceph=False): - for disk in disks: - # In case we have Ceph for images all space on controller - # should be given to Base System space: - if contr_img_ceph: - logger.info("Check that all space on /dev/{d} is allocated for " - "Base System Space".format(d=disk['name'])) - if not bool(disk["volumes"][0]["size"] == disk["size"]): - return False - else: - logger.info("Get overall size of volumes") - sizes = [v['size'] for v in disk["volumes"]] - logger.info("Space on disk: {s}".format(s=disk['size'])) - logger.info("Summary space of disks on /dev/{d}: {s}".format( - d=disk["name"], s=sum(sizes))) - if not bool(sum(sizes) == disk["size"]): - return False - return True - - -@logwrap -def check_archive_type(tar_path): - if os.path.splitext(tar_path)[1] not in [".tar", ".lrz", ".fp", ".rpm"]: - raise Exception("Wrong archive type!") - - -@logwrap -def check_file_exists(ip, path): - assert_true(ssh_manager.exists_on_remote(ip, path), - 'Can not find {0}'.format(path)) - logger.info('File {0} exists on {1}'.format(path, ip)) - - -@logwrap -def wait_phrase_in_log(ip, timeout, interval, phrase, log_path): - cmd = "grep '{0}' '{1}'".format(phrase, log_path) - wait( - lambda: not SSHManager().execute(ip=ip, cmd=cmd)['exit_code'], - interval=interval, - timeout=timeout, - timeout_msg="The phrase {0} not found in {1} file on " - "remote node".format(phrase, log_path)) - - -@logwrap -def enable_feature_group(env, group): - fuel_settings = env.admin_actions.get_fuel_settings() - if group not in fuel_settings["FEATURE_GROUPS"]: - fuel_settings["FEATURE_GROUPS"].append(group) - env.admin_actions.save_fuel_settings(fuel_settings) - - # NOTE(akostrikov) We use FUEL_SETTINGS_YAML as primary source or truth and - # update nailgun configs via puppet from that value - ssh_manager.check_call( - ip=ssh_manager.admin_ip, - command='puppet apply /etc/puppet/modules/fuel/examples/nailgun.pp' - ) - - def check_api_group_enabled(): - try: - return (group in - env.fuel_web.client.get_api_version()["feature_groups"]) - except exceptions.HttpError: - return False - - wait(check_api_group_enabled, interval=10, timeout=60 * 20, - timeout_msg='Failed to enable feature group - {!r}'.format(group)) - - -def find_backup(ip): - backups = ssh_manager.execute(ip, - "ls -1u /var/backup/fuel/*/*.lrz")["stdout"] - if backups: - arch_path = backups[0] - logger.info('Backup archive found: {0}'.format(arch_path)) - return arch_path - else: - raise ValueError("No backup file found in the '/var/backup/fuel/'") - - -@logwrap -def backup_check(ip): - logger.info("Backup check archive status") - path = find_backup(ip) - assert_true(path, "Can not find backup. 
Path value '{0}'".format(path)) - test_result = ssh_manager.execute(ip, - "test -e {0}".format(path.rstrip())) - assert_true(test_result['exit_code'] == 0, - "Archive '{0}' does not exist".format(path.rstrip())) - - -_md5_record = re.compile(r'(?P\w+)[ \t]+(?P\w+)') - - -def parse_md5sum_output(string): - """Process md5sum command output and return dict filename: md5 - - :param string: output of md5sum - :type string: str - :rtype: dict - :return: dict - """ - return {filename: md5 for md5, filename in _md5_record.findall(string)} - - -def diff_md5(before, after, no_dir_change=True): - """Diff md5sum output - - :type before: str - :type after: str - :param no_dir_change: Check, that some files was added or removed - :type no_dir_change: bool - """ - before_dict = parse_md5sum_output(before) - after_dict = parse_md5sum_output(after) - - before_files = set(before_dict.keys()) - after_files = set(after_dict.keys()) - - diff_filenames = before_files ^ after_files - - dir_change = ( - "Directory contents changed:\n" - "\tRemoved files: {removed}\n" - "\tNew files: {created}".format( - removed=[ - filename for filename in diff_filenames - if filename in before_files], - created=[ - filename for filename in diff_filenames - if filename in after_files], - ) - ) - if no_dir_change: - assert_true(len(diff_filenames) == 0, dir_change) - else: - logger.debug(dir_change) - - changelist = [ - { - 'filename': filename, - 'before': before_dict[filename], - 'after': after_dict[filename]} - for filename in before_files & after_files - if before_dict[filename] != after_dict[filename] - ] - assert_true( - len(changelist) == 0, - "Files has been changed:\n" - "{}".format( - "".join( - map( - lambda record: "{filename}: {before} -> {after}\n".format( - **record), - changelist - ) - ) - ) - ) - - -@logwrap -def restore_check_sum(ip): - logger.debug('Check if removed file /etc/fuel/data was restored') - - assert_true( - ssh_manager.exists_on_remote(ip=ip, path='/etc/fuel/data'), - 'Test file /etc/fuel/data was not restored!!!') - - logger.info("Restore check md5sum") - md5sum_backup = ssh_manager.check_call(ip, "cat /etc/fuel/sum") - assert_true(md5sum_backup['stdout_str'], - 'Command cat /etc/fuel/sum ' - 'failed with {0}'.format(md5sum_backup['stderr'])) - md5sum_restore = ssh_manager.check_call( - ip=ip, - command="md5sum /etc/fuel/data | sed -n 1p | awk '{print $1}'" - ) - assert_equal( - md5sum_backup.stdout_str, md5sum_restore.stdout_str, - "Checksum is not equal:\n" - "\tOLD: {0}\n" - "\tNEW: {1}".format( - md5sum_backup.stdout_str, md5sum_restore.stdout_str - ) - ) - - -@logwrap -def iptables_check(ip): - logger.info("Iptables check") - ssh_manager.execute(ip, "iptables-save > /etc/fuel/iptables-restore") - iptables_backup = ssh_manager.execute( - ip=ip, - cmd="sed -e '/^:/d; /^#/d' /etc/fuel/iptables-backup" - ) - iptables_restore = ssh_manager.execute( - ip=ip, - cmd="sed -e '/^:/d; /^#/d' /etc/fuel/iptables-restore" - ) - assert_equal(iptables_backup, iptables_restore, - "list of iptables rules are not equal") - - -@logwrap -def check_mysql(ip, node_name): - check_cmd = 'pkill -0 -x mysqld' - check_crm_cmd = ('crm resource status clone_p_mysqld |' - ' grep -q "is running on: $HOSTNAME"') - check_galera_cmd = ("mysql --connect_timeout=5 -sse \"SELECT" - " VARIABLE_VALUE FROM" - " information_schema.GLOBAL_STATUS" - " WHERE VARIABLE_NAME" - " = 'wsrep_local_state_comment';\"") - - wait(lambda: ssh_manager.execute(ip, check_cmd)['exit_code'] == 0, - timeout=10 * 60, - timeout_msg='MySQL daemon is down on 
{0}'.format(node_name)) - logger.info('MySQL daemon is started on {0}'.format(node_name)) - - # TODO(astudenov): add timeout_msg - wait_pass( - lambda: assert_equal( - ssh_manager.execute( - ip, - check_crm_cmd)['exit_code'], - 0, - 'MySQL resource is NOT running on {0}'.format(node_name)), - timeout=120) - try: - wait(lambda: ''.join(ssh_manager.execute( - ip, check_galera_cmd)['stdout']).rstrip() == 'Synced', timeout=600, - timeout_msg='galera status != "Synced" on node {!r} with ip {}' - ''.format(node_name, ip)) - except TimeoutError: - logger.error('galera status is {0}'.format(''.join(ssh_manager.execute( - ip, check_galera_cmd)['stdout']).rstrip())) - raise - - -@logwrap -def check_action_logs(scenario, postgres_actions): - def _check(_action, _group=False): - assert_true(postgres_actions.action_logs_contain(_action, _group), - "Action logs are missed for '{0}'!".format( - _action)) - - actions = [ - { - 'desc': [''], - 'name': ['master_node_settings'], - 'group': [], - 'regex': False, - }, - { - 'desc': [r'create\s+.*(cluster|environment|cloud)'], - 'name': ['cluster_collection'], - 'group': ['cluster_attributes', 'network_configuration'], - 'regex': True, - }, - { - 'desc': ['deploy'], - 'name': ['deploy_changes', 'provision', 'deployment', - 'cluster_collection', 'check_before_deployment'], - 'group': ['cluster_changes', 'cluster_checking'], - 'regex': True, - }, - { - 'desc': [r'verif.*\s+.*network|network.*\s+.*verif'], - 'name': ['check_networks', 'verify_networks'], - 'group': ['network_verification'], - 'regex': True, - }, - { - 'desc': [r'(stop|abort).*(deployment|provision)'], - 'name': ['stop_deployment'], - 'group': ['cluster_changes'], - 'regex': True, - }, - { - 'desc': [r'reset.*(deployment|provision)'], - 'name': ['reset'], - 'group': ['cluster_changes'], - 'regex': True, - }, - { - 'desc': [r'rename.*(cluster|environment|cloud)'], - 'name': ['cluster_instance'], - 'group': ['cluster_changes'], - 'regex': True, - }, - { - 'desc': [r'upgrade'], - 'name': ['releases_collection'], - 'group': ['release_changes'], - 'regex': True, - }, - { - 'desc': [r'update.*(cluster|environment|cloud)'], - 'name': ['update'], - 'group': ['cluster_changes'], - 'regex': True, - }, - { - 'desc': [r'upload.*deployment'], - 'name': ['deployment_info'], - 'group': ['orchestrator'], - 'regex': True, - }, - { - 'desc': [r'upload.*provisioning'], - 'name': ['provisioning_info'], - 'group': ['orchestrator'], - 'regex': True, - }, - # Logging of OSTF isn't implemented yet, so actions list is - # empty - { - 'desc': ['OSTF', 'Health'], - 'name': [], - 'group': [], - 'regex': False, - }, - ] - - # Check logs in Nailgun database - for action in actions: - if action['regex']: - if not any(re.search(regex, scenario, re.IGNORECASE) - for regex in action['desc']): - continue - elif not any(action in scenario for action in action['desc']): - logger.info(action['desc']) - continue - for action_name in action['name']: - _check(action_name, _group=False) - for action_group in action['group']: - _check(action_group, _group=True) - - -def execute_query_on_collector(collector_remote, master_uuid, query, - collector_db='collector', - collector_db_user='collector', - collector_db_pass='collector'): - if master_uuid is not None: - query = "{0} where master_node_uid = '{1}';".format(query, master_uuid) - cmd = 'PGPASSWORD={0} psql -qt -h 127.0.0.1 -U {1} -d {2} -c "{3}"'.\ - format(collector_db_pass, collector_db_user, collector_db, query) - logger.debug('query collector is {0}'.format(cmd)) - return 
''.join(collector_remote.execute(cmd)['stdout']).strip()
-
-
-def count_stats_on_collector(collector_remote, master_uuid):
-    return execute_query_on_collector(collector_remote, master_uuid=None,
-                                      query="select (select count(*) from "
-                                            "action_logs where master_node_uid"
-                                            " = \'{0}\') + (select count(*) "
-                                            "from installation_structures "
-                                            "where master_node_uid = \'{0}\')".
-                                      format(master_uuid))
-
-
-@logwrap
-def check_stats_on_collector(collector_remote, postgres_actions, master_uuid):
-    sent_logs_count = postgres_actions.count_sent_action_logs()
-    logger.info("Number of logs that were sent to collector: {}".format(
-        sent_logs_count
-    ))
-    logs = collector_remote.get_action_logs_count(master_uuid)
-    logger.info("Number of logs that were saved on collector: {}".format(logs))
-    assert_true(sent_logs_count <= int(logs),
-                ("Count of action logs in Nailgun DB ({0}) is bigger than on "
-                 "Collector ({1}), but should be less or equal").format(
-                    sent_logs_count, logs))
-
-    sum_stats_count = len(
-        [collector_remote.get_installation_info(master_uuid)['id']])
-    assert_equal(int(sum_stats_count), 1,
-                 "Installation structure wasn't saved on Collector side "
-                 "properly: found: {0}, expected: 1 record.".format(
-                     sum_stats_count))
-
-    summ_stats = collector_remote.get_installation_info_data(master_uuid)
-    general_stats = {
-        'clusters_num': int,
-        'allocated_nodes_num': int,
-        'unallocated_nodes_num': int,
-        'fuel_release': dict,
-        'clusters': list,
-        'user_information': dict,
-    }
-
-    # Check that important data (clusters number, nodes number, nodes roles,
-    # user's email, used operating system, OpenStack stats) is saved correctly
-    for stat_type in general_stats:
-        assert_true(
-            isinstance(summ_stats[stat_type], general_stats[stat_type]),
-            "Installation structure in Collector's DB doesn't contain "
-            "the following stats: {0}".format(stat_type))
-
-    real_clusters_number = int(postgres_actions.run_query(
-        db='nailgun', query='select count(*) from clusters;'))
-    assert_equal(real_clusters_number, summ_stats['clusters_num'],
-                 'Real clusters number is {0}, but usage statistics says '
-                 'that clusters number is {1}'.format(
-                     real_clusters_number, summ_stats['clusters_num']))
-
-    real_allocated_nodes_num = int(postgres_actions.run_query(
-        db='nailgun',
-        query='select count(id) from nodes where cluster_id is not Null;'))
-    assert_equal(real_allocated_nodes_num, summ_stats['allocated_nodes_num'],
-                 'Real allocated nodes number is {0}, but usage statistics '
-                 'says that allocated nodes number is {1}'.format(
-                     real_allocated_nodes_num,
-                     summ_stats['allocated_nodes_num']))
-
-    real_user_email = json.loads(postgres_actions.run_query(
-        db='nailgun', query='select settings from master_node_settings;')
-    )['statistics']['email']['value']
-    assert_equal(real_user_email, summ_stats['user_information']['email'],
-                 "Usage statistics contains incorrect user's email address: "
-                 "'{0}', but should be {1}".format(
-                     summ_stats['user_information']['email'],
-                     real_user_email))
-
-    for cluster in summ_stats['clusters']:
-        for node in cluster['nodes']:
-            assert_true(len(node['roles']) > 0,
-                        "Usage statistics contains nodes without roles: node-"
-                        "{0} roles: {1}".format(node['id'], node['roles']))
-        assert_equal(len(cluster['nodes']), cluster['nodes_num'],
-                     "Usage statistics contains incorrect number of nodes "
-                     "assigned to cluster!")
-        real_cluster_os = postgres_actions.run_query(
-            db="nailgun", query="select operating_system from releases where "
-                                "id = (select release_id from clusters where "
-                                "id = {0});".format(cluster['id']))
-        assert_equal(real_cluster_os, cluster['release']['os'],
-                     "Usage statistics contains incorrect operating system "
-                     "that is used for environment with ID '{0}'. Expected: "
-                     "'{1}', reported: '{2}'.".format(
-                         cluster['id'], real_cluster_os,
-                         cluster['release']['os']))
-
-    logger.info("Usage stats were properly saved to collector's database.")
-
-
-@logwrap
-def check_stats_private_info(collector_remote, postgres_actions,
-                             master_uuid, _settings):
-    def _contain_secret_data(data):
-        _has_private_data = False
-        # Check that stats don't contain private data (e.g.
-        # specific passwords, settings, emails)
-        for _private in private_data:
-            _regex = r'(?P<key>"\S+"): (?P<value>[^:]*"{0}"[^:]*)'.format(
-                private_data[_private])
-            for _match in re.finditer(_regex, data):
-                logger.warning('Found private info in usage statistics using '
-                               'pattern: {0}'.format(_regex))
-                logger.debug('Usage statistics with private data:\n {0}'.
-                             format(data))
-                logger.error("Usage statistics contains private info: '{type}:"
-                             " {value}'. Part of the stats: {match}".format(
-                                 type=_private,
-                                 value=private_data[_private],
-                                 match=_match.group('key', 'value')))
-                _has_private_data = True
-        # Check that stats don't contain private types of data (e.g. any kind
-        # of passwords)
-        for _data_type in secret_data_types:
-            _regex = (r'(?P<secret>"[^"]*{0}[^"]*": (\{{[^\}}]+\}}|\[[^\]]+\]|'
-                      r'"[^"]+"))').format(secret_data_types[_data_type])
-
-            for _match in re.finditer(_regex, data, re.IGNORECASE):
-                logger.warning('Found private info in usage statistics using '
-                               'pattern: {0}'.format(_regex))
-                logger.debug('Usage statistics with private data:\n {0}'.
-                             format(data))
-                logger.error("Usage statistics contains private info: '{type}:"
-                             " {value}'. Part of the stats: {match}".format(
-                                 type=_data_type,
-                                 value=secret_data_types[_data_type],
-                                 match=_match.group('secret')))
-                _has_private_data = True
-        return _has_private_data
-
-    def _contain_public_ip(data, _used_networks):
-        _has_public_ip = False
-        _ip_regex = (r'\b((\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}'
-                     r'(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\b')
-        _not_public_regex = [
-            r'\b10(\.\d{1,3}){3}',
-            r'\b127(\.\d{1,3}){3}',
-            r'\b169\.254(\.\d{1,3}){2}',
-            r'172\.(1[6-9]|2[0-9]|3[0-1])(\.\d{1,3}){2}',
-            r'192\.168(\.\d{1,3}){2}',
-            r'2(2[4-9]|[3-5][0-9])(\.\d{1,3}){3}'
-        ]
-        for _match in re.finditer(_ip_regex, data):
-            # If the IP address isn't public and doesn't belong to the pools
-            # defined for the deployment (e.g. admin, public, storage),
-            # then skip it
-            if any(re.search(_r, _match.group()) for _r in _not_public_regex) \
-                    and not any(IPAddress(str(_match.group())) in
-                                IPNetwork(str(net)) for
-                                net in _used_networks):
-                continue
-            logger.debug(
-                'Usage statistics with public IP(s):\n {0}'.format(data))
-            logger.error('Found public IP in usage statistics: "{0}"'.format(
-                _match.group()))
-            _has_public_ip = True
-        return _has_public_ip
-
-    private_data = {
-        'hostname': _settings['HOSTNAME'],
-        'dns_domain': _settings['DNS_DOMAIN'],
-        'dns_search': _settings['DNS_SEARCH'],
-        'dns_upstream': _settings['DNS_UPSTREAM'],
-        'fuel_password': (
-            _settings['FUEL_ACCESS']['password']
-            if _settings['FUEL_ACCESS']['password'] != 'admin'
-            else 'DefaultPasswordIsNotAcceptableForSearch'),
-        'nailgun_password': _settings['postgres']['nailgun_password'],
-        'keystone_password': _settings['postgres']['keystone_password'],
-        'ostf_password': _settings['postgres']['ostf_password'],
-        'cobbler_password': _settings['cobbler']['password'],
-        'astute_password': _settings['astute']['password'],
-        'mcollective_password': _settings['mcollective']['password'],
-        'keystone_admin_token': _settings['keystone']['admin_token'],
-        'keystone_nailgun_password': _settings['keystone']['nailgun_password'],
-        'keystone_ostf_password': _settings['keystone']['ostf_password'],
-    }
-
-    secret_data_types = {
-        'some_password': 'password',
-        'some_login': 'login',
-        'some_tenant': 'tenant',
-        'some_token': 'token',
-        'some_ip': r'\bip\b',
-        'some_netmask': 'netmask',
-        'some_network': r'network\b',
-    }
-
-    action_logs = [l.strip() for l in postgres_actions.run_query(
-        'nailgun', 'select id from action_logs;').split('\n')]
-    sent_stats = str(collector_remote.get_installation_info_data(master_uuid))
-    logger.debug('installation structure is {0}'.format(sent_stats))
-    used_networks = [settings.POOLS[net_name][0]
-                     for net_name in settings.POOLS.keys()]
-    has_no_private_data = True
-
-    logger.debug("Looking for private data in the installation structure "
-                 "that was sent to collector")
-
-    if _contain_secret_data(sent_stats) or _contain_public_ip(sent_stats,
-                                                              used_networks):
-        has_no_private_data = False
-
-    for log_id in action_logs:
-        log_data = postgres_actions.run_query(
-            'nailgun',
-            "select additional_info from action_logs where id = '{0}';".format(
-                log_id
-            ))
-        logger.debug("Looking for private data in action log with ID={0}".
-                     format(log_id))
-        if _contain_secret_data(log_data) or _contain_public_ip(log_data,
-                                                                used_networks):
-            has_no_private_data = False
-
-    assert_true(has_no_private_data, 'Found private data in stats, check test '
-                'output and logs for details.')
-    logger.info('Found no private data in logs')
-
-
-def check_kernel(kernel, expected_kernel):
-    assert_equal(kernel, expected_kernel,
-                 "Kernel version is wrong: got {0}, "
-                 "expected {1}".format(kernel, expected_kernel))
-
-
-@logwrap
-def external_dns_check(ip):
-    logger.info("External dns check")
-    provided_dns = settings.EXTERNAL_DNS
-    logger.debug("provided to test dns is {}".format(provided_dns))
-    cluster_dns = []
-    for dns in provided_dns:
-        ext_dns_ip = ''.join(
-            ssh_manager.execute(
-                ip=ip,
-                cmd="grep {0} /etc/resolv.dnsmasq.conf | "
-                    "awk {{'print $2'}}".format(dns)
-            )["stdout"]).rstrip()
-        cluster_dns.append(ext_dns_ip)
-    logger.debug("external dns in conf is {}".format(cluster_dns))
-    assert_equal(set(provided_dns), set(cluster_dns),
-                 "/etc/resolv.dnsmasq.conf does not contain external dns ip")
-    command_hostname = ''.join(
-        ssh_manager.execute(ip,
-                            "host {0} | awk {{'print $5'}}"
-                            .format(settings.PUBLIC_TEST_IP))
-        ["stdout"]).rstrip()
-    hostname = 'google-public-dns-a.google.com.'
-    assert_equal(command_hostname, hostname,
-                 "Can't resolve hostname")
-
-
-def verify_bootstrap_on_node(ip, os_type, uuid=None):
-    os_type = os_type.lower()
-    if 'ubuntu' not in os_type:
-        raise Exception("Only Ubuntu is supported, "
-                        "you have chosen {0}".format(os_type))
-
-    logger.info("Verify bootstrap on slave {0}".format(ip))
-
-    cmd = 'cat /etc/*release'
-    output = ssh_manager.execute_on_remote(ip, cmd)['stdout_str'].lower()
-    assert_true(os_type in output,
-                "Slave {0} doesn't use {1} image for bootstrap "
-                "after {1} images were enabled, /etc/release "
-                "content: {2}".format(ip, os_type, output))
-    if not uuid:
-        return
-
-    with ssh_manager.open_on_remote(
-            ip=ip,
-            path='/etc/nailgun-agent/config.yaml') as f:
-        data = yaml.safe_load(f)
-
-    actual_uuid = data.get("runtime_uuid")
-    assert_equal(actual_uuid, uuid,
-                 "Actual uuid {0} is not the same as expected {1}"
-                 .format(actual_uuid, uuid))
-
-
-@logwrap
-def external_ntp_check(ip, vrouter_vip):
-    logger.info("External ntp check")
-    provided_ntp = settings.EXTERNAL_NTP
-    logger.debug("provided to test ntp is {}".format(provided_ntp))
-    cluster_ntp = []
-    for ntp in provided_ntp:
-        ext_ntp_ip = ''.join(
-            ssh_manager.execute(
-                ip=ip,
-                cmd="awk '/^server +{0}/{{print $2}}' "
-                    "/etc/ntp.conf".format(ntp))["stdout"]).rstrip()
-        cluster_ntp.append(ext_ntp_ip)
-    logger.debug("external ntp in conf is {}".format(cluster_ntp))
-    assert_equal(set(provided_ntp), set(cluster_ntp),
-                 "/etc/ntp.conf does not contain external ntp ip")
-    try:
-        wait(
-            lambda: is_ntpd_active(ip, vrouter_vip), timeout=120)
-    except Exception as e:
-        logger.error(e)
-        status = is_ntpd_active(ip, vrouter_vip)
-        assert_equal(
-            status, 1, "Failed to update ntp. "
-                       "Exit code is {0}".format(status))
-
-
-def check_swift_ring(ip):
-    for ring in ['object', 'account', 'container']:
-        res = ''.join(ssh_manager.execute(
-            ip, "swift-ring-builder /etc/swift/{0}.builder".format(
-                ring))['stdout'])
-        logger.debug("swift ring builder information is {0}".format(res))
-        balance = re.search(r'(\d+\.\d+) balance', res).group(1)
-        assert_true(float(balance) < 10,
-                    "swift ring builder {1} is not ok,"
-                    " balance is {0}".format(balance, ring))
-
-
-def check_oswl_stat(postgres_actions, nailgun_actions,
-                    remote_collector, master_uid,
-                    operation='current',
-                    resources=None):
-    if resources is None:
-        resources = [
-            'vm', 'flavor', 'volume', 'image', 'tenant', 'keystone_user'
-        ]
-    logger.info("Checking that all resources were collected...")
-    expected_resource_count = {
-        'current':
-            {'vm': 0,
-             'flavor': 2,
-             'volume': 0,
-             'image': 0,
-             'tenant': 2,
-             'keystone_user': 8
-             },
-        'modified':
-            {'vm': 0,
-             'flavor': 0,
-             'volume': 0,
-             'image': 0,
-             'tenant': 0,
-             'keystone_user': 0
-             },
-        'removed':
-            {'vm': 0,
-             'flavor': 0,
-             'volume': 0,
-             'image': 0,
-             'tenant': 0,
-             'keystone_user': 0
-             }
-    }
-    for resource in resources:
-        q = "select resource_data from oswl_stats where" \
-            " resource_type = '\"'\"'{0}'\"'\"';".format(resource)
-
-        # pylint: disable=undefined-loop-variable
-        def get_resource():
-            result = postgres_actions.run_query('nailgun', q)
-            logger.debug("resource state is {}".format(result))
-            if not result:
-                return False
-            return (
-                len(json.loads(result)[operation]) >
-                expected_resource_count[operation][resource])
-        # pylint: enable=undefined-loop-variable
-
-        wait(get_resource, timeout=10,
-             timeout_msg="resource {} wasn't updated in db".format(resource))
-        q_result = postgres_actions.run_query('nailgun', q)
-        assert_true(q_result.strip(),
"Resource {0} is absent in 'oswl_stats' table, " - "please check /var/log/nailgun/oswl_{0}" - "_collectord.log on Fuel admin node for details." - .format(resource)) - resource_data = json.loads(q_result) - - logger.debug('db return {0}'.format(resource_data)) - assert_true(len(resource_data['added']) > - expected_resource_count[operation][resource], - "resource {0} wasn't added," - " added is {1}".format(resource, resource_data['added'])) - assert_true(len(resource_data[operation]) > - expected_resource_count[operation][resource], - "number of resources in current {0}," - " expected is {1}".format(len(resource_data[operation]), - expected_resource_count[ - operation][resource])) - - # check stat on collector side - - def are_logs_sent(): - sent_logs = postgres_actions.count_sent_action_logs( - table='oswl_stats') - result = sent_logs == 6 - if not result: - nailgun_actions.force_fuel_stats_sending() - return result - - wait(are_logs_sent, timeout=20, - timeout_msg='Logs status was not changed to sent in db') - sent_logs_count = postgres_actions.count_sent_action_logs( - table='oswl_stats') - logger.info("Number of logs that were sent to collector: {}".format( - sent_logs_count - )) - logger.debug('oswls are {}'.format(remote_collector.get_oswls(master_uid))) - logs = remote_collector.get_oswls(master_uid)['paging_params']['total'] - logger.info("Number of logs that were saved" - " on collector: {}".format(logs)) - assert_true(sent_logs_count <= int(logs), - ("Count of action logs in Nailgun DB ({0}) is bigger than on " - "Collector ({1}), but should be less or equal").format( - sent_logs_count, logs)) - for resource in resources: - resource_data = remote_collector.get_oswls_by_resource_data( - master_uid, resource) - - logger.debug('resource data on' - ' collector is {0}'.format(resource_data)) - assert_true(len(resource_data['added']) > - expected_resource_count[operation][resource], - "resource {0} wasn't added," - " added is {1}".format(resource, resource_data['added'])) - assert_true(len(resource_data[operation]) > - expected_resource_count[operation][resource], - "number of resources in current {0}," - " expected is {1}".format(len(resource_data[operation]), - expected_resource_count[ - operation][resource])) - - logger.info("OSWL stats were properly saved to collector's database.") - - -@logwrap -def check_ping(ip, host, deadline=10, size=56, timeout=1, interval=1): - """Check network connectivity from remote to host using ICMP (ping) - :param ip: remote ip - :param host: string IP address or host/domain name - :param deadline: time in seconds before ping exits - :param size: size of data to be sent - :param timeout: time to wait for a response, in seconds - :param interval: wait interval seconds between sending each packet - :return: bool: True if ping command - """ - ssh_manager = SSHManager() - cmd = ("ping -W {timeout} -i {interval} -s {size} -c 1 -w {deadline} " - "{host}".format(host=host, - size=size, - timeout=timeout, - interval=interval, - deadline=deadline)) - res = ssh_manager.execute(ip, cmd) - return int(res['exit_code']) == 0 - - -@logwrap -def check_neutron_dhcp_lease(ip, instance_ip, instance_mac, - dhcp_server_ip, dhcp_port_tag): - """Check if the DHCP server offers a lease for a client with the specified - MAC address - :param ip: remote IP - :param str instance_ip: IP address of instance - :param str instance_mac: MAC address that will be checked - :param str dhcp_server_ip: IP address of DHCP server for request a lease - :param str dhcp_port_tag: OVS port tag 
used to access the DHCP server
-    :return bool: True if a DHCP lease for 'instance_mac' was obtained
-    """
-    logger.debug("Checking DHCP server {0} for lease {1} with MAC address {2}"
-                 .format(dhcp_server_ip, instance_ip, instance_mac))
-    ovs_port_name = 'tapdhcptest1'
-    ovs_cmd = '/usr/bin/ovs-vsctl --timeout=10 --oneline --format=json -- '
-    ovs_add_port_cmd = ("--if-exists del-port {0} -- "
-                        "add-port br-int {0} -- "
-                        "set Interface {0} type=internal -- "
-                        "set Port {0} tag={1}"
-                        .format(ovs_port_name, dhcp_port_tag))
-    ovs_del_port_cmd = ("--if-exists del-port {0}".format(ovs_port_name))
-
-    # Add an OVS interface with a tag for accessing the DHCP server
-    ssh_manager.execute_on_remote(ip, ovs_cmd + ovs_add_port_cmd)
-
-    # Set on the created interface the same MAC address
-    # that was used for the instance.
-    ssh_manager.execute_on_remote(
-        ip, "ifconfig {0} hw ether {1}".format(ovs_port_name,
-                                               instance_mac))
-    ssh_manager.execute_on_remote(ip, "ifconfig {0} up".format(ovs_port_name))
-
-    # Perform a 'dhcpcheck' request to check if the lease can be obtained
-    lease = ssh_manager.execute_on_remote(
-        ip=ip,
-        cmd="dhcpcheck request {0} {1} --range_start {2} "
-            "--range_end 255.255.255.255 | fgrep \" {1} \""
-            .format(ovs_port_name, dhcp_server_ip, instance_ip))['stdout']
-
-    # Remove the OVS interface
-    ssh_manager.execute_on_remote(ip, ovs_cmd + ovs_del_port_cmd)
-
-    logger.debug("DHCP server answer: {}".format(lease))
-    return ' ack ' in lease
-
-
-def is_ntpd_active(ip, ntpd_ip):
-    cmd = 'ntpdate -d -p 4 -t 0.2 -u {0}'.format(ntpd_ip)
-    return not ssh_manager.execute(ip, cmd)['exit_code']
-
-
-def check_repo_managment(ip):
-    """Check repo management
-
-    Runs 'yum -y clean all && yum check-update' or
-    'apt-get clean all && apt-get update'; the exit code should be 0
-
-    :param ip: node ip
-    :rtype: dict
-    """
-    if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_UBUNTU:
-        cmd = "apt-get clean all && apt-get update > /dev/null"
-    else:
-        cmd = "yum -y clean all && yum check-update > /dev/null"
-    ssh_manager.execute_on_remote(
-        ip=ip,
-        cmd=cmd
-    )
-
-
-def check_public_ping(ip):
-    """Check ping of the public VIP
-
-    :param ip: node ip
-    """
-    cmd = ('ruby /etc/puppet/modules/osnailyfacter/'
-           'modular/virtual_ips/public_vip_ping_post.rb')
-    ssh_manager.execute_on_remote(
-        ip=ip,
-        cmd=cmd,
-        err_msg='Public ping check failed'
-    )
-
-
-def check_cobbler_node_exists(ip, node_id):
-    """Check that a node with the given node_id
-    is present in the cobbler node list
-
-    :param ip: node ip
-    :param node_id: fuel node id
-    :return: bool: True if exit code of the command is 0
-    """
-    logger.debug("Check that cluster contains node with ID:{0} ".
-                 format(node_id))
-    node = ssh_manager.execute(
-        ip=ip,
-        cmd='bash -c "cobbler system list" | grep '
-            '-w "node-{0}"'.format(node_id)
-    )
-    return int(node['exit_code']) == 0
-
-
-def check_cluster_presence(cluster_id, postgres_actions):
-    logger.debug("Check cluster presence")
-    query_result = postgres_actions.run_query(
-        db='nailgun',
-        query="select id from clusters where id={0}".format(cluster_id))
-    return str(cluster_id) in query_result
-
-
-def check_haproxy_backend(ip,
-                          services=None, nodes=None,
-                          ignore_services=None, ignore_nodes=None):
-    """Check for HAProxy backends in DOWN state. Pass service or node names
-    to check only specific services or nodes. Use ignore_services to skip
-    a service on all nodes, and ignore_nodes to skip all services on the
-    given nodes. Ignoring takes priority.
-
-    :type ip: node ip
-    :type services: List
-    :type nodes: List
-    :type ignore_services: List
-    :type ignore_nodes: List
-    :rtype: Dict
-    """
-    cmd = 'haproxy-status | egrep -v "BACKEND|FRONTEND" | grep "DOWN"'
-
-    positive_filter = (services, nodes)
-    negative_filter = (ignore_services, ignore_nodes)
-    grep = ['|egrep "{}"'.format('|'.join(n)) for n in positive_filter if n]
-    grep.extend(
-        ['|egrep -v "{}"'.format('|'.join(n)) for n in negative_filter if n])
-
-    result = ssh_manager.execute(
-        ip=ip,
-        cmd="{}{}".format(cmd, ''.join(grep))
-    )
-    return result
-
-
-def check_log_lines_order(ip, log_file_path, line_matcher):
-    """Read a log file and check that lines appear in the same order as
-    the strings in the list
-
-    :param ip: ip of node in str format
-    :param log_file_path: path to log file
-    :param line_matcher: list of strings to search
-    """
-    check_file_exists(ip, path=log_file_path)
-
-    previous_line_pos = 1
-    previous_line = None
-    for current_line in line_matcher:
-        cmd = 'tail -n +{0} {1} | grep -n "{2}"'\
-            .format(previous_line_pos, log_file_path, current_line)
-
-        result = ssh_manager.execute_on_remote(
-            ip=ip,
-            cmd=cmd,
-            err_msg="Line '{0}' not found after line '{1}' in the file "
-                    "'{2}'.".format(current_line, previous_line, log_file_path)
-        )
-
-        # Fail if the line was found more than once
-        assert_equal(1,
-                     len(result['stdout']),
-                     "Found {0} lines like {1} but should be only 1 in {2}"
-                     " Command '{3}' executed with exit_code='{4}'\n"
-                     "stdout:\n* {5} *\n"
-                     "stderr:\n* {6} *\n"
-                     .format(len(result['stdout']),
-                             current_line,
-                             log_file_path,
-                             cmd,
-                             result['exit_code'],
-                             '\n'.join(result['stdout']),
-                             '\n'.join(result['stderr'])))
-
-        current_line_pos = int(result['stdout'][0].split(':')[0])
-
-        previous_line_pos += current_line_pos
-        previous_line = current_line
-
-
-def check_hiera_hosts(nodes, cmd):
-    hiera_hosts = []
-    for node in nodes:
-        result = ssh_manager.execute_on_remote(
-            ip=node['ip'],
-            cmd=cmd
-        )['stdout_str']
-        hosts = result.split(',')
-        logger.debug("hosts on {0} are {1}".format(node['hostname'], hosts))
-
-        if not hiera_hosts:
-            hiera_hosts = hosts
-        else:
-            assert_true(set(hosts) == set(hiera_hosts),
-                        'Hosts on node {0} differ from'
-                        ' others'.format(node['hostname']))
-
-
-def check_client_smoke(ip):
-    fuel_output = ssh_manager.execute(
-        ip=ip,
-        cmd='fuel env list'
-    )['stdout'][2].split('|')[2].strip()
-    fuel_2_output = ssh_manager.execute(
-        ip=ip,
-        cmd='fuel2 env list'
-    )['stdout'][3].split('|')[3].strip()
-    assert_equal(fuel_output, fuel_2_output,
-                 "The fuel: {0} and fuel2: {1} outputs are not equal".format(
-                     fuel_output, fuel_2_output))
-
-
-def check_offload(ip, interface, offload_type):
-    command = ("ethtool --show-offload {0} | awk '/{1}/' "
-               "| cut -d ':' -f 2").format(interface, offload_type)
-
-    result = ssh_manager.execute_on_remote(
-        ip=ip,
-        cmd=command,
-        err_msg=("Failed to get Offload {0} "
-                 "on node {1}").format(offload_type, ip)
-    )
-    return result['stdout_str']
-
-
-def check_get_network_data_over_cli(ip, cluster_id, path):
-    logger.info("Download network data over cli")
-    cmd = 'fuel --debug --env {0} network --dir {1} --json -d'.format(
-        cluster_id, path)
-    ssh_manager.execute_on_remote(
-        ip=ip,
-        cmd=cmd,
-        err_msg='Failed to download network data'
-    )
-
-
-def check_update_network_data_over_cli(ip, cluster_id, path):
-    logger.info("Upload network data over cli")
-    cmd = 'fuel --debug --env {0} network --dir {1} --json -u'.format(
-        cluster_id, path)
-    ssh_manager.execute_on_remote(
-        ip=ip,
-        cmd=cmd,
-        err_msg='Failed to upload network data'
-    )
-
-
-def
check_plugin_path_env(var_name, plugin_path): - assert_true( - plugin_path, - '{var_name:s} variable is not set or set incorrectly: ' - '{plugin_path!r}'.format( - var_name=var_name, - plugin_path=plugin_path) - ) - assert_true( - os.path.exists(plugin_path), - 'File {plugin_path:s} (variable: {var_name:s}) does not exists!' - ''.format(plugin_path=plugin_path, var_name=var_name) - ) - - -def incomplete_tasks(tasks, cluster_id=None): - def get_last_tasks(): - last_tasks = {} - for tsk in tasks: - if cluster_id is not None and cluster_id != tsk['cluster']: - continue - if (tsk['cluster'], tsk['name']) not in last_tasks: - last_tasks[(tsk['cluster'], tsk['name'])] = tsk - return last_tasks - - deploy_tasks = {} - not_ready_tasks = {} - allowed_statuses = {'ready', 'skipped'} - - for (task_cluster, task_name), task in get_last_tasks().items(): - if task_name == 'deployment': - deploy_tasks[task['cluster']] = task['id'] - if task['status'] not in allowed_statuses: - if task_cluster not in not_ready_tasks: - not_ready_tasks[task_cluster] = [] - not_ready_tasks[task_cluster].append(task) - - return not_ready_tasks, deploy_tasks - - -def incomplete_deploy(deployment_tasks): - allowed_statuses = {'ready', 'skipped'} - not_ready_deploy = {} - - for cluster_id, tasks in deployment_tasks.items(): - not_ready_jobs = {} - for task in filter( - lambda tsk: tsk['status'] not in allowed_statuses, - tasks): - if task['node_id'] not in not_ready_jobs: - not_ready_jobs[task['node_id']] = [] - not_ready_jobs[task['node_id']].append(task) - if not_ready_jobs: - not_ready_deploy[cluster_id] = not_ready_jobs - - return not_ready_deploy - - -def fail_deploy(not_ready_transactions): - if len(not_ready_transactions) > 0: - cluster_info_template = "\n\tCluster ID: {cluster}{info}\n" - task_details_template = ( - "\n" - "\t\t\tTask name: {task_name}\n" - "\t\t\t\tStatus: {status}\n" - "\t\t\t\tStart: {time_start}\n" - "\t\t\t\tEnd: {time_end}\n" - ) - - failure_text = 'Not all deployments tasks completed: {}'.format( - ''.join( - cluster_info_template.format( - cluster=cluster, - info="".join( - "\n\t\tNode: {node_id}{details}\n".format( - node_id=node_id, - details="".join( - task_details_template.format(**task) - for task in sorted( - tasks, - key=lambda item: item['status']) - )) - for node_id, tasks in sorted(records.items()) - )) - for cluster, records in sorted(not_ready_transactions.items()) - )) - logger.error(failure_text) - assert_true(len(not_ready_transactions) == 0, failure_text) - - -def check_free_space_admin(env, min_disk_admin=50, disk_id=0): - """Calculate available free space on /var and /var/log/ disk partitions - - :param env: environment model object - :param min_disk_admin: minimal disk size of admin node - :param disk_id: id of disk in the admin node's list of disks - """ - disk_size_admin = env.d_env.nodes().admin.disk_devices[ - disk_id].volume.get_capacity() - min_disk_admin *= 1024 ** 3 - if disk_size_admin < min_disk_admin: - raise ValueError( - "The minimal disk size should be {0}, current {1}".format( - min_disk_admin, disk_size_admin)) - admin_ip = env.ssh_manager.admin_ip - var_free_space = ssh_manager.check_call( - ip=admin_ip, - command="df -h /var")['stdout'][1].split()[3][:-1] - system_dirs = ['/boot/efi', '/boot$', 'docker-docker--pool', 'SWAP', - 'os-root'] - system_dirs_size = 0 - for sys_dir in system_dirs: - system_dir = ssh_manager.check_call( - ip=admin_ip, - command="lsblk -b | grep -we {0} | tail -1".format( - sys_dir))['stdout'][0] - system_dir = 
int(re.findall(r"\D(\d{9,12})\D", system_dir)[0]) - system_dirs_size += system_dir - system_files_var = int(ssh_manager.check_call( - ip=admin_ip, - command="df -B1 /var")['stdout'][1].split()[2]) - init_size = (min_disk_admin - system_dirs_size) - min_var_free_space = (init_size * 0.4 - system_files_var) / 1024 ** 3 - if var_free_space < min_var_free_space: - raise ValueError( - "The minimal /var size should be {0}, current {1}".format( - min_var_free_space, var_free_space)) - system_files_log = int(ssh_manager.check_call( - ip=admin_ip, - command="df -B1 /var/log")['stdout'][1].split()[2]) - min_log_free_space = (init_size * 0.6 - system_files_log) / 1024 ** 3 - log_free_space = ssh_manager.check_call( - ip=admin_ip, - command="df -h /var/log")['stdout'][1].split()[3][:-1] - if log_free_space < min_log_free_space: - raise ValueError( - "The minimal /var/log size should be {0}, current {1}".format( - min_log_free_space, log_free_space)) - - -def check_free_space_slave(env, min_disk_slave=150): - """Calculate available free space on /var/lib/nova disk partition - - :param env: environment model object - :param min_disk_slave: minimal disk size of slave node - """ - min_disk_slave *= 1024 ** 3 - disk_size_slave = 0 - active_nodes = [] - for node in env.d_env.nodes().slaves: - if node.driver.node_active(node): - active_nodes.append(node) - for slave_id in xrange(len(active_nodes)): - volume_slave_numb = len( - env.d_env.nodes().slaves[slave_id].disk_devices) - for disk_id in xrange(volume_slave_numb): - volume_size = env.d_env.nodes().slaves[slave_id].disk_devices[ - disk_id].volume.get_capacity() - disk_size_slave += volume_size - if disk_size_slave < min_disk_slave: - raise ValueError( - "The minimal disk size should be {0}, current {1}".format( - min_disk_slave, disk_size_slave)) - cluster_id = env.fuel_web.get_last_created_cluster() - compute_ip = [compute['ip'] for compute in - env.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])] - if compute_ip: - small_flavor_disk = 20 - for ip in compute_ip: - vm_storage_free_space = ssh_manager.check_call( - ip=ip, - command="df -h /var/lib/nova")['stdout'][1].split()[3][:-1] - if vm_storage_free_space < 4 * small_flavor_disk: - raise ValueError( - "The minimal vm-nova storage size should be {0}, " - "current {1}".format( - vm_storage_free_space, 4 * small_flavor_disk)) - - -@logwrap -def check_package_version(ip, package_name, expected_version, condition='ge'): - """Check that package version equal/not equal/greater/less than expected - - :param ip: ip - :param package_name: package name to check - :param expected_version: expected version of package - :param condition: predicate can be on of eq, ne, lt, le, ge, gt - :return None: or raise UnexpectedExitCode - """ - cmd = ("dpkg -s {0} " - "| awk -F': ' '/Version/ {{print $2}}'".format(package_name)) - logger.debug(cmd) - result = ssh_manager.execute_on_remote( - ip, - cmd=cmd, - assert_ec_equal=[0] - ) - version = result['stdout_str'] - logger.info('{} ver is {}'.format(package_name, version)) - err_msg = 'Package {} version is {} and not {} {}'.format(package_name, - version, - condition, - expected_version) - cmd = 'dpkg --compare-versions {0} {1} {2}'.format(version, condition, - expected_version) - ssh_manager.execute_on_remote(ip, cmd, assert_ec_equal=[0], - err_msg=err_msg) - - -def check_firewall_driver(ip, node_role, firewall_driver): - """Check which firewall driver is set for security groups - - :param ip: str, node ip - :param node_role: str, node role - :param 
firewall_driver: str, name of firewall driver for security group - """ - configpaths = { - 'compute': ['/etc/neutron/plugins/ml2/openvswitch_agent.ini'], - 'controller': ['/etc/neutron/plugins/ml2/openvswitch_agent.ini', - '/etc/neutron/plugins/ml2/ml2_conf.ini'] - } - if node_role not in configpaths: - logger.error('Passed value of node role {!r} is invalid for ' - 'the further check! Should use ' - '"compute" "controller" roles'.format(node_role)) - for configpath in configpaths[node_role]: - conf_for_check = get_ini_config( - ssh_manager.open_on_remote(ip, configpath)) - check_config(conf_for_check, configpath, 'securitygroup', - 'firewall_driver', firewall_driver) - - -def ping6_from_instance(through_host, instance_ip, target_ip): - """Ping6 target ip from instance - - :param through_host: obj, object of ssh connection - :param instance_ip: str, instance ip - :param target_ip: str, target ip - """ - res = through_host.execute_through_host( - hostname=instance_ip, - cmd="{ping:s} -q " - "-c{count:d} " - "-w{deadline:d} " - "-s{packetsize:d} " - "{dst_address:s}".format( - ping='ping6', - count=10, - deadline=40, - packetsize=1452, - dst_address=target_ip), - auth=cirros_auth - ) - - logger.info( - 'Ping results: \n\t{res:s}'.format(res=res['stdout_str'])) - - assert_equal( - res['exit_code'], - 0, - 'Ping failed with error code: {code:d}\n' - '\tSTDOUT: {stdout:s}\n' - '\tSTDERR: {stderr:s}'.format( - code=res['exit_code'], - stdout=res['stdout_str'], - stderr=res['stderr_str'])) - - -def check_settings_requirements(tests_requirements): - bad_params = set() - for param, value in tests_requirements.items(): - if getattr(settings, param) != value: - bad_params.add('{0}={1}'.format(param, value)) - assert_true(not bad_params, - 'Can not start tests, the following settings are ' - 'not set properly: {0}'.format(', '.join(bad_params))) diff --git a/fuelweb_test/helpers/cic_maintenance_mode.py b/fuelweb_test/helpers/cic_maintenance_mode.py deleted file mode 100644 index 359dc166c..000000000 --- a/fuelweb_test/helpers/cic_maintenance_mode.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
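
The `check_settings_requirements` helper above fails fast when module-level settings do not match what a test needs. A minimal usage sketch; the setting names below are hypothetical placeholders, not actual fuelweb_test.settings attributes:

    # Hypothetical gate: both setting names are illustrative placeholders.
    requirements = {
        'NEUTRON_ENABLE': True,
        'MULTIPLE_NETWORKS': False,
    }
    # Raises AssertionError listing every setting whose current value
    # differs from the required one.
    check_settings_requirements(requirements)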
- -from core.helpers.log_helpers import logwrap - -from fuelweb_test.helpers.ssh_manager import SSHManager - - -ssh_manager = SSHManager() - - -@logwrap -def change_config(ip, umm=True, reboot_count=2, counter_reset_time=10): - umm_string = 'yes' if umm else 'no' - cmd = ("echo -e 'UMM={0}\n" - "REBOOT_COUNT={1}\n" - "COUNTER_RESET_TIME={2}' > /etc/umm.conf".format(umm_string, - reboot_count, - counter_reset_time) - ) - result = ssh_manager.execute( - ip=ip, - cmd=cmd - ) - return result - - -def check_available_mode(ip): - command = ('umm status | grep runlevel &>/dev/null && echo "True" ' - '|| echo "False"') - if ssh_manager.execute(ip, command)['exit_code'] == 0: - return ''.join(ssh_manager.execute(ip, command)['stdout']).strip() - else: - return ''.join(ssh_manager.execute(ip, command)['stderr']).strip() - - -def check_auto_mode(ip): - command = ('umm status | grep umm &>/dev/null && echo "True" ' - '|| echo "False"') - if ssh_manager.execute(ip, command)['exit_code'] == 0: - return ''.join(ssh_manager.execute(ip, command)['stdout']).strip() - else: - return ''.join(ssh_manager.execute(ip, command)['stderr']).strip() diff --git a/fuelweb_test/helpers/cloud_image.py b/fuelweb_test/helpers/cloud_image.py deleted file mode 100644 index e288cfb47..000000000 --- a/fuelweb_test/helpers/cloud_image.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
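
A short usage sketch for the maintenance-mode (UMM) helpers above; `node_ip` is a placeholder for a deployed controller address:

    # Enable unified maintenance mode: after two unclean reboots inside the
    # counter window the node drops into the UMM runlevel.
    change_config(node_ip, umm=True, reboot_count=2, counter_reset_time=10)

    # Both check helpers return the literal strings 'True'/'False' printed
    # by the shell one-liners, so compare against strings, not booleans.
    assert check_available_mode(node_ip) == 'True'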
- -import os -import subprocess - -from fuelweb_test import logger - - -def generate_cloud_image_settings(cloud_image_settings_path, admin_network, - interface_name, admin_ip, admin_netmask, - gateway, dns, dns_ext, - hostname, user, password): - - # create dir for meta_data, user_data and cloud_ISO - dir_path = os.path.dirname(cloud_image_settings_path) - - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - meta_data_path = os.path.join(dir_path, - "meta-data") - user_data_path = os.path.join(dir_path, - "user-data") - - # create meta_data and user_data - - meta_data_context = { - "interface_name": interface_name, - "address": admin_ip, - "network": admin_network, - "netmask": admin_netmask, - "gateway": gateway, - "dns": dns, - "dns_ext": dns_ext, - "hostname": hostname - } - - meta_data_content = ("instance-id: iid-local1\n" - "network-interfaces: |\n" - " auto {interface_name}\n" - " iface {interface_name} inet static\n" - " address {address}\n" - " network {network}\n" - " netmask {netmask}\n" - " gateway {gateway}\n" - " dns-nameservers {dns} {dns_ext}\n" - "local-hostname: {hostname}") - - logger.debug("meta_data contains next data: \n{}".format( - meta_data_content.format(**meta_data_context))) - - with open(meta_data_path, 'w') as f: - f.write(meta_data_content.format(**meta_data_context)) - - user_data_context = { - "interface_name": interface_name, - "gateway": gateway, - "user": user, - "password": password - } - - user_data_content = ("\n#cloud-config\n" - "ssh_pwauth: True\n" - "chpasswd:\n" - " list: |\n" - " {user}:{password}\n" - " expire: False \n\n" - "runcmd:\n" - " - sudo ifup {interface_name}\n" - " - sudo sed -i -e '/^PermitRootLogin/s/^" - ".*$/PermitRootLogin yes/' /etc/ssh/sshd_config\n" - " - sudo service ssh restart\n" - " - sudo route add default gw " - "{gateway} {interface_name}") - - logger.debug("user_data contains next data: \n{}".format( - user_data_content.format(**user_data_context))) - - with open(user_data_path, 'w') as f: - f.write(user_data_content.format(**user_data_context)) - - # Generate cloud_ISO - cmd = "genisoimage -output {} " \ - "-volid cidata -joliet " \ - "-rock {} {}".format(cloud_image_settings_path, - user_data_path, - meta_data_path) - - subprocess.check_call(cmd, shell=True) diff --git a/fuelweb_test/helpers/common.py b/fuelweb_test/helpers/common.py deleted file mode 100644 index 4a00edd22..000000000 --- a/fuelweb_test/helpers/common.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
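
`generate_cloud_image_settings` above writes cloud-init `meta-data`/`user-data` files and packs them into a NoCloud seed ISO with `genisoimage -volid cidata`. A sketch of a call; every address and name below is an illustrative placeholder:

    generate_cloud_image_settings(
        cloud_image_settings_path='/tmp/cloud_settings/settings.iso',
        admin_network='10.109.0.0',
        interface_name='eth0',
        admin_ip='10.109.0.2',
        admin_netmask='255.255.255.0',
        gateway='10.109.0.1',
        dns='10.109.0.1',
        dns_ext='8.8.8.8',
        hostname='nailgun.test.domain.local',
        user='root',
        password='r00tme')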
- -import sys -import time -import traceback - -from cinderclient.client import Client as CinderClient -from heatclient.v1.client import Client as HeatClient -from glanceclient import Client as GlanceClient -from ironicclient.client import get_client as get_ironic_client -from keystoneauth1.exceptions import ClientException -from keystoneauth1.identity import V2Password -from keystoneauth1.session import Session as KeystoneSession -from keystoneclient.v2_0 import Client as KeystoneClient -from novaclient.client import Client as NovaClient -from neutronclient.v2_0.client import Client as NeutronClient -from proboscis.asserts import assert_equal -import six -# pylint: disable=redefined-builtin -# noinspection PyUnresolvedReferences -from six.moves import xrange -# pylint: enable=redefined-builtin -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import urllib -# pylint: enable=import-error - -from core.helpers.log_helpers import logwrap - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test import logger -from fuelweb_test.settings import DISABLE_SSL -from fuelweb_test.settings import PATH_TO_CERT -from fuelweb_test.settings import VERIFY_SSL - - -class Common(object): - """Common.""" # TODO documentation - - def __make_endpoint(self, endpoint): - parse = urllib.parse.urlparse(endpoint) - return parse._replace( - netloc='{}:{}'.format( - self.controller_ip, parse.port)).geturl() - - def __init__(self, controller_ip, user, password, tenant): - self.controller_ip = controller_ip - - self.keystone_session = None - - if DISABLE_SSL: - auth_url = 'http://{0}:5000/v2.0/'.format(self.controller_ip) - path_to_cert = None - else: - auth_url = 'https://{0}:5000/v2.0/'.format(self.controller_ip) - path_to_cert = PATH_TO_CERT - - insecure = not VERIFY_SSL - - logger.debug('Auth URL is {0}'.format(auth_url)) - - self.__keystone_auth = V2Password( - auth_url=auth_url, - username=user, - password=password, - tenant_name=tenant) # TODO: in v3 project_name - - self.__start_keystone_session(ca_cert=path_to_cert, insecure=insecure) - - @property - def keystone(self): - return KeystoneClient(session=self.keystone_session) - - @property - def glance(self): - endpoint = self.__make_endpoint( - self._get_url_for_svc(service_type='image')) - return GlanceClient( - version='1', - session=self.keystone_session, - endpoint_override=endpoint) - - @property - def neutron(self): - endpoint = self.__make_endpoint( - self._get_url_for_svc(service_type='network')) - return NeutronClient( - session=self.keystone_session, - endpoint_override=endpoint) - - @property - def nova(self): - endpoint = self.__make_endpoint( - self._get_url_for_svc(service_type='compute')) - return NovaClient( - version='2', - session=self.keystone_session, - endpoint_override=endpoint) - - @property - def cinder(self): - endpoint = self.__make_endpoint( - self._get_url_for_svc(service_type='volume')) - return CinderClient( - version='3', - session=self.keystone_session, - endpoint_override=endpoint) - - @property - def heat(self): - endpoint = self.__make_endpoint( - self._get_url_for_svc(service_type='orchestration')) - # TODO: parameter endpoint_override when heatclient will be fixed - return HeatClient( - session=self.keystone_session, - endpoint=endpoint) - - @property - def ironic(self): - try: - endpoint = self.__make_endpoint( - self._get_url_for_svc(service_type='baremetal')) - return get_ironic_client('1', session=self.keystone_session, - 
insecure=True, ironic_url=endpoint) - except ClientException as e: - logger.warning('Could not initialize ironic client {0}'.format(e)) - raise - - @property - def keystone_access(self): - return self.__keystone_auth.get_access(session=self.keystone_session) - - def _get_url_for_svc( - self, service_type=None, interface='public', - region_name=None, service_name=None, - service_id=None, endpoint_id=None - ): - return self.keystone_access.service_catalog.url_for( - service_type=service_type, interface=interface, - region_name=region_name, service_name=service_name, - service_id=service_id, endpoint_id=endpoint_id - ) - - def goodbye_security(self): - secgroup_list = self.nova.security_groups.list() - logger.debug("Security list is {0}".format(secgroup_list)) - secgroup_id = [i.id for i in secgroup_list if i.name == 'default'][0] - logger.debug("Id of security group default is {0}".format( - secgroup_id)) - logger.debug('Permit all TCP and ICMP in security group default') - self.nova.security_group_rules.create(secgroup_id, - ip_protocol='tcp', - from_port=1, - to_port=65535) - self.nova.security_group_rules.create(secgroup_id, - ip_protocol='icmp', - from_port=-1, - to_port=-1) - - def update_image(self, image, **kwargs): - self.glance.images.update(image.id, **kwargs) - return self.glance.images.get(image.id) - - def delete_image(self, image_id): - return self.glance.images.delete(image_id) - - def create_key(self, key_name): - logger.debug('Try to create key {0}'.format(key_name)) - return self.nova.keypairs.create(key_name) - - def create_instance(self, flavor_name='test_flavor', ram=64, vcpus=1, - disk=1, server_name='test_instance', image_name=None, - neutron_network=True, label=None): - logger.debug('Try to create instance') - - start_time = time.time() - exc_type, exc_value, exc_traceback = None, None, None - while time.time() - start_time < 100: - try: - if image_name: - image = [i.id for i in self.nova.images.list() - if i.name == image_name] - else: - image = [i.id for i in self.nova.images.list()] - break - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - logger.warning('Ignoring exception: {!r}'.format(e)) - logger.debug(traceback.format_exc()) - else: - if all((exc_type, exc_traceback, exc_value)): - six.reraise(exc_type, exc_value, exc_traceback) - raise Exception('Can not get image') - - kwargs = {} - if neutron_network: - net_label = label if label else 'net04' - network = self.nova.networks.find(label=net_label) - kwargs['nics'] = [{'net-id': network.id, 'v4-fixed-ip': ''}] - - logger.info('image uuid is {0}'.format(image)) - flavor = self.nova.flavors.create( - name=flavor_name, ram=ram, vcpus=vcpus, disk=disk) - logger.info('flavor is {0}'.format(flavor.name)) - server = self.nova.servers.create( - name=server_name, image=image[0], flavor=flavor, **kwargs) - logger.info('server is {0}'.format(server.name)) - return server - - @logwrap - def get_instance_detail(self, server): - details = self.nova.servers.get(server) - return details - - def verify_instance_status(self, server, expected_state): - def _verify_instance_state(): - curr_state = self.get_instance_detail(server).status - assert_equal(expected_state, curr_state) - - try: - _verify_instance_state() - except AssertionError: - logger.debug('Instance is not {0}, lets provide it the last ' - 'chance and sleep 60 sec'.format(expected_state)) - time.sleep(60) - _verify_instance_state() - - def delete_instance(self, server): - logger.debug('Try to delete instance') - 
self.nova.servers.delete(server) - - def create_flavor(self, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, extra_specs=None): - flavor = self.nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral=ephemeral) - if extra_specs: - flavor.set_keys(extra_specs) - return flavor - - def delete_flavor(self, flavor): - return self.nova.flavors.delete(flavor) - - def create_aggregate(self, name, availability_zone=None, - metadata=None, hosts=None): - aggregate = self.nova.aggregates.create( - name=name, availability_zone=availability_zone) - for host in hosts or []: - aggregate.add_host(host) - if metadata: - aggregate.set_metadata(metadata) - return aggregate - - def delete_aggregate(self, aggregate, hosts=None): - for host in hosts or []: - self.nova.aggregates.remove_host(aggregate, host) - return self.nova.aggregates.delete(aggregate) - - def __start_keystone_session( - self, retries=3, ca_cert=None, insecure=not VERIFY_SSL): - exc_type, exc_value, exc_traceback = None, None, None - for i in xrange(retries): - try: - if insecure: - self.keystone_session = KeystoneSession( - auth=self.__keystone_auth, verify=False) - elif ca_cert: - self.keystone_session = KeystoneSession( - auth=self.__keystone_auth, verify=ca_cert) - else: - self.keystone_session = KeystoneSession( - auth=self.__keystone_auth) - self.keystone_session.get_auth_headers() - return - - except ClientException as exc: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = "Try nr {0}. Could not get keystone token, error: {1}" - logger.warning(err.format(i + 1, exc)) - time.sleep(5) - if exc_type and exc_traceback and exc_value: - six.reraise(exc_type, exc_value, exc_traceback) - raise RuntimeError() - - @staticmethod - def rebalance_swift_ring(controller_ip, retry_count=5, sleep=600): - """Check Swift ring and rebalance it if needed. - - Replication should be performed on primary controller node. - Retry check several times. Wait for replication due to LP1498368. - """ - ssh = SSHManager() - cmd = "/usr/local/bin/swift-rings-rebalance.sh" - logger.debug('Check swift ring and rebalance it.') - for _ in xrange(retry_count): - try: - checkers.check_swift_ring(controller_ip) - break - except AssertionError: - result = ssh.execute(controller_ip, cmd) - logger.debug("command execution result is {0}".format(result)) - else: - checkers.check_swift_ring(controller_ip) diff --git a/fuelweb_test/helpers/decorators.py b/fuelweb_test/helpers/decorators.py deleted file mode 100644 index 1110e222f..000000000 --- a/fuelweb_test/helpers/decorators.py +++ /dev/null @@ -1,533 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
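
A minimal sketch of driving the `Common` wrapper above against a deployed controller; the IP and credentials are placeholders:

    common = Common(controller_ip='10.109.1.3',
                    user='admin', password='admin', tenant='admin')

    # Boot a throwaway instance on the default 'net04' network; the status
    # check retries once after 60 seconds before failing.
    server = common.create_instance(server_name='smoke_instance')
    common.verify_instance_status(server, 'ACTIVE')
    common.delete_instance(server)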
- -import functools -import inspect -import json -import os -from subprocess import call -import sys -import time -import traceback - -from proboscis import SkipTest -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import urllib -# pylint: enable=import-error - -# pylint: disable=unused-import -from core.helpers.setup_teardown import setup_teardown # noqa -# pylint: enable=unused-import - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.checkers import check_action_logs -from fuelweb_test.helpers.checkers import check_repo_managment -from fuelweb_test.helpers.checkers import check_stats_on_collector -from fuelweb_test.helpers.checkers import check_stats_private_info -from fuelweb_test.helpers.checkers import count_stats_on_collector -from fuelweb_test.helpers.regenerate_repo import CustomRepo -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.helpers.utils import get_current_env -from fuelweb_test.helpers.utils import pull_out_logs_via_ssh -from fuelweb_test.helpers.utils import store_astute_yaml -from fuelweb_test.helpers.utils import store_packages_json -from fuelweb_test.helpers.utils import TimeStat -from gates_tests.helpers.exceptions import ConfigurationException - - -def save_logs(session, url, path, chunk_size=1024): - logger.info('Saving logs to "%s" file', path) - - stream = session.get(url, stream=True, verify=False) - if stream.status_code != 200: - logger.error("%s %s: %s", stream.status_code, stream.reason, - stream.content) - return - - with open(path, 'wb') as fp: - for chunk in stream.iter_content(chunk_size=chunk_size): - if chunk: - fp.write(chunk) - fp.flush() - - -def store_error_details(name, env): - description = "Failed in method {:s}.".format(name) - if env is not None: - try: - create_diagnostic_snapshot(env, "fail", name) - except: - logger.error("Fetching of diagnostic snapshot failed: {0}".format( - traceback.format_exception_only(sys.exc_info()[0], - sys.exc_info()[1]))) - logger.debug("Fetching of diagnostic snapshot failed: {0}". - format(traceback.format_exc())) - try: - with env.d_env.get_admin_remote()\ - as admin_remote: - pull_out_logs_via_ssh(admin_remote, name) - except: - logger.error("Fetching of raw logs failed: {0}".format( - traceback.format_exception_only(sys.exc_info()[0], - sys.exc_info()[1]))) - logger.debug("Fetching of raw logs failed: {0}". - format(traceback.format_exc())) - finally: - try: - env.make_snapshot(snapshot_name=name[-50:], - description=description, - is_make=True) - except: - logger.error( - "Error making the environment snapshot: {0}".format( - traceback.format_exception_only(sys.exc_info()[0], - sys.exc_info()[1]))) - logger.debug("Error making the environment snapshot:" - " {0}".format(traceback.format_exc())) - - -def log_snapshot_after_test(func): - """Generate diagnostic snapshot after the end of the test. - - - Show test case method name and scenario from docstring. - - Create a diagnostic snapshot of environment in cases: - - if the test case passed; - - if error occurred in the test case. - - Fetch logs from master node if creating the diagnostic - snapshot has failed. 
- """ - @functools.wraps(func) - def wrapper(*args, **kwargs): - logger.info("\n" + "<" * 5 + "#" * 30 + "[ {} ]" - .format(func.__name__) + "#" * 30 + ">" * 5 + "\n{}" - .format(''.join(func.__doc__))) - try: - result = func(*args, **kwargs) - except SkipTest: - raise - except Exception: - name = 'error_{:s}'.format(func.__name__) - store_error_details(name, args[0].env) - logger.error(traceback.format_exc()) - logger.info("<" * 5 + "*" * 100 + ">" * 5) - raise - else: - if settings.ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT: - if args[0].env is None: - logger.warning("Can't get diagnostic snapshot: " - "unexpected class is decorated.") - return result - try: - args[0].env.resume_environment() - create_diagnostic_snapshot(args[0].env, "pass", - func.__name__) - except: - logger.error("Fetching of diagnostic snapshot failed: {0}". - format(traceback.format_exc())) - return result - return wrapper - - -def json_parse(func): - @functools.wraps(func) - def wrapped(*args, **kwargs): - response = func(*args, **kwargs) - return json.loads(response.read()) - return wrapped - - -def upload_manifests(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - try: - if settings.UPLOAD_MANIFESTS: - logger.info( - "Uploading new manifests from " - "{:s}".format(settings.UPLOAD_MANIFESTS_PATH)) - environment = get_current_env(args) - if not environment: - logger.warning("Can't upload manifests: method of " - "unexpected class is decorated.") - return result - with environment.d_env.get_admin_remote() as remote: - remote.execute('rm -rf /etc/puppet/modules/*') - remote.upload(settings.UPLOAD_MANIFESTS_PATH, - '/etc/puppet/modules/') - logger.info( - "Copying new site.pp from " - "{:s}".format(settings.SITEPP_FOR_UPLOAD)) - remote.execute("cp %s /etc/puppet/manifests" % - settings.SITEPP_FOR_UPLOAD) - if settings.SYNC_DEPL_TASKS: - remote.execute("fuel release --sync-deployment-tasks" - " --dir /etc/puppet/") - except Exception: - logger.error("Could not upload manifests") - raise - return result - return wrapper - - -def update_rpm_packages(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - if not settings.UPDATE_FUEL: - return result - try: - environment = get_current_env(args) - if not environment: - logger.warning("Can't update packages: method of " - "unexpected class is decorated.") - return result - - if settings.UPDATE_FUEL_MIRROR: - for url in settings.UPDATE_FUEL_MIRROR: - repo_url = urllib.parse.urlparse(url) - cut_dirs = len(repo_url.path.strip('/').split('/')) - download_cmd = ('wget --recursive --no-parent' - ' --no-verbose --reject "index' - '.html*,*.gif" --exclude-directories' - ' "{pwd}/repocache" ' - '--directory-prefix {path} -nH' - ' --cut-dirs={cutd} {url}').\ - format(pwd=repo_url.path.rstrip('/'), - path=settings.UPDATE_FUEL_PATH, - cutd=cut_dirs, url=repo_url.geturl()) - return_code = call(download_cmd, shell=True) - assert_equal(return_code, 0, 'Mirroring of remote' - ' packages ' - 'repository failed') - - centos_files_count, _ = \ - environment.admin_actions.upload_packages( - local_packages_dir=settings.UPDATE_FUEL_PATH, - centos_repo_path=settings.LOCAL_MIRROR_CENTOS, - ubuntu_repo_path=None) - - if centos_files_count == 0: - return result - - # Add temporary repo with new packages to YUM configuration - conf_file = '/etc/yum.repos.d/temporary.repo' - cmd = ("echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/" - "\ngpgcheck=0\npriority=1' > {1}").format( - settings.LOCAL_MIRROR_CENTOS, 
conf_file) - - SSHManager().execute_on_remote( - ip=SSHManager().admin_ip, - cmd=cmd - ) - update_command = 'yum clean expire-cache; yum update -y -d3 ' \ - '2>>/var/log/yum-update-error.log' - cmd_result = SSHManager().execute(ip=SSHManager().admin_ip, - cmd=update_command) - logger.debug('Result of "yum update" command on master node: ' - '{0}'.format(cmd_result)) - assert_equal(int(cmd_result['exit_code']), 0, - 'Packages update failed, ' - 'inspect logs for details') - - SSHManager().execute_on_remote( - ip=SSHManager().admin_ip, - cmd='rm -f {0}'.format(conf_file) - ) - except Exception: - logger.error("Could not update packages") - raise - return result - return wrapper - - -def update_fuel(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - if settings.UPDATE_FUEL: - logger.info("Update fuel's packages from directory {0}." - .format(settings.UPDATE_FUEL_PATH)) - environment = get_current_env(args) - if not environment: - logger.warning("Decorator was triggered " - "from unexpected class.") - return result - - centos_files_count, ubuntu_files_count = \ - environment.admin_actions.upload_packages( - local_packages_dir=settings.UPDATE_FUEL_PATH, - centos_repo_path=settings.LOCAL_MIRROR_CENTOS, - ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU) - if not centos_files_count and not ubuntu_files_count: - raise ConfigurationException('Nothing to update,' - ' packages to update values is 0') - cluster_id = environment.fuel_web.get_last_created_cluster() - - if centos_files_count > 0: - with environment.d_env.get_admin_remote() as remote: - # Update packages on master node - remote.execute( - 'yum -y install yum-plugin-priorities;' - 'yum clean expire-cache; yum update -y ' - '2>>/var/log/yum-update-error.log') - - # Add auxiliary repository to the cluster attributes - if settings.OPENSTACK_RELEASE_UBUNTU not in \ - settings.OPENSTACK_RELEASE: - environment.fuel_web.add_local_centos_mirror( - cluster_id, path=settings.LOCAL_MIRROR_CENTOS, - priority=settings.AUX_RPM_REPO_PRIORITY) - - if ubuntu_files_count > 0: - # Add auxiliary repository to the cluster attributes - if settings.OPENSTACK_RELEASE_UBUNTU in \ - settings.OPENSTACK_RELEASE: - environment.fuel_web.add_local_ubuntu_mirror( - cluster_id, name="Auxiliary", - path=settings.LOCAL_MIRROR_UBUNTU, - priority=settings.AUX_DEB_REPO_PRIORITY) - else: - logger.error("{0} .DEB files uploaded but won't be used" - " because of deploying wrong release!" 
- .format(ubuntu_files_count)) - if settings.SYNC_DEPL_TASKS: - with environment.d_env.get_admin_remote() as remote: - remote.execute("fuel release --sync-deployment-tasks" - " --dir /etc/puppet/") - return result - return wrapper - - -def revert_info(snapshot_name, master_ip, description=""): - logger.info("<" * 5 + "*" * 100 + ">" * 5) - logger.info("{} Make snapshot: {}".format(description, snapshot_name)) - command = ("dos.py revert-resume {env} {name} " - "&& ssh root@{master_ip}".format( - env=settings.ENV_NAME, - name=snapshot_name, - master_ip=master_ip)) - if settings.VIRTUAL_ENV: - command = ('source {venv}/bin/activate; {command}' - .format(venv=settings.VIRTUAL_ENV, command=command)) - logger.info("You could revert and ssh to master node: [{command}]" - .format(command=command)) - - logger.info("<" * 5 + "*" * 100 + ">" * 5) - - -def create_diagnostic_snapshot(env, status, name="", - timeout=settings.LOG_SNAPSHOT_TIMEOUT): - logger.debug('Starting log snapshot with ' - 'timeout {} seconds'.format(timeout)) - task = env.fuel_web.task_wait(env.fuel_web.client.generate_logs(), timeout) - assert_true(task['status'] == 'ready', - "Generation of diagnostic snapshot failed: {}".format(task)) - if settings.FORCE_HTTPS_MASTER_NODE: - url = "https://{}:8443{}".format(env.get_admin_node_ip(), - task['message']) - else: - url = "http://{}:8000{}".format(env.get_admin_node_ip(), - task['message']) - - log_file_name = '{status}_{name}-{basename}'.format( - status=status, - name=name, - basename=os.path.basename(task['message'])) - save_logs( - session=env.fuel_web.client.session, - url=url, - path=os.path.join(settings.LOGS_DIR, log_file_name)) - - -def retry(count=3, delay=30): - def wrapped(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - i = 0 - while True: - try: - return func(*args, **kwargs) - except: - i += 1 - if i >= count: - raise - time.sleep(delay) - return wrapper - return wrapped - - -def custom_repo(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - custom_pkgs = CustomRepo() - try: - if settings.CUSTOM_PKGS_MIRROR: - custom_pkgs.prepare_repository() - - except Exception: - logger.error("Unable to get custom packages from {0}\n{1}" - .format(settings.CUSTOM_PKGS_MIRROR, - traceback.format_exc())) - raise - - try: - return func(*args, **kwargs) - except Exception: - custom_pkgs.check_puppet_logs() - raise - return wrapper - - -def check_fuel_statistics(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - if not settings.FUEL_STATS_CHECK: - return result - logger.info('Test "{0}" passed. 
Checking stats.'.format(func.__name__))
-        fuel_settings = args[0].env.admin_actions.get_fuel_settings()
-        nailgun_actions = args[0].env.nailgun_actions
-        postgres_actions = args[0].env.postgres_actions
-        remote_collector = args[0].env.collector
-        master_uuid = args[0].env.get_masternode_uuid()
-        logger.info("Master Node UUID: '{0}'".format(master_uuid))
-        nailgun_actions.force_fuel_stats_sending()
-
-        if not settings.FUEL_STATS_ENABLED:
-            assert_equal(0, int(count_stats_on_collector(remote_collector,
-                                                         master_uuid)),
-                         "Sending of Fuel stats is disabled in test, but "
-                         "usage info was sent to collector!")
-            assert_equal(args[0].env.postgres_actions.count_sent_action_logs(),
-                         0, ("Sending of Fuel stats is disabled in test, but "
-                             "usage info was sent to collector!"))
-            return result
-
-        test_scenario = inspect.getdoc(func)
-        if 'Scenario' not in test_scenario:
-            logger.warning(("Can't check that fuel statistics were gathered "
-                            "and sent to collector properly because '{0}' "
-                            "test doesn't contain correct testing scenario. "
-                            "Skipping...").format(func.__name__))
-            # The stats check is skipped; return the result of the call
-            # already made above rather than running the test a second time.
-            return result
-        try:
-            check_action_logs(test_scenario, postgres_actions)
-            check_stats_private_info(remote_collector,
-                                     postgres_actions,
-                                     master_uuid,
-                                     fuel_settings)
-            check_stats_on_collector(remote_collector,
-                                     postgres_actions,
-                                     master_uuid)
-            return result
-        except Exception:
-            logger.error(traceback.format_exc())
-            raise
-    return wrapper
-
-
-def download_astute_yaml(func):
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        result = func(*args, **kwargs)
-        if settings.STORE_ASTUTE_YAML:
-            environment = get_current_env(args)
-            if environment:
-                store_astute_yaml(environment)
-            else:
-                logger.warning("Can't download astute.yaml: "
-                               "Unexpected class is decorated.")
-        return result
-    return wrapper
-
-
-def download_packages_json(func):
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        result = func(*args, **kwargs)
-        environment = get_current_env(args)
-        if environment:
-            store_packages_json(environment)
-        else:
-            logger.warning("Can't collect packages: "
-                           "Unexpected class is decorated.")
-        return result
-    return wrapper
-
-
-def duration(func):
-    """Measure execution time of the decorated method in context of a test.
-
-    settings.TIMESTAT_PATH_YAML contains the file name for collected data.
-    Data are stored to a YAML file in the following format:
-
-    <name_of_system_test_method>:
-        <name_of_decorated_method>_XX: <seconds>
-
-    , where:
-
-    - name_of_system_test_method: Name of the system test method started
-                                  by proboscis;
-    - name_of_decorated_method: Name of the method to which this decorator
-                                is applied. _XX is the number of the method
-                                call while the test is running, from _00
-                                to _99
-    - seconds: Time in seconds, with floating point, consumed by the
-               decorated method
-
-    Thus, different tests can call the same decorated method multiple times
-    and get a separate measurement for each call.
- """ - @functools.wraps(func) - def wrapper(*args, **kwargs): - with TimeStat(func.__name__): - return func(*args, **kwargs) - return wrapper - - -def check_repos_management(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - # FIXME: Enable me for all release after fix #1403088 and #1448114 - if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE: - try: - env = get_current_env(args) - nailgun_nodes = env.fuel_web.client.list_cluster_nodes( - env.fuel_web.get_last_created_cluster()) - for n in nailgun_nodes: - logger.debug("Check repository management on {0}" - .format(n['ip'])) - check_repo_managment(n['ip']) - except Exception: - logger.error("An error happened during check repositories " - "management on nodes. Please see the debug log.") - return result - return wrapper - - -def token(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except AssertionError: - logger.info("Response code not equivalent to 200," - " trying to update the token") - args[0].login() - return func(*args, **kwargs) - return wrapper diff --git a/fuelweb_test/helpers/eb_tables.py b/fuelweb_test/helpers/eb_tables.py deleted file mode 100644 index ae7011b51..000000000 --- a/fuelweb_test/helpers/eb_tables.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import subprocess - -from core.helpers.log_helpers import logwrap - - -class Ebtables(object): - """Ebtables.""" # TODO documentation - - def __init__(self, target_devs, vlans): - super(Ebtables, self).__init__() - self.target_devs = target_devs - self.vlans = vlans - - @logwrap - def restore_vlans(self): - for vlan in self.vlans: - for target_dev in self.target_devs: - Ebtables.restore_vlan(target_dev, vlan) - - @logwrap - def restore_first_vlan(self): - for target_dev in self.target_devs: - Ebtables.restore_vlan(target_dev, self.vlans[0]) - - @logwrap - def block_first_vlan(self): - for target_dev in self.target_devs: - Ebtables.block_vlan(target_dev, self.vlans[0]) - - @staticmethod - @logwrap - def block_mac(mac): - return subprocess.check_output( - ['sudo', 'ebtables', '-t', 'filter', '-A', 'FORWARD', '-s', - mac, '-j', 'DROP'], - stderr=subprocess.STDOUT - ) - - @staticmethod - @logwrap - def restore_mac(mac): - return subprocess.call( - [ - 'sudo', 'ebtables', '-t', 'filter', - '-D', 'FORWARD', '-s', mac, '-j', 'DROP' - ], - stderr=subprocess.STDOUT, - ) - - @staticmethod - @logwrap - def restore_vlan(target_dev, vlan): - return subprocess.call( - [ - 'sudo', 'ebtables', '-t', 'broute', '-D', 'BROUTING', '-i', - target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP' - ], - stderr=subprocess.STDOUT, - ) - - @staticmethod - @logwrap - def block_vlan(target_dev, vlan): - return subprocess.check_output( - [ - 'sudo', 'ebtables', '-t', 'broute', '-A', 'BROUTING', '-i', - target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP' - ], - stderr=subprocess.STDOUT - ) diff --git a/fuelweb_test/helpers/fuel_actions.py b/fuelweb_test/helpers/fuel_actions.py deleted file mode 100644 index 682e358be..000000000 --- a/fuelweb_test/helpers/fuel_actions.py +++ /dev/null @@ -1,579 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
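-# Usage sketch (illustrative; assumes the SSHManager singleton has already
-# been initialized with the master node credentials elsewhere in the suite):
-#
-#     actions = AdminActions()
-#     if not actions.is_fuel_ready():
-#         actions.wait_for_fuel_ready(timeout=600)
-#     actions.restart_service('nailgun')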
-
-import os
-import re
-
-from devops.helpers.helpers import wait
-from proboscis.asserts import assert_true
-import yaml
-
-from core.helpers.log_helpers import logwrap
-
-from fuelweb_test import logger
-from fuelweb_test.helpers.decorators import retry
-from fuelweb_test.helpers.regenerate_repo import regenerate_centos_repo
-from fuelweb_test.helpers.regenerate_repo import regenerate_ubuntu_repo
-from fuelweb_test.helpers import replace_repos
-from fuelweb_test.helpers.ssh_manager import SSHManager
-from fuelweb_test.helpers.utils import dict_merge
-from fuelweb_test.settings import FUEL_PLUGIN_BUILDER_FROM_GIT
-from fuelweb_test.settings import FUEL_PLUGIN_BUILDER_REPO
-from fuelweb_test.settings import FUEL_USE_LOCAL_NTPD
-from fuelweb_test.settings import KEYSTONE_CREDS
-from fuelweb_test.settings import MIRROR_UBUNTU
-from fuelweb_test.settings import PLUGIN_PACKAGE_VERSION
-from fuelweb_test.settings import FUEL_SETTINGS_YAML
-from fuelweb_test.helpers.utils import YamlEditor
-
-
-class BaseActions(object):
-    """BaseActions."""  # TODO documentation
-
-    def __init__(self):
-        self.ssh_manager = SSHManager()
-        self.admin_ip = self.ssh_manager.admin_ip
-
-    def __repr__(self):
-        klass, obj_id = type(self), hex(id(self))
-        return "[{klass}({obj_id})]".format(
-            klass=klass,
-            obj_id=obj_id)
-
-    def restart_service(self, service):
-        self.ssh_manager.execute_on_remote(
-            ip=self.admin_ip,
-            cmd="systemctl restart {0}".format(service),
-            err_msg="Failed to restart service {!r}, please inspect logs for "
-                    "details".format(service))
-
-
-class AdminActions(BaseActions):
-    """All actions relating to the admin node."""
-
-    @logwrap
-    def is_fuel_service_ready(self, service):
-        result = self.ssh_manager.execute(
-            ip=self.admin_ip,
-            cmd="timeout 5 fuel-utils check_service {0}".format(service))
-        return result['exit_code'] == 0
-
-    @logwrap
-    def is_fuel_ready(self):
-        result = self.ssh_manager.execute(
-            ip=self.admin_ip,
-            cmd="timeout 15 fuel-utils check_all")
-        return result['exit_code'] == 0
-
-    @logwrap
-    def wait_for_fuel_ready(self, timeout=300):
-        # is_fuel_ready() must be called inside the lambda so that wait()
-        # re-evaluates the service state on every poll; passing the bound
-        # method itself is always truthy and returns immediately.
-        wait(lambda: self.is_fuel_ready(), timeout=timeout,
-             timeout_msg="Fuel services are not ready, please check the "
-                         "output of 'fuel-utils check_all'")
-
-    @logwrap
-    @retry()
-    def ensure_cmd(self, cmd):
-        self.ssh_manager.execute_on_remote(ip=self.admin_ip, cmd=cmd)
-
-    @logwrap
-    def upload_plugin(self, plugin):
-        """Upload plugin to the master node."""
-        logger.info("Upload fuel's plugin from path {}.".format(plugin))
-        return self.ssh_manager.upload_to_remote(
-            ip=self.ssh_manager.admin_ip,
-            source=plugin,
-            target='/var',
-            port=self.ssh_manager.admin_port)
-
-    @logwrap
-    def install_plugin(self, plugin_file_name):
-        """Install plugin on the master node."""
-        return self.ssh_manager.execute_on_remote(
-            ip=self.ssh_manager.admin_ip,
-            cmd="cd /var && fuel plugins --install "
-                "{plugin!s} ".format(plugin=plugin_file_name),
-            port=self.ssh_manager.admin_port,
-            err_msg='Install script failed'
-        )
-
-    @logwrap
-    def modify_configs(self, router):
-        # Slave nodes should use the gateway of 'admin' network as the default
-        # gateway during provisioning and as an additional DNS server.
- fuel_settings = self.get_fuel_settings() - fuel_settings['DEBUG'] = True - fuel_settings['DNS_UPSTREAM'] = router - fuel_settings['ADMIN_NETWORK']['dhcp_gateway'] = router - fuel_settings["FUEL_ACCESS"]['user'] = KEYSTONE_CREDS['username'] - fuel_settings["FUEL_ACCESS"]['password'] = KEYSTONE_CREDS['password'] - - if FUEL_USE_LOCAL_NTPD: - # Try to use only ntpd on the host as the time source - # for admin node - cmd = 'ntpdate -p 4 -t 0.2 -ub {0}'.format(router) - - if not self.ssh_manager.execute(ip=self.admin_ip, - cmd=cmd)['exit_code']: - # Local ntpd on the host is alive, so - # remove all NTP sources and add the host instead. - logger.info("Switching NTPD on the Fuel admin node to use " - "{0} as the time source.".format(router)) - ntp_keys = [k for k in fuel_settings.keys() - if re.match(r'^NTP', k)] - for key in ntp_keys: - fuel_settings.pop(key) - fuel_settings['NTP1'] = router - - if MIRROR_UBUNTU: - fuel_settings['BOOTSTRAP']['repos'] = \ - replace_repos.replace_ubuntu_repos( - { - 'value': fuel_settings['BOOTSTRAP']['repos'] - }, - upstream_host='archive.ubuntu.com') - logger.info("Replace default Ubuntu mirror URL for " - "bootstrap image in Fuel settings") - self.save_fuel_settings(fuel_settings) - - @logwrap - def update_fuel_setting_yaml(self, path): - """This method override fuel settings yaml according to custom yaml - - :param path: a string of full path to custom setting yaml - """ - - fuel_settings = self.get_fuel_settings() - with open(path) as fyaml: - custom_fuel_settings = yaml.load(fyaml) - - fuel_settings = dict_merge(fuel_settings, custom_fuel_settings) - self.save_fuel_settings(fuel_settings) - logger.debug('File /etc/fuel/astute.yaml was updated.' - 'And now is {}'.format(fuel_settings)) - - @logwrap - def upload_packages(self, local_packages_dir, centos_repo_path, - ubuntu_repo_path, clean_target=False): - logger.info("Upload fuel's packages from directory {0}." 
- .format(local_packages_dir)) - - centos_files_count = 0 - ubuntu_files_count = 0 - - if centos_repo_path: - centos_files_count = self.ssh_manager.cond_upload( - ip=self.admin_ip, - source=local_packages_dir, - target=os.path.join(centos_repo_path, 'Packages'), - condition="(?i).*\.rpm$", - clean_target=clean_target - ) - if centos_files_count > 0: - regenerate_centos_repo(centos_repo_path) - - if ubuntu_repo_path: - ubuntu_files_count = self.ssh_manager.cond_upload( - ip=self.admin_ip, - source=local_packages_dir, - target=os.path.join(ubuntu_repo_path, 'pool/main'), - condition="(?i).*\.deb$", - clean_target=clean_target - ) - if ubuntu_files_count > 0: - regenerate_ubuntu_repo(ubuntu_repo_path) - - return centos_files_count, ubuntu_files_count - - @logwrap - def clean_generated_image(self, distro): - out = self.ssh_manager.execute( - ip=self.admin_ip, - cmd="find /var/www/nailgun/targetimages/ -name " - "'env*{}*' -printf '%P\n'".format(distro.lower()) - ) - images = ''.join(out) - - logger.debug("images are {}".format(images)) - self.ssh_manager.execute( - ip=self.admin_ip, - cmd="find /var/www/nailgun/targetimages/ -name 'env*{}*'" - " -delete".format(distro.lower()) - ) - - def get_fuel_settings(self): - return YamlEditor( - file_path=FUEL_SETTINGS_YAML, - ip=self.admin_ip - ).get_content() - - def save_fuel_settings(self, settings): - with YamlEditor( - file_path=FUEL_SETTINGS_YAML, - ip=self.admin_ip - ) as data: - data.content = settings - - @logwrap - def get_tasks_description(self, release=None): - """Get tasks description - - :param release: a string with release name - :return: a dictionary of tasks description - """ - if not release: - release = '' - cmd = "cat `find /etc/puppet/{} -name tasks.yaml`".format(release) - return self.ssh_manager.check_call(self.admin_ip, cmd).stdout_yaml - - -class NailgunActions(BaseActions): - """NailgunActions.""" # TODO documentation - - def update_nailgun_settings(self, settings): - cfg_file = '/etc/nailgun/settings.yaml' - with YamlEditor(file_path=cfg_file, ip=self.admin_ip) as ng_settings: - ng_settings.content.update(settings) - - logger.debug('Uploading new nailgun settings: {}'.format( - ng_settings)) - self.restart_service("nailgun") - - def set_collector_address(self, host, port, ssl=False): - base_cfg_file = ('/usr/lib/python2.7/site-packages/' - 'nailgun/settings.yaml') - assert_true( - self.ssh_manager.exists_on_remote( - self.ssh_manager.admin_ip, base_cfg_file), - "Nailgun config file was not found at {!r}".format(base_cfg_file)) - - server = "{!s}:{!s}".format(host, port) - parameters = {'COLLECTOR_SERVER': server, - 'OSWL_COLLECT_PERIOD': 0} - if not ssl: - # replace https endpoints to http endpoints - with self.ssh_manager.open_on_remote(self.admin_ip, - base_cfg_file) as f: - data = yaml.load(f) - for key, value in data.items(): - if (isinstance(key, str) and key.startswith("COLLECTOR") and - key.endswith("URL") and value.startswith("https")): - parameters[key] = "http" + value[len("https"):] - logger.debug('Custom collector parameters: {!r}'.format(parameters)) - self.update_nailgun_settings(parameters) - - def force_fuel_stats_sending(self): - log_file = '/var/log/nailgun/statsenderd.log' - # Rotate logs on restart in order to get rid of old errors - cmd = 'mv {0}{{,.backup_$(date +%s)}}'.format(log_file) - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, cmd=cmd, raise_on_assert=False) - self.restart_service('statsenderd') - - wait(lambda: self.ssh_manager.exists_on_remote(self.admin_ip, - log_file), - timeout=10) 
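-        # grep exits with status 1 when it matches nothing, so asserting an
-        # exit code of [1] below verifies that statsenderd wrote no "ERROR"
-        # records to its freshly rotated log after the restart.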
- cmd = 'grep -sw "ERROR" {0}'.format(log_file) - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, cmd=cmd, assert_ec_equal=[1], - err_msg=("Fuel stats were sent with errors! Check its logs" - " in {0} for details.").format(log_file)) - - def force_oswl_collect(self, resources=None): - resources = resources or ['vm', 'flavor', 'volume', 'image', 'tenant', - 'keystone_user'] - for resource in resources: - self.restart_service("oswl_{}_collectord".format(resource)) - - -class PostgresActions(BaseActions): - """PostgresActions.""" # TODO documentation - - def run_query(self, db, query): - cmd = "su - postgres -c 'psql -qt -d {0} -c \"{1};\"'".format( - db, query) - return self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=cmd)['stdout_str'] - - def action_logs_contain(self, action, group=False, - table='action_logs'): - logger.info("Checking that '{0}' action was logged..".format( - action)) - log_filter = "action_name" if not group else "action_group" - q = "select id from {0} where {1} = '\"'\"'{2}'\"'\"'".format( - table, log_filter, action) - logs = [i.strip() for i in self.run_query('nailgun', q).split('\n') - if re.compile(r'\d+').match(i.strip())] - logger.info("Found log records with ids: {0}".format(logs)) - return len(logs) > 0 - - def count_sent_action_logs(self, table='action_logs'): - q = "select count(id) from {0} where is_sent = True".format(table) - return int(self.run_query('nailgun', q)) - - -class FuelPluginBuilder(BaseActions): - """ - Basic class for fuel plugin builder support in tests. - - Initializes BaseActions. - """ - def fpb_install(self): - """ - Installs fuel plugin builder on master node - - :return: nothing - """ - rpms = "createrepo dpkg-devel dpkg-dev rpm-build python-pip" - fpb_package = "fuel-plugin-builder" - if FUEL_PLUGIN_BUILDER_FROM_GIT: - rpms += " tar git" - fpb_package = "git+{}".format(FUEL_PLUGIN_BUILDER_REPO) - - self.ssh_manager.check_call(self.admin_ip, - "yum -y install {}".format(rpms)) - self.ssh_manager.check_call(self.admin_ip, - "pip install {}".format(fpb_package)) - - def fpb_create_plugin(self, name, package_version=PLUGIN_PACKAGE_VERSION): - """ - Creates new plugin with given name - :param name: name for plugin created - :param package_version: plugin package version to create template for - :return: nothing - """ - cmd = "fpb --create {0}".format(name) - if package_version != '': - cmd += ' --package-version {0}'.format(package_version) - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=cmd - ) - - def fpb_build_plugin(self, path): - """ - Builds plugin from path - :param path: path to plugin. 
For ex.: /root/example_plugin - :return: packet name - """ - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd="bash -c 'fpb --build {0}'".format(path) - ) - packet_name = self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd="bash -c 'basename {0}/*.rpm'".format(path) - )['stdout_str'] - return packet_name - - def fpb_update_release_in_metadata(self, path): - """Update fuel version and openstack release version - - :param path: path to plugin's dir on master node - """ - metadata_path = os.path.join(path, 'metadata.yaml') - output = self.ssh_manager.execute_on_remote( - ip=self.admin_ip, cmd="fuel2 fuel-version -f json", - jsonify=True)['stdout_json'] - fuel_version = [str(output['release'])] - openstack_version = str(output['openstack_version']) - with YamlEditor(metadata_path, ip=self.admin_ip) as editor: - editor.content['fuel_version'] = fuel_version - editor.content['releases'][0]['version'] = openstack_version - - def fpb_validate_plugin(self, path): - """ - Validates plugin for errors - :param path: path to plugin to be verified - :return: nothing - """ - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd="fpb --check {0}".format(path)) - - def fpb_replace_plugin_content(self, local_file, remote_file): - """ - Replaces file with given local file - :param local_file: path to the local file - :param remote_file: file to be replaced - :return: nothing - """ - self.ssh_manager.rm_rf_on_remote(ip=self.admin_ip, path=remote_file) - self.ssh_manager.upload_to_remote( - ip=self.admin_ip, - source=local_file, - target=remote_file - ) - - def fpb_change_plugin_version(self, plugin_name, new_version): - """ - Changes plugin version with given one - :param plugin_name: plugin name - :param new_version: new version to be used for plugin - :return: nothing - """ - with YamlEditor('/root/{}/metadata.yaml'.format(plugin_name), - ip=self.admin_ip) as editor: - editor.content['version'] = new_version - - def fpb_change_package_version(self, plugin_name, new_version): - """ - Changes plugin's package version - :param plugin_name: plugin to be used for changing version - :param new_version: version to be changed at - :return: nothing - """ - with YamlEditor('/root/{}/metadata.yaml'.format(plugin_name), - ip=self.admin_ip) as editor: - editor.content['package_version'] = new_version - - def fpb_copy_plugin(self, source, target): - """ - Copy new plugin from source to target - :param source: initial plugin location - :param target: target path - :return: nothing - """ - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd="cp {0} {1}".format(source, target)) - - -class CobblerActions(BaseActions): - """CobblerActions.""" # TODO documentation - - def add_dns_upstream_server(self, dns_server_ip): - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd="sed '$anameserver {0}' -i /etc/dnsmasq.upstream".format( - dns_server_ip)) - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd='service dnsmasq restart') - - -class FuelBootstrapCliActions(AdminActions): - def get_bootstrap_default_config(self): - fuel_settings = self.get_fuel_settings() - return fuel_settings["BOOTSTRAP"] - - @staticmethod - def parse_uuid(message): - uuid_regex = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-" \ - r"[0-9a-f]{4}-[0-9a-f]{12}" - - # NOTE: Splitting for matching only first uuid in case of parsing - # images list, because image label could contain matching strings - message_lines = message.splitlines() - uuids = [] - - for line in message_lines: - match = 
re.search(uuid_regex, line) - if match is not None: - uuids.append(match.group()) - - if not uuids: - raise Exception("Could not find uuid in fuel-bootstrap " - "output: {0}".format(message)) - return uuids - - def activate_bootstrap_image(self, uuid): - command = "fuel-bootstrap activate {0}".format(uuid) - result = self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=command, - )['stdout_str'] - - return self.parse_uuid(result)[0] - - def build_bootstrap_image(self, **kwargs): - simple_fields = \ - ("ubuntu-release", "http-proxy", "https-proxy", "script", - "label", "extend-kopts", "kernel-flavor", - "root-ssh-authorized-file", "output-dir", "image-build-dir") - list_fields = ("repo", "direct-repo-addr", "package", "extra-dir") - flag_fields = ("activate", ) - command = "fuel-bootstrap build " - - for field in simple_fields: - if kwargs.get(field) is not None: - command += "--{0} {1} ".format(field, kwargs.get(field)) - - for field in list_fields: - if kwargs.get(field) is not None: - for value in kwargs.get(field): - command += "--{0} {1} ".format(field, value) - - for field in flag_fields: - if kwargs.get(field) is not None: - command += "--{0} ".format(field) - - logger.info("Building bootstrap image: {0}".format(command)) - result = self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=command, - )['stdout_str'] - - logger.info("Bootstrap image has been built: {0}".format(result)) - uuid = self.parse_uuid(result)[0] - path = os.path.join(kwargs.get("output-dir", "/tmp"), - "{0}.tar.gz".format(uuid)) - return uuid, path - - def import_bootstrap_image(self, filename, activate=False): - command = ("fuel-bootstrap import {0} {1}" - .format(filename, - "--activate" if activate else "")) - - result = self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=command, - )['stdout_str'] - return self.parse_uuid(result)[0] - - def list_bootstrap_images(self): - command = "fuel-bootstrap list" - result = self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=command, - )['stdout_str'] - return result - - def list_bootstrap_images_uuids(self): - return self.parse_uuid(self.list_bootstrap_images()) - - def get_active_bootstrap_uuid(self): - command = "fuel-bootstrap list" - bootstrap_images = \ - self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=command)['stdout_str'].split('\n') - - for line in bootstrap_images: - if "active" in line: - return self.parse_uuid(line)[0] - - logger.warning("No active bootstrap. Fuel-bootstrap list:\n{0}" - .format("".join(bootstrap_images))) - - def delete_bootstrap_image(self, uuid): - command = "fuel-bootstrap delete {0}".format(uuid) - result = self.ssh_manager.execute_on_remote( - ip=self.admin_ip, - cmd=command, - )['stdout_str'] - return self.parse_uuid(result)[0] diff --git a/fuelweb_test/helpers/fuel_release_hacks.py b/fuelweb_test/helpers/fuel_release_hacks.py deleted file mode 100644 index 92f766ecd..000000000 --- a/fuelweb_test/helpers/fuel_release_hacks.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.helpers.utils import generate_yum_repos_config - -from gates_tests.helpers import exceptions - - -def install_mos_repos(): - """ - Upload and install fuel-release packet with mos-repo description - and install necessary packets for packetary Fuel installation - :return: nothing - """ - logger.info("upload fuel-release packet") - if not settings.FUEL_RELEASE_PATH: - raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path') - try: - ssh = SSHManager() - pack_path = '/tmp/' - full_pack_path = os.path.join(pack_path, - 'fuel-release*.noarch.rpm') - ssh.upload_to_remote( - ip=ssh.admin_ip, - source=settings.FUEL_RELEASE_PATH.rstrip('/'), - target=pack_path) - - if settings.RPM_REPOS_YAML: - with ssh.open_on_remote( - ip=ssh.admin_ip, - path='/etc/yum.repos.d/custom.repo', - mode="w") as f: - f.write(generate_yum_repos_config(settings.RPM_REPOS_YAML)) - - if settings.DEB_REPOS_YAML: - ssh = SSHManager() - pack_path = "/root/default_deb_repos.yaml" - ssh.upload_to_remote( - ip=ssh.admin_ip, - source=settings.DEB_REPOS_YAML, - target=pack_path) - - except Exception: - logger.exception("Could not upload package") - raise - - logger.debug("setup MOS repositories") - cmd = "rpm -ivh {}".format(full_pack_path) - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - - cmd = "yum install -y fuel-setup" - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) diff --git a/fuelweb_test/helpers/gerrit/__init__.py b/fuelweb_test/helpers/gerrit/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/helpers/gerrit/content_parser.py b/fuelweb_test/helpers/gerrit/content_parser.py deleted file mode 100644 index 41ec09f98..000000000 --- a/fuelweb_test/helpers/gerrit/content_parser.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
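-# Usage sketch (illustrative; `review` is assumed to be a GerritClient
-# instance as defined in fuelweb_test/helpers/gerrit/gerrit_client.py):
-#
-#     parser = PuppetfileChangesParser(review=review,
-#                                      path='deployment/Puppetfile')
-#     for module, module_path in parser.get_changed_modules():
-#         print(module, module_path)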
- -import re - -# pylint: disable=redefined-builtin -# noinspection PyUnresolvedReferences -from six.moves import xrange -# pylint: enable=redefined-builtin - - -class PuppetfileChangesParser(object): - - def __init__(self, review, path): - self.review = review - self.filepath = path - - def get_changed_modules(self): - content = self.review.get_content_as_dict(self.filepath) - diff = self.review.get_diff_as_dict(self.filepath) - diff_lines_changed = self._get_lines_num_changed_from_diff(diff) - mod_lines_changed = self._get_modules_line_num_changed_from_content( - diff_lines_changed, content) - return self._get_modules_from_lines_changed(mod_lines_changed, content) - - @staticmethod - def _get_lines_num_changed_from_diff(diff): - lines_changed = [] - cursor = 1 - for content in diff['content']: - diff_content = content.values()[0] - if 'ab' in content.keys(): - cursor += len(diff_content) - if 'b' in content.keys(): - lines_changed.extend( - xrange(cursor, len(diff_content) + cursor)) - cursor += len(diff_content) - return lines_changed - - @staticmethod - def _get_modules_line_num_changed_from_content(lines, content): - modules_lines_changed = [] - for num in lines: - index = num - if content[index] == '' or content[index].startswith('#'): - continue - while not content[index].startswith('mod'): - index -= 1 - modules_lines_changed.append(index) - return modules_lines_changed - - def _get_modules_from_lines_changed(self, lines, content): - modules = [] - pattern = re.compile(r"mod '([a-z]+)',") - for num in lines: - match = pattern.match(content[num]) - if match: - module = match.group(1) - modules.append((module, self.filepath)) - return modules diff --git a/fuelweb_test/helpers/gerrit/gerrit_client.py b/fuelweb_test/helpers/gerrit/gerrit_client.py deleted file mode 100644 index 8a8fc0d33..000000000 --- a/fuelweb_test/helpers/gerrit/gerrit_client.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
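-# Usage sketch (the project, change id and patchset number below are
-# placeholders, not values from this repository):
-#
-#     client = GerritClient(endpoint='https://review.openstack.org',
-#                           project='openstack/fuel-library',
-#                           branch='master',
-#                           change_id='I0123456789abcdef',
-#                           patchset_num=1)
-#     changed_files = client.get_files()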
- -import base64 -import os -import requests -from requests.utils import quote - -from fuelweb_test.helpers.gerrit import utils - - -class BaseGerritClient(object): - - def __init__(self, - endpoint='https://review.openstack.org', - project=None, - branch=None, - change_id=None, - patchset_num=None): - self.endpoint = endpoint - self.project = project - self.branch = branch - self.change_id = change_id - self.patchset_num = None if patchset_num is None else str(patchset_num) - self.query = None - - def get_content(self, filename): - self.query = self._build_revision_endpoint('files', - quote(filename, safe=''), - 'content') - return self._send_get_request() - - def get_diff(self, filename): - self.query = self._build_revision_endpoint('files', - quote(filename, safe=''), - 'diff') - return self._send_get_request() - - def get_related_changes(self): - self.query = self._build_revision_endpoint('related') - return self._send_get_request() - - def list_files(self): - self.query = self._build_revision_endpoint('files') - return self._send_get_request() - - def _build_change_id(self): - return '{}~{}~{}'.format(quote(self.project, safe=''), - quote(self.branch, safe=''), - self.change_id) - - def _build_full_change_id(self): - return os.path.join(self.endpoint, 'changes', self._build_change_id()) - - def _build_revision_endpoint(self, *args): - return os.path.join(self._build_full_change_id(), - 'revisions', - self.patchset_num, - *args) - - def _build_reviewer_endpoint(self, *args): - return os.path.join(self._build_full_change_id(), 'reviewers', *args) - - def _send_get_request(self): - return requests.get(self.query, verify=False) - - -class GerritClient(BaseGerritClient): - - def __init__(self, *args, **kwargs): - super(GerritClient, self).__init__(*args, **kwargs) - - def get_files(self): - r = self._request_file_list() - text = r.text - files = utils.filter_response_text(text) - return set(filter(lambda x: x != '/COMMIT_MSG', - utils.json_to_dict(files).keys())) - - def get_content_as_dict(self, filename): - content_decoded = self._request_content(filename).text - content = base64.b64decode(content_decoded) - return {num: line for num, line in enumerate(content.split('\n'), 1)} - - def get_diff_as_dict(self, filename): - diff_raw = self._request_diff(filename).text - diff_filtered = utils.filter_response_text(diff_raw) - return utils.json_to_dict(diff_filtered) - - def get_dependencies_as_dict(self): - dependencies_raw = self._request_related_changes().text - dependencies_filtered = utils.filter_response_text(dependencies_raw) - return utils.json_to_dict(dependencies_filtered) - - @utils.check_status_code(200) - def _request_file_list(self): - return self.list_files() - - @utils.check_status_code(200) - def _request_content(self, filename): - return self.get_content(filename) - - @utils.check_status_code(200) - def _request_diff(self, filename): - return self.get_diff(filename) - - @utils.check_status_code(200) - def _request_related_changes(self): - return self.get_related_changes() diff --git a/fuelweb_test/helpers/gerrit/gerrit_info_provider.py b/fuelweb_test/helpers/gerrit/gerrit_info_provider.py deleted file mode 100644 index 67982b01b..000000000 --- a/fuelweb_test/helpers/gerrit/gerrit_info_provider.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.gerrit.gerrit_client import GerritClient -from fuelweb_test.helpers.gerrit import rules - - -class TemplateMap(object): - - M_PATH = 'deployment/puppet/' - - MAP = [ - {'deployment/Puppetfile': - rules.get_changed_modules_inside_file}, - {os.path.join(M_PATH, 'osnailyfacter/modular/roles/'): - rules.osnailyfacter_roles_rule}, - {os.path.join(M_PATH, 'osnailyfacter/modular/'): - rules.osnailyfacter_modular_rule}, - {os.path.join(M_PATH, 'osnailyfacter/manifests/'): - rules.osnailyfacter_manifest_rule}, - {os.path.join(M_PATH, 'osnailyfacter/templates/'): - rules.osnailyfacter_templates_rule}, - {os.path.join(M_PATH, 'osnailyfacter/'): - rules.no_rule}, - {os.path.join(M_PATH, 'openstack_tasks/Puppetfile'): - rules.get_changed_modules_inside_file}, - {os.path.join(M_PATH, 'openstack_tasks/lib/facter/'): - rules.openstack_tasks_libfacter_rule}, - {os.path.join(M_PATH, 'openstack_tasks/manifests/roles/'): - rules.openstack_tasks_roles_rule}, - {os.path.join(M_PATH, 'openstack_tasks/examples/roles/'): - rules.openstack_tasks_roles_rule}, - {os.path.join(M_PATH, 'openstack_tasks/manifests/'): - rules.openstack_manifest_rule}, - {os.path.join(M_PATH, 'openstack_tasks/examples/'): - rules.openstack_examples_rule}, - {os.path.join(M_PATH, 'openstack_tasks/'): - rules.no_rule}, - {M_PATH: - rules.common_rule}, - ] - - -class FuelLibraryModulesProvider(object): - - def __init__(self, review): - self.changed_modules = {} - self.review = review - - @classmethod - def from_environment_vars(cls, endpoint='https://review.openstack.org'): - review = GerritClient(endpoint, - project=settings.GERRIT_PROJECT, - branch=settings.GERRIT_BRANCH, - change_id=settings.GERRIT_CHANGE_ID, - patchset_num=settings.GERRIT_PATCHSET_NUMBER) - return cls(review) - - def get_changed_modules(self): - logger.debug('Review details: branch={0}, id={1}, patchset={2}' - .format(self.review.branch, - self.review.change_id, - self.review.patchset_num)) - files = self.review.get_files() - for _file in files: - self._apply_rule(review=self.review, _file=_file) - return self.changed_modules - - def _add_module(self, module, module_path): - logger.debug("Add module '{}' to changed modules".format(module)) - if module in self.changed_modules: - self.changed_modules[module].add(module_path) - else: - self.changed_modules[module] = {module_path} - - def _add_modules(self, modules): - for module, module_path in modules: - self._add_module(module, module_path) - - def _apply_rule(self, review, _file): - for path_rule in TemplateMap.MAP: - tmpl, rule = next(iter(path_rule.items())) - if _file.startswith(tmpl): - logger.debug("Using '{0}' rule with '{1}' template " - "for '{2}' filename".format(rule.__name__, - tmpl, - _file)) - modules = rules.invoke_rule(review, _file, rule) - if modules: - self._add_modules(modules) - return diff --git a/fuelweb_test/helpers/gerrit/rules.py b/fuelweb_test/helpers/gerrit/rules.py deleted file mode 100644 index 9287c5415..000000000 --- a/fuelweb_test/helpers/gerrit/rules.py +++ /dev/null @@ 
-1,101 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from fuelweb_test.helpers.gerrit.content_parser import PuppetfileChangesParser - - -FUEL_LIBRARY_PROJECT_NAME = 'fuel-library' - - -def invoke_rule(review, path, rule): - if rule.__name__ == 'get_changed_modules_inside_file': - return rule(review, path) - else: - return rule(path) - - -def get_changed_modules_inside_file(review, filename): - parser = PuppetfileChangesParser(review=review, path=filename) - return [(module, os.path.join(FUEL_LIBRARY_PROJECT_NAME, module_path)) - for module, module_path in parser.get_changed_modules()] - - -def no_rule(path): - return [] - - -def common_rule(path): - return _apply_standard_rule(path=path, mod_depth=2) - - -def osnailyfacter_roles_rule(path): - return _apply_subdir_rule(path=path, subdir='roles', mod_depth=5) - - -def osnailyfacter_modular_rule(path): - return _apply_standard_rule(path=path) - - -def osnailyfacter_manifest_rule(path): - return _apply_standard_rule(path=path) - - -def osnailyfacter_templates_rule(path): - return _apply_standard_rule(path=path) - - -def openstack_tasks_libfacter_rule(path): - return _apply_standard_rule(path=path, mod_depth=5) - - -def openstack_tasks_roles_rule(path): - return _apply_subdir_rule(path=path, subdir='roles', mod_depth=4) - - -def openstack_manifest_rule(path): - return _apply_standard_rule(path=path) - - -def openstack_examples_rule(path): - return _apply_standard_rule(path=path) - - -def _join_module_path(split_path, depth): - return os.path.join(FUEL_LIBRARY_PROJECT_NAME, *split_path[:depth]) - - -def _apply_subdir_rule(path, subdir, mod_depth=4): - """Returns module name and module path if not given subdir, otherwise - returns module combined with given subdir. - """ - split_path = path.split('/') - module = split_path[mod_depth] - if module == subdir: - filename, _ = os.path.splitext(os.path.basename(path)) - module = '{}/{}'.format(subdir, filename) - module_path = _join_module_path(split_path, mod_depth + 2) - return [(module, module_path)] - - -def _apply_standard_rule(path, mod_depth=4): - """Returns module name and module path by applying the following rule: - if this is a directory, then use directory name as the module name, - otherwise use filename without extension as the module name. - """ - split_path = path.split('/') - module, _ = os.path.splitext(split_path[mod_depth]) - module_path = _join_module_path(split_path, mod_depth + 1) - return [(module, module_path)] diff --git a/fuelweb_test/helpers/gerrit/utils.py b/fuelweb_test/helpers/gerrit/utils.py deleted file mode 100644 index c77843043..000000000 --- a/fuelweb_test/helpers/gerrit/utils.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-
-def check_status_code(code):
-    def outer_wrap(f):
-        def inner_wrap(*args, **kwargs):
-            r = f(*args, **kwargs)
-            if r.status_code != code:
-                raise Exception("Unexpected status code. "
-                                "Wanted status code: {0}. "
-                                "Got status code: {1}"
-                                .format(code, r.status_code))
-            return r
-        return inner_wrap
-    return outer_wrap
-
-
-def json_to_dict(data):
-    return dict(json.loads(data))
-
-
-def filter_gerrit_response_separator(data):
-    return data.replace(")]}\'", "")
-
-
-def filter_newlines(data):
-    return data.replace('\n', '')
-
-
-def filter_response_text(data):
-    data = filter_gerrit_response_separator(data)
-    data = filter_newlines(data)
-    return data
diff --git a/fuelweb_test/helpers/granular_deployment_checkers.py b/fuelweb_test/helpers/granular_deployment_checkers.py
deleted file mode 100644
index 02557994b..000000000
--- a/fuelweb_test/helpers/granular_deployment_checkers.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import time
-
-
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_true
-
-from fuelweb_test import logger
-
-
-def check_hiera_resources(remote, file_name=None):
-    cmd_sh = 'if [ -d /etc/hiera ] ; then echo "fine" ; fi'
-    output = ''.join(remote.execute(cmd_sh)['stdout'])
-    assert_true('fine' in output, output)
-    if not file_name:
-        output_f = ''.join(remote.execute(
-            'if [ -r /etc/hiera.yaml ] ; then echo "passed" ; fi')['stdout'])
-        assert_true('passed' in output_f, output_f)
-    else:
-        output_f = ''.join(remote.execute(
-            'if [ -r /etc/%s ] ; then echo "passed" ; fi' % file_name)[
-            'stdout'])
-        assert_true('passed' in output_f,
-                    'Can not find passed result in '
-                    'output {0}'.format(output_f))
-
-
-def get_hiera_data(remote, data):
-    cmd = 'hiera {}'.format(data)
-    res = remote.execute(cmd)['stdout']
-    return res
-
-
-def check_interface_status(remote, iname):
-    # 'ethtool <iface>' reports 'Link detected: yes' when the NIC has link.
-    cmd = 'ethtool {0} | grep "Link detected"'.format(iname)
-    result = remote.execute(cmd)
-    assert_equal(0, result['exit_code'],
                 "Non-zero exit code stderr {0}, "
-                 "stdout {1}".format(result['stderr'], result['stdout']))
-
-    assert_true('yes' in ''.join(result['stdout']),
-                "No link detected for interface {0},"
-                " Actual stdout {1}".format(iname, result['stdout']))
-
-
-def ping_remote_net(remote, ip):
-    cmd = "ping -q -c1 -w10 {0}".format(ip)
-    res = remote.execute(cmd)
-    logger.debug('Current res from ping is {0}'.format(res))
-    assert_equal(
-        res['exit_code'], 0,
-        "Ping of {0} ended with non-zero exit code. "
-        "Stdout is {1}, stderr {2}".format(
-            ip, ''.join(res['stdout']), ''.join(res['stderr'])))
-
-
-def check_logging_task(remote, conf_name):
-    # '-r' tests that the rsyslog config file exists and is readable.
-    cmd_sh = 'if [ -r /etc/rsyslog.d/{0} ] ; then echo "fine" ; fi'.format(
-        conf_name)
-    output = ''.join(remote.execute(cmd_sh)['stdout'])
-    assert_true('fine' in output, output)
-
-
-def check_tools_task(remote, tool_name):
-    cmd_sh = 'pgrep {0}'.format(tool_name)
-    output = remote.execute(cmd_sh)
-    assert_equal(
-        0, output['exit_code'],
-        "Command {0} failed with non zero exit code, current output is:"
-        " stdout {1}, stderr: {2} ".format(
-            cmd_sh, ''.join(output['stdout']), ''.join(output['stderr'])))
-
-
-def run_check_from_task(remote, path):
-    res = remote.execute('{0}'.format(path))
-    try:
-        assert_equal(
-            0, res['exit_code'],
-            "Check {0} finishes with non zero exit code, stderr is {1}, "
-            "stdout is {2} on remote".format(
-                path, res['stderr'], res['stdout']))
-    except AssertionError:
-        # Some checks are not ready right after deployment; retry once
-        # after a short delay before failing the task.
-        time.sleep(60)
-        logger.info('remote is {0}'.format(remote))
-        res = remote.execute('{0}'.format(path))
-        assert_equal(
-            0, res['exit_code'],
-            "Check {0} finishes with non zero exit code, stderr is {1}, "
-            "stdout is {2} on remote".format(
-                path, res['stderr'], res['stdout']))
diff --git a/fuelweb_test/helpers/instance_initial_scenario b/fuelweb_test/helpers/instance_initial_scenario
deleted file mode 100644
index 5514fa70d..000000000
--- a/fuelweb_test/helpers/instance_initial_scenario
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-echo "Creating test file"
-touch /home/test_file
-
-echo "Creating volume mount script on instance"
-echo -e '#!/bin/sh\nsudo /usr/sbin/mkfs.ext4 /dev/vdb | logger -t mount_volume.sh\nsudo mount -t ext4 /dev/vdb /mnt | logger -t mount_volume.sh\nmount | grep /mnt | logger -t mount_volume.sh' | tee /home/mount_volume.sh
-chmod 777 /home/mount_volume.sh
-
-echo -e "test\ntest" | passwd cirros
diff --git a/fuelweb_test/helpers/ironic_actions.py b/fuelweb_test/helpers/ironic_actions.py
deleted file mode 100644
index 2840a43f9..000000000
--- a/fuelweb_test/helpers/ironic_actions.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-from devops.helpers.helpers import tcp_ping
-from devops.helpers.helpers import wait
-
-from fuelweb_test.helpers import os_actions
-
-
-class IronicActions(os_actions.OpenStackActions):
-    """IronicActions."""  # TODO documentation
-
-    def __init__(self, controller_ip, user='admin',
-                 passwd='admin', tenant='admin'):
-        super(IronicActions, self).__init__(controller_ip,
-                                            user, passwd,
-                                            tenant)
-
-    @staticmethod
-    def upload_user_image(nailgun_node, ssh_manager, img_url):
-        disk_info = [{"name": "vda", "extra": [], "free_space": 11000,
-                      "type": "disk", "id": "vda", "size": 11000,
-                      "volumes": [{"mount": "/", "type": "partition",
                                   "file_system": "ext4", "size": 10000}]}]
-        cmd = ('. 
/root/openrc; cd /tmp/; ' - 'curl {img_url} | tar -xzp; ' - 'glance image-create --name virtual_trusty_ext4 ' - '--disk-format raw --container-format bare ' - '--file trusty-server-cloudimg-amd64.img --visibility public ' - '--property cpu_arch="x86_64" ' - '--property hypervisor_type="baremetal" ' - '--property fuel_disk_info=\'{disk_info}\'').format( - disk_info=json.dumps(disk_info), - img_url=img_url) - - ssh_manager.execute_on_remote(nailgun_node['ip'], cmd=cmd) - - def enroll_ironic_node(self, ironic_slave, hw_ip): - deploy_kernel = self.get_image_by_name('ironic-deploy-linux') - deploy_ramdisk = self.get_image_by_name('ironic-deploy-initramfs') - deploy_squashfs = self.get_image_by_name('ironic-deploy-squashfs') - - libvirt_uri = 'qemu+tcp://{server_ip}/system'.format( - server_ip=hw_ip) - driver_info = {'libvirt_uri': libvirt_uri, - 'deploy_kernel': deploy_kernel.id, - 'deploy_ramdisk': deploy_ramdisk.id, - 'deploy_squashfs': deploy_squashfs.id} - - mac_address = ironic_slave.interface_by_network_name( - 'ironic').mac_address - - properties = {'memory_mb': ironic_slave.memory, - 'cpu_arch': ironic_slave.architecture, - 'local_gb': '50', - 'cpus': ironic_slave.vcpu} - - ironic_node = self.create_ironic_node(driver='fuel_libvirt', - driver_info=driver_info, - properties=properties) - self.create_ironic_port(address=mac_address, - node_uuid=ironic_node.uuid) - - @staticmethod - def wait_for_ironic_hypervisors(ironic_conn, ironic_slaves): - - def _wait_for_ironic_hypervisor(): - hypervisors = ironic_conn.get_hypervisors() or [] - ironic_hypervisors = [h for h in hypervisors if - h.hypervisor_type == 'ironic'] - - if len(ironic_slaves) == len(ironic_hypervisors): - for hypervisor in ironic_hypervisors: - if hypervisor.memory_mb == 0: - return False - return True - return False - - wait(_wait_for_ironic_hypervisor, - timeout=60 * 10, - timeout_msg='Failed to update hypervisor details') - - def wait_for_vms(self, ironic_conn): - srv_list = ironic_conn.get_servers() - for srv in srv_list: - wait(lambda: self.get_instance_detail(srv).status == "ACTIVE", - timeout=60 * 30, timeout_msg='Server didn\'t became active') - - @staticmethod - def verify_vms_connection(ironic_conn): - srv_list = ironic_conn.get_servers() - for srv in srv_list: - wait(lambda: tcp_ping(srv.networks['baremetal'][0], 22), - timeout=60 * 10, timeout_msg='Failed to connect to port 22') - - def delete_servers(self, ironic_conn): - srv_list = ironic_conn.get_servers() - for srv in srv_list: - self.nova.servers.delete(srv) - - def create_ironic_node(self, **kwargs): - return self.ironic.node.create(**kwargs) - - def create_ironic_port(self, **kwargs): - return self.ironic.port.create(**kwargs) diff --git a/fuelweb_test/helpers/log_server.py b/fuelweb_test/helpers/log_server.py deleted file mode 100644 index b16d50f9c..000000000 --- a/fuelweb_test/helpers/log_server.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
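-# Usage sketch (illustrative): run the UDP listener in its own thread,
-# point a node's syslog at it, then poll get_status() for a delivery:
-#
-#     server = TriggeredLogServer(address='0.0.0.0', port=5514)
-#     server.start()
-#     # ... trigger remote logging here ...
-#     assert server.get_status()
-#     server.stop()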
- -import select -import socket -import threading - -from core.helpers.log_helpers import logwrap - - -class LogServer(threading.Thread): - """LogServer.""" # TODO documentation - - @logwrap - def __init__(self, address="localhost", port=5514): - super(LogServer, self).__init__() - self.socket = socket.socket( - socket.AF_INET, socket.SOCK_DGRAM - ) - self.socket.bind((str(address), port)) - self.rlist = [self.socket] - self._stop = threading.Event() - self._handler = self.handler - self._status = False - - def handler(self, messages): - pass - - def set_status(self, status): - self._status = status - - def get_status(self): - return self._status - - def set_handler(self, handler): - self._handler = handler - - @logwrap - def stop(self): - self.socket.close() - self._stop.set() - - def started(self): - return not self._stop.is_set() - - def rude_join(self, timeout=None): - self._stop.set() - super(LogServer, self).join(timeout) - - def join(self, timeout=None): - self.rude_join(timeout) - - @logwrap - def run(self): - while self.started(): - r, _, _ = select.select(self.rlist, [], [], 1) - if self.socket in r: - message, _ = self.socket.recvfrom(2048) - self._handler(message) - - -class TriggeredLogServer(LogServer): - """TriggeredLogServer.""" # TODO documentation - - def __init__(self, address="localhost", port=5514): - super(TriggeredLogServer, self).__init__(address, port) - self.set_handler(self.handler) - - def handler(self, message): - self.set_status(True) diff --git a/fuelweb_test/helpers/metaclasses.py b/fuelweb_test/helpers/metaclasses.py deleted file mode 100644 index 11adcb46a..000000000 --- a/fuelweb_test/helpers/metaclasses.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from warnings import warn - -warn( - 'fuelweb_test.helpers.metaclasses.SingletonMeta is deprecated:' - 'class is moved to devops.helpers.metaclasses.\n' - 'Due to it was single metaclass in file, this file will be deleted in a' - 'short time!', - DeprecationWarning -) - - -class SingletonMeta(type): - """Metaclass for Singleton - - Main goals: not need to implement __new__ in singleton classes - """ - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super( - SingletonMeta, cls).__call__(*args, **kwargs) - return cls._instances[cls] diff --git a/fuelweb_test/helpers/multiple_networks_hacks.py b/fuelweb_test/helpers/multiple_networks_hacks.py deleted file mode 100644 index e2d935ef3..000000000 --- a/fuelweb_test/helpers/multiple_networks_hacks.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# TODO(apanchenko): This file contains hacks (e.g. configuring of dhcp-server -# or firewall on master node) which are used for testing multiple cluster -# networks feature: -# https://blueprints.launchpad.net/fuel/+spec/multiple-cluster-networks -# This code should be removed from tests as soon as automatic cobbler -# configuring for non-default admin (PXE) networks is implemented in Fuel - -from proboscis.asserts import assert_equal - -from core.helpers.log_helpers import logwrap - -from fuelweb_test.helpers.ssh_manager import SSHManager - - -@logwrap -def configure_second_admin_dhcp(ip, interface): - dhcp_conf_file = '/etc/cobbler/dnsmasq.template' - cmd = ("sed '0,/^interface.*/s//\\0\\ninterface={0}/' -i {1};" - "cobbler sync").format(interface, - dhcp_conf_file) - result = SSHManager().execute( - ip=ip, - cmd=cmd - ) - assert_equal(result['exit_code'], 0, ('Failed to add second admin ' - 'network to DHCP server: {0}').format(result)) - - -@logwrap -def configure_second_admin_firewall(ip, network, netmask, interface, - master_ip): - # Allow input/forwarding for nodes from the second admin network and - # enable source NAT for UDP (tftp) and HTTP (proxy server) traffic - # on master node - rules = [ - ('-I INPUT -i {0} -m comment --comment "input from admin network" ' - '-j ACCEPT').format(interface), - ('-t nat -I POSTROUTING -s {0}/{1} -o e+ -m comment --comment ' - '"004 forward_admin_net2" -j MASQUERADE'). - format(network, netmask), - ("-t nat -I POSTROUTING -o {0} -d {1}/{2} -p udp -m addrtype " - "--src-type LOCAL -j SNAT --to-source {3}").format(interface, - network, netmask, - master_ip), - ("-t nat -I POSTROUTING -d {0}/{1} -p tcp --dport 8888 -j SNAT " - "--to-source {2}").format(network, netmask, master_ip), - ('-I FORWARD -i {0} -m comment --comment ' - '"forward custom admin net" -j ACCEPT').format(interface) - ] - - for rule in rules: - cmd = 'iptables {0}'.format(rule) - result = SSHManager().execute( - ip=ip, - cmd=cmd - ) - assert_equal(result['exit_code'], 0, - ('Failed to add firewall rule for admin net on' - ' master node: {0}, {1}').format(rule, result)) - - # Save new firewall configuration - cmd = 'service iptables save' - result = SSHManager().execute( - ip=ip, - cmd=cmd - ) - assert_equal(result['exit_code'], 0, - ('Failed to save firewall configuration on master node:' - ' {0}').format(result)) diff --git a/fuelweb_test/helpers/nessus.py b/fuelweb_test/helpers/nessus.py deleted file mode 100644 index 67657b370..000000000 --- a/fuelweb_test/helpers/nessus.py +++ /dev/null @@ -1,197 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
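For reference: configure_second_admin_firewall above is, at its core, string templating; it renders a fixed list of iptables arguments from the network parameters and runs each over SSH. A sketch of just that rendering step, runnable locally without SSH; the function name and sample values are illustrative, and the rule comments and the tftp SNAT rule are omitted for brevity:

    def render_admin_net_rules(interface, network, netmask, master_ip):
        # A condensed version of the rule list built in
        # configure_second_admin_firewall above.
        return [
            '-I INPUT -i {0} -j ACCEPT'.format(interface),
            '-t nat -I POSTROUTING -s {0}/{1} -o e+ -j MASQUERADE'.format(
                network, netmask),
            ('-t nat -I POSTROUTING -d {0}/{1} -p tcp --dport 8888 '
             '-j SNAT --to-source {2}').format(network, netmask, master_ip),
            '-I FORWARD -i {0} -j ACCEPT'.format(interface),
        ]

    for rule in render_admin_net_rules('eth1', '10.109.1.0', '24',
                                       '10.109.1.2'):
        print('iptables ' + rule)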
-import json -import os - -from devops.helpers.helpers import wait -from proboscis import asserts -import requests -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import urllib -# pylint: enable=import-error - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import token - - -class NessusClient(object): - def __init__(self, hostname, port, username, password, ssl_verify=False): - self.nessus_auth_token = None - self.nessus_base_url = 'https://{0}:{1}'.format(hostname, port) - self.nessus_username = username - self.nessus_password = password - self.ssl_verify = ssl_verify - self.login() - - @staticmethod - def log_request(url, method, request_headers, request_body, - status_code, response_headers, response_body): - log_fmt = ("Request {method} {url}\n" - "Request - Headers: {request_headers}\n" - " Body: {request_body}\n" - "Response status code: {status_code}\n" - "Response - Headers: {response_headers}\n" - " Body: {response_body}\n") - - logger.info(log_fmt.format(url=url, - method=method, - request_headers=request_headers, - request_body=request_body, - status_code=status_code, - response_headers=response_headers, - response_body=response_body)) - - @token - def request(self, method, url, body=None, **kwargs): - headers = {'X-Cookie': 'token={0}'.format(self.nessus_auth_token), - 'Content-Type': 'application/json'} - url = urllib.parse.urljoin(self.nessus_base_url, url) - - response = requests.request( - method, url, data=body, headers=headers, - verify=self.ssl_verify, **kwargs) - - self.log_request(url, method, headers, body, - response.status_code, response.headers, - response.content[:1024]) - - asserts.assert_equal( - response.status_code, 200, - "Request failed: {0}\n{1}".format(response.status_code, - response.content)) - - return response - - def get(self, url, body=None): - return self.request("GET", url, json.dumps(body)).json() - - def get_raw(self, url, body=None): - return self.request("GET", url, json.dumps(body)).content - - def post(self, url, body=None): - return self.request("POST", url, json.dumps(body)).json() - - def login(self): - creds = {'username': self.nessus_username, - 'password': self.nessus_password} - - self.nessus_auth_token = self.post('/session', creds)['token'] - - def add_policy(self, policy_def): - return self.post('/policies', policy_def) - - def list_policy_templates(self): - return self.get('/editor/policy/templates')['templates'] - - def add_cpa_policy(self, name, description, pid): - policy_def = \ - { - "uuid": pid, - "settings": { - "name": name, - "description": description - }, - "credentials": { - "add": { - "Host": { - "SSH": [ - { - "auth_method": "password", - "username": "root", - "password": "r00tme", - "elevate_privileges_with": "Nothing" - } - ] - } - } - } - } - - return self.add_policy(policy_def)['policy_id'] - - def add_wat_policy(self, name, desc, pid): - policy_def = \ - { - "uuid": pid, - "settings": { - "name": name, - "description": desc, - "discovery_mode": "Port scan (all ports)", - "assessment_mode": "Scan for all web vulnerabilities " - "(complex)", - - } - } - - return self.add_policy(policy_def)['policy_id'] - - def create_scan(self, name, description, target_ip, - policy_id, policy_template_id): - scan_def = \ - { - "uuid": policy_template_id, - "settings": { - "name": name, - "description": description, - "scanner_id": "1", - "policy_id": policy_id, - "text_targets": target_ip, - "launch": "ONETIME", - "enabled": False, - "launch_now": False - } - } - - 
return self.post('/scans', scan_def)['scan']['id'] - - def launch_scan(self, scan_id): - return self.post('/scans/{0}/launch'.format(scan_id))['scan_uuid'] - - def get_scan_history(self, scan_id, history_id): - return self.get('/scans/{0}'.format(scan_id), - {'history_id': history_id})['info'] - - def get_scan_status(self, scan_id, history_id): - return self.get_scan_history(scan_id, history_id)['status'] - - def list_scan_history_ids(self, scan_id): - data = self.get('/scans/{0}'.format(scan_id)) - return dict((h['uuid'], h['history_id']) for h in data['history']) - - def check_scan_export_status(self, scan_id, file_id): - return self.get('/scans/{0}/export/{1}/status' - .format(scan_id, file_id))['status'] == 'ready' - - def export_scan(self, scan_id, history_id, save_format): - export_def = {'history_id': history_id, - 'format': save_format, - 'chapters': 'vuln_hosts_summary'} - file_id = self.post('/scans/{0}/export'.format(scan_id), - body=export_def)['file'] - wait(lambda: self.check_scan_export_status(scan_id, file_id), - interval=10, timeout=600, - timeout_msg='Nessus export scan status != "ready" for ' - ' scan_id={} file_id={}'.format(scan_id, file_id)) - return file_id - - def download_scan_result( - self, scan_id, file_id, scan_type, save_format, file_path): - report = self.get_raw('/scans/{0}/export/{1}/download' - .format(scan_id, file_id)) - - filename = 'nessus_report_scan_{0}_{1}.{2}'\ - .format(scan_id, scan_type, save_format) - file_with_path = os.path.join(file_path, filename) - logger.info("Saving Nessus scan report: {0}".format(file_with_path)) - with open(file_with_path, 'w') as report_file: - report_file.write(report) diff --git a/fuelweb_test/helpers/os_actions.py b/fuelweb_test/helpers/os_actions.py deleted file mode 100644 index e3f9bd52f..000000000 --- a/fuelweb_test/helpers/os_actions.py +++ /dev/null @@ -1,804 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
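For reference: the NessusClient deleted above is typically driven end to end as sketched below: create a policy from a template, build and launch a scan, then export and download the report. This is a hedged sketch, not the removed test code itself; `client` is assumed to be a NessusClient instance, the 'advanced' template name and all names are illustrative, and the caller is expected to poll get_scan_status() until the scan reports 'completed' before exporting:

    def run_nessus_scan(client, target_ip, report_dir):
        # Pick a policy template, then create a policy and a scan from it.
        templates = client.list_policy_templates()
        template_id = next(t['uuid'] for t in templates
                           if t['name'] == 'advanced')
        policy_id = client.add_cpa_policy('cpa_policy', 'example policy',
                                          template_id)
        scan_id = client.create_scan('cpa_scan', 'example scan', target_ip,
                                     policy_id, template_id)
        scan_uuid = client.launch_scan(scan_id)
        history_id = client.list_scan_history_ids(scan_id)[scan_uuid]
        # ...wait here until client.get_scan_status(scan_id, history_id)
        # becomes 'completed'...
        file_id = client.export_scan(scan_id, history_id, 'html')
        client.download_scan_result(scan_id, file_id, 'cpa', 'html',
                                    report_dir)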
- -import random - -from devops.error import TimeoutError -from devops.helpers import helpers -from proboscis import asserts - -from fuelweb_test import logger -from fuelweb_test.helpers import common - - -class OpenStackActions(common.Common): - """OpenStackActions.""" # TODO documentation - - def __init__(self, controller_ip, user='admin', - passwd='admin', tenant='admin'): - super(OpenStackActions, self).__init__(controller_ip, - user, passwd, - tenant) - - def _get_cirros_image(self): - for image in self.glance.images.list(): - if image.name.startswith("TestVM"): - return image - - def get_image_by_name(self, name): - for image in self.glance.images.list(): - if image.name.startswith(name): - return image - - def get_hypervisors(self): - hypervisors = self.nova.hypervisors.list() - if hypervisors: - return hypervisors - - def get_hypervisor_vms_count(self, hypervisor): - hypervisor = self.nova.hypervisors.get(hypervisor.id) - return getattr(hypervisor, "running_vms") - - def get_hypervisor_hostname(self, hypervisor): - hypervisor = self.nova.hypervisors.get(hypervisor.id) - return getattr(hypervisor, "hypervisor_hostname") - - def get_srv_hypervisor_name(self, srv): - srv = self.nova.servers.get(srv.id) - return getattr(srv, "OS-EXT-SRV-ATTR:hypervisor_hostname") - - def get_servers(self): - servers = self.nova.servers.list() - if servers: - return servers - - def get_server_by_name(self, name): - servers = self.get_servers() - for srv in servers: - if srv.name == name: - return srv - logger.warning("Instance with name {} was not found".format(name)) - return None - - def get_flavor_by_name(self, name): - flavor_list = self.nova.flavors.list() - for flavor in flavor_list: - if flavor.name == name: - return flavor - logger.warning("Flavor with name {} was not found".format(name)) - return None - - def wait_for_server_is_active(self, server, timeout): - """Wait for server is in active state - - :param server: nova server object - :param timeout: int, timeout in sec - """ - - helpers.wait( - lambda: self.get_instance_detail(server).status == "ACTIVE", - timeout=timeout, - timeout_msg="Create server {!r} failed by timeout. Please, take" - " a look at OpenStack logs".format(server.id)) - srv = self.get_instance_detail(server.id) - logger.debug('The Instance {!r} booted successfully on {!r}' - .format(srv.id, - srv.to_dict()['OS-EXT-SRV-ATTR:host'])) - return srv - - def create_server( - self, - name=None, - security_groups=None, - flavor_id=None, - net_id=None, - timeout=100, - image=None, - **kwargs - ): - """ Creates simple server, like in OSTF. - - :param name: server name, if None -> test-serv + random suffix - :param security_groups: list, if None -> ssh + icmp v4 & icmp v6 - :param flavor_id: create a new flavor if None - :param net_id: network id, could be omitted - :param timeout: int=100 - :param image: TestVM if None. - :return: Server, in started state - """ - - if not name: - name = "test-serv" + str(random.randint(1, 0x7fffffff)) - if not security_groups: - security_groups = [self.create_sec_group_for_ssh()] - if not flavor_id: - flavor = self.create_flavor('test_flavor_{}'. 
-                                        format(random.randint(10, 10000)),
-                                        64, 1, 0)
-            flavor_id = flavor.id
-        if image is None:
-            image = self._get_cirros_image().id
-
-        nics = [{'net-id': net_id}] if net_id else None
-
-        srv = self.nova.servers.create(
-            name=name,
-            image=image,
-            flavor=flavor_id,
-            security_groups=[sec_group.name for sec_group in security_groups],
-            nics=nics,
-            **kwargs)
-        logger.debug('Start instance {!r} ...'.format(srv.id))
-        self.wait_for_server_is_active(srv, timeout)
-        return self.get_instance_detail(srv)
-
-    def create_server_for_migration(self, neutron=True, scenario='',
-                                    timeout=100, filename=None, key_name=None,
-                                    label=None, flavor_id=None, **kwargs):
-        name = "test-serv" + str(random.randint(1, 0x7fffffff))
-        security_group = {}
-        try:
-            if scenario:
-                with open(scenario, "r+") as f:
-                    scenario = f.read()
-        except Exception as exc:
-            logger.error("Error opening file: {:s}".format(exc))
-            raise
-        image_id = self._get_cirros_image().id
-        security_group[self.keystone_access.tenant_id] =\
-            self.create_sec_group_for_ssh()
-        security_groups = [security_group[self.keystone_access.tenant_id].name]
-
-        if neutron:
-            net_label = label if label else 'net04'
-            network = [net.id for net in self.nova.networks.list()
-                       if net.label == net_label]
-
-            kwargs.update({'nics': [{'net-id': network[0]}],
-                           'security_groups': security_groups})
-        else:
-            kwargs.update({'security_groups': security_groups})
-
-        if not flavor_id:
-            flavor = self.create_flavor('test_flavor_{}'.
-                                        format(random.randint(10, 10000)),
-                                        64, 1, 0)
-            flavor_id = flavor.id
-
-        srv = self.nova.servers.create(name=name,
-                                       image=image_id,
-                                       flavor=flavor_id,
-                                       userdata=scenario,
-                                       files=filename,
-                                       key_name=key_name,
-                                       **kwargs)
-        self.wait_for_server_is_active(srv, timeout)
-        return self.get_instance_detail(srv)
-
-    def create_server_from_volume(self, name=None, security_groups=None,
-                                  flavor_id=None, net_id=None, timeout=100,
-                                  image=None, **kwargs):
-        bootable_volume = self.create_volume(
-            image_id=image or self._get_cirros_image().id)
-        kwargs['block_device_mapping'] = {'vda': bootable_volume.id + ':::0'}
-        srv = self.create_server(name=name, security_groups=security_groups,
-                                 flavor_id=flavor_id, net_id=net_id,
-                                 timeout=timeout, image=image, **kwargs)
-        return srv
-
-    def is_srv_deleted(self, srv):
-        for server in self.nova.servers.list():
-            if srv.id == server.id:
-                logger.info("Server found in server list")
-                return False
-        logger.info("Server was successfully deleted")
-        return True
-
-    def verify_srv_deleted(self, srv, timeout=150):
-        helpers.wait(lambda: self.is_srv_deleted(srv),
-                     interval=2, timeout=timeout,
-                     timeout_msg="Server wasn't deleted in "
-                                 "{0} seconds".format(timeout))
-
-    def assign_floating_ip(self, srv, use_neutron=False):
-        if use_neutron:
-            # Find external net id for tenant
-            nets = self.neutron.list_networks()['networks']
-            err_msg = "Active external network not found in nets:{}"
-            ext_net_ids = [
-                net['id'] for net in nets
-                if net['router:external'] and net['status'] == "ACTIVE"]
-            asserts.assert_true(ext_net_ids, err_msg.format(nets))
-            net_id = ext_net_ids[0]
-            # Find instance port
-            ports = self.neutron.list_ports(device_id=srv.id)['ports']
-            err_msg = "No active ports found for instance:{}"
-            asserts.assert_true(ports, err_msg.format(srv.id))
-            port = ports[0]
-            # Create floating IP
-            body = {'floatingip': {'floating_network_id': net_id,
-                                   'port_id': port['id']}}
-            flip = self.neutron.create_floatingip(body)
-            # Wait for the port to become active
-            port_id = flip['floatingip']['port_id']
-            helpers.wait(lambda: self.neutron.show_port(
-                port_id)['port']['status'] == "ACTIVE")
-            return flip['floatingip']
-
-        fl_ips_pool = self.nova.floating_ip_pools.list()
-        if fl_ips_pool:
-            floating_ip = self.nova.floating_ips.create(
-                pool=fl_ips_pool[0].name)
-            self.nova.servers.add_floating_ip(srv, floating_ip)
-            return floating_ip
-
-    def create_sec_group_for_ssh(self):
-        name = "test-sg" + str(random.randint(1, 0x7fffffff))
-        secgroup = self.nova.security_groups.create(
-            name, "descr")
-
-        rulesets = [
-            {
-                # ssh
-                'ip_protocol': 'tcp',
-                'from_port': 22,
-                'to_port': 22,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ping
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ping6
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '::/0',
-            }
-        ]
-
-        for ruleset in rulesets:
-            self.nova.security_group_rules.create(
-                secgroup.id, **ruleset)
-        return secgroup
-
-    def get_srv_host_name(self, srv):
-        # Get the host name the server is currently on
-        srv = self.nova.servers.get(srv.id)
-        return getattr(srv, "OS-EXT-SRV-ATTR:host")
-
-    def get_srv_instance_name(self, srv):
-        # Get the instance name of the server
-        server = self.nova.servers.get(srv.id)
-        return getattr(server, "OS-EXT-SRV-ATTR:instance_name")
-
-    def migrate_server(self, server, host, timeout):
-        curr_host = self.get_srv_host_name(server)
-        logger.debug("Current compute host is {0}".format(curr_host))
-        logger.debug("Start live migration of instance")
-        server.live_migrate(host._info['host_name'])
-        try:
-            helpers.wait(
-                lambda: self.get_instance_detail(server).status == "ACTIVE",
-                timeout=timeout)
-        except TimeoutError:
-            logger.debug("Instance did not become active after migration")
-            asserts.assert_true(
-                self.get_instance_detail(server).status == "ACTIVE",
-                "Instance did not become ACTIVE after live migration, "
-                "current status is {0}".format(
-                    self.get_instance_detail(server).status))
-
-        asserts.assert_true(
-            self.get_srv_host_name(
-                self.get_instance_detail(server)) != curr_host,
-            "Server did not migrate")
-        server = self.get_instance_detail(server.id)
-        return server
-
-    def create_volume(self, size=1, image_id=None, **kwargs):
-        volume = self.cinder.volumes.create(size=size, imageRef=image_id,
-                                            **kwargs)
-        helpers.wait(
-            lambda: self.cinder.volumes.get(volume.id).status == "available",
-            timeout=100)
-        logger.info("Created volume: '{0}', parent image: '{1}'"
-                    .format(volume.id, image_id))
-        return self.cinder.volumes.get(volume.id)
-
-    def delete_volume(self, volume):
-        return self.cinder.volumes.delete(volume)
-
-    def delete_volume_and_wait(self, volume, timeout=60):
-        self.delete_volume(volume)
-        try:
-            helpers.wait(
-                lambda: volume not in self.cinder.volumes.list(),
-                timeout=timeout)
-        except TimeoutError:
-            asserts.assert_false(
-                volume in self.cinder.volumes.list(),
-                "Volume wasn't deleted in {0} sec".format(timeout))
-
-    def attach_volume(self, volume, server, mount='/dev/vdb'):
-        self.cinder.volumes.attach(volume, server.id, mount)
-        logger.debug('The volume {!r} was attached to instance {!r}'
                     .format(volume.id, server.id))
-        return self.cinder.volumes.get(volume.id)
-
-    def extend_volume(self, volume, newsize):
-        self.cinder.volumes.extend(volume, newsize)
-        return self.cinder.volumes.get(volume.id)
-
-    def get_volume_status(self, volume):
-        vol = self.cinder.volumes.get(volume.id)
-        return vol._info['status']
-
-    def get_hosts_for_migr(self, srv_host_name):
-        # Determine which hosts are available for live migration
-        return [ host for host in
self.nova.hosts.list() - if host.host_name != srv_host_name and - host._info['service'] == 'compute'] - - def get_tenant(self, tenant_name): - tenant_list = self.keystone.tenants.list() - for ten in tenant_list: - if ten.name == tenant_name: - return ten - return None - - def get_user(self, username): - user_list = self.keystone.users.list() - for user in user_list: - if user.name == username: - return user - return None - - def create_tenant(self, tenant_name): - tenant = self.get_tenant(tenant_name) - if tenant: - return tenant - return self.keystone.tenants.create(enabled=True, - tenant_name=tenant_name) - - def update_tenant(self, tenant_id, tenant_name=None, description=None, - enabled=None, **kwargs): - self.keystone.tenants.update(tenant_id, tenant_name, description, - enabled) - return self.keystone.tenants.get(tenant_id) - - def delete_tenant(self, tenant): - return self.keystone.tenants.delete(tenant) - - def create_user(self, username, passw, tenant): - user = self.get_user(username) - if user: - return user - return self.keystone.users.create( - name=username, - password=passw, - tenant_id=tenant.id) - - def update_user_enabled(self, user, enabled=True): - self.keystone.users.update_enabled(user, enabled) - return self.keystone.users.get(user) - - def delete_user(self, user): - return self.keystone.users.delete(user) - - def create_user_and_tenant(self, tenant_name, username, password): - tenant = self.create_tenant(tenant_name) - return self.create_user(username, password, tenant) - - def get_network(self, network_name): - net_list = self.neutron.list_networks() - for net in net_list['networks']: - if net['name'] == network_name: - return net - return None - - def get_network_by_type(self, net_type): - """Get the first network by type: external or internal - - :param net_type: str, value is external or internal - :return: dict, network data - """ - if net_type == 'external': - flag = True - elif net_type == 'internal': - flag = False - else: - raise Exception('Type should be "external" or "internal".' 
- ' Your type is {!r}!'.format(net_type)) - net_list = self.neutron.list_networks() - for net in net_list['networks']: - if net['router:external'] == flag: - return net - return None - - def get_subnet(self, subnet_name): - subnet_list = self.neutron.list_subnets() - for subnet in subnet_list['subnets']: - if subnet['name'] == subnet_name: - return subnet - return None - - def nova_get_net(self, net_name): - for net in self.nova.networks.list(): - if net.human_id == net_name: - return net - return None - - def get_router(self, network): - router_list = self.neutron.list_routers() - for router in router_list['routers']: - network_id = router['external_gateway_info'].get('network_id') - if network_id == network['id']: - return router - return None - - def create_image(self, **kwargs): - image = self.glance.images.create(**kwargs) - logger.info("Created image: '{0}'".format(image.id)) - logger.info("Image status: '{0}'".format(image.status)) - return image - - def get_image_list(self): - return self.glance.images.list() - - def update_image(self, image, **kwargs): - self.glance.images.update(image, **kwargs) - - def get_image(self, image_name): - image_list = self.get_image_list() - for img in image_list: - if img.name == image_name: - return img - return None - - def get_image_data(self, image_name): - return self.glance.images.data(image_name) - - def get_security_group_list(self): - return self.nova.security_groups.list() - - def get_security_group(self, sg_name): - sg_list = self.get_security_group_list() - for sg in sg_list: - if sg.name == sg_name: - return sg - return None - - def get_nova_service_list(self): - return self.nova.services.list() - - def get_nova_service_status(self, service): - services = self.get_nova_service_list() - for s in services: - if s.host == service.host and s.binary == service.binary: - return s.status - - def enable_nova_service(self, service, timeout=30): - self.nova.services.enable(service.host, service.binary) - helpers.wait( - lambda: self.get_nova_service_status(service) == "enabled", - timeout=timeout, - timeout_msg="Service {0} on {1} does not reach enabled " - "state, current state " - "is {2}".format(service.binary, service.host, - service.status)) - - def disable_nova_service(self, service, timeout=30): - self.nova.services.disable(service.host, service.binary) - helpers.wait( - lambda: self.get_nova_service_status(service) == "disabled", - timeout=timeout, - timeout_msg="Service {0} on {1} does not reach disabled " - "state, current state " - "is {2}".format(service.binary, service.host, - service.status)) - - def delete_nova_service(self, service_id): - return self.nova.services.delete(service_id) - - def get_nova_network_list(self): - return self.nova.networks.list() - - def get_neutron_router(self): - return self.neutron.list_routers() - - def get_routers_ids(self): - result = self.get_neutron_router() - ids = [i['id'] for i in result['routers']] - return ids - - def get_l3_for_router(self, router_id): - return self.neutron.list_l3_agent_hosting_routers(router_id) - - def get_l3_agent_ids(self, router_id): - result = self.get_l3_for_router(router_id) - ids = [i['id'] for i in result['agents']] - return ids - - def get_l3_agent_hosts(self, router_id): - result = self.get_l3_for_router(router_id) - hosts = [i['host'] for i in result['agents']] - return hosts - - def remove_l3_from_router(self, l3_agent, router_id): - return self.neutron.remove_router_from_l3_agent(l3_agent, router_id) - - def add_l3_to_router(self, l3_agent, router_id): - return 
self.neutron.add_router_to_l3_agent( - l3_agent, {"router_id": router_id}) - - def list_agents(self): - return self.neutron.list_agents() - - def get_available_l3_agents_ids(self, hosted_l3_agent_id): - result = self.list_agents() - ids = [i['id'] for i in result['agents'] - if i['binary'] == 'neutron-l3-agent'] - ids.remove(hosted_l3_agent_id) - return ids - - def list_dhcp_agents_for_network(self, net_id): - return self.neutron.list_dhcp_agent_hosting_networks(net_id) - - def get_node_with_dhcp_for_network(self, net_id): - result = self.list_dhcp_agents_for_network(net_id) - nodes = [i['host'] for i in result['agents']] - return nodes - - def get_neutron_dhcp_ports(self, net_id): - ports = self.neutron.list_ports()['ports'] - network_ports = [x for x in ports - if x['device_owner'] == 'network:dhcp' and - x['network_id'] == net_id] - return network_ports - - def create_pool(self, pool_name): - sub_net = self.neutron.list_subnets() - body = {"pool": {"name": pool_name, - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP", - "subnet_id": sub_net['subnets'][0]['id']}} - return self.neutron.create_pool(body=body) - - def get_vips(self): - return self.neutron.list_vips() - - def create_vip(self, name, protocol, port, pool): - sub_net = self.neutron.list_subnets() - logger.debug("subnet list is {0}".format(sub_net)) - logger.debug("pool is {0}".format(pool)) - body = {"vip": { - "name": name, - "protocol": protocol, - "protocol_port": port, - "subnet_id": sub_net['subnets'][0]['id'], - "pool_id": pool['pool']['id'] - }} - return self.neutron.create_vip(body=body) - - def delete_vip(self, vip): - return self.neutron.delete_vip(vip) - - def get_vip(self, vip): - return self.neutron.show_vip(vip) - - @staticmethod - def get_nova_instance_ip(srv, net_name='novanetwork', addrtype='fixed'): - for network_label, address_list in srv.addresses.items(): - if network_label != net_name: - continue - for addr in address_list: - if addr['OS-EXT-IPS:type'] == addrtype: - return addr['addr'] - raise Exception("Instance {0} doesn't have {1} address for network " - "{2}, available addresses: {3}".format(srv.id, - addrtype, - net_name, - srv.addresses)) - - def get_instance_mac(self, remote, srv): - res = ''.join(remote.execute('virsh dumpxml {0} | grep "mac address="' - .format(self.get_srv_instance_name(srv)))['stdout']) - return res.split('\'')[1] - - def create_network(self, network_name, **kwargs): - body = {'network': {'name': network_name}} - if kwargs: - body['network'].update(kwargs) - return self.neutron.create_network(body) - - def create_subnet( - self, subnet_name, network_id, cidr, ip_version=4, **kwargs): - body = {"subnet": {"name": subnet_name, "network_id": network_id, - "ip_version": ip_version, "cidr": cidr}} - if kwargs: - body['subnet'].update(kwargs) - subnet = self.neutron.create_subnet(body) - return subnet['subnet'] - - def get_router_by_name(self, router_name): - router_list = self.neutron.list_routers() - for router in router_list['routers']: - if router['name'] == router_name: - return router - return None - - def add_router_interface(self, router_id, subnet_id, port_id=None): - body = {"router_id": router_id, "subnet_id": subnet_id} - if port_id: - body["port_id"] = port_id - self.neutron.add_interface_router(router_id, body) - return None - - def create_router(self, name, tenant): - """Creates router at neutron. 
- - :param name: str, router name - :param tenant: tenant - :return: router object - """ - external_network = None - for network in self.neutron.list_networks()["networks"]: - if network.get("router:external"): - external_network = network - - if not external_network: - raise RuntimeError('Cannot find the external network.') - - gw_info = { - "network_id": external_network["id"], - "enable_snat": True - } - - router_info = { - "router": { - "name": name, - "external_gateway_info": gw_info, - "tenant_id": tenant.id - } - } - return self.neutron.create_router(router_info)['router'] - - def get_keystone_endpoints(self): - endpoints = self.keystone.endpoints.list() - return endpoints - - def boot_parameterized_vms(self, attach_volume=False, - boot_vm_from_volume=False, - enable_floating_ips=False, - on_each_compute=False, - **kwargs): - """Boot parameterized VMs - - :param attach_volume: bool, flag for attaching of volume to booted VM - :param boot_vm_from_volume: bool, flag for the boot of VM from volume - :param enable_floating_ips: bool, flag for assigning of floating ip to - booted VM - :param on_each_compute: bool, boot VMs on each compute or only one - :param kwargs: dict, it includes the same keys like for - nova.servers.create - :return: list, list of vms data dicts - """ - vms_data = [] - if on_each_compute: - for hypervisor in self.get_hypervisors(): - kwargs['availability_zone'] = '{}:{}'.format( - 'nova', - hypervisor.hypervisor_hostname) - vms_data.extend( - self.boot_parameterized_vms( - attach_volume=attach_volume, - boot_vm_from_volume=boot_vm_from_volume, - enable_floating_ips=enable_floating_ips, - on_each_compute=False, - **kwargs)) - return vms_data - - fixed_network_id = self.get_network_by_type('internal')['id'] - if boot_vm_from_volume: - server = self.create_server_from_volume(net_id=fixed_network_id, - **kwargs) - else: - server = self.create_server(net_id=fixed_network_id, - **kwargs) - vm_data = {'server': server.to_dict()} - - if attach_volume: - volume = self.create_volume() - self.attach_volume(volume, server) - volume = self.cinder.volumes.get(volume.id) - # cinderclient in kilo does not contains "to_dict" method for - # volume object - vm_data['attached_volume'] = volume._info - - if enable_floating_ips: - self.assign_floating_ip(server) - - server = self.get_instance_detail(server) - vm_data['server'] = server.to_dict() - vms_data.append(vm_data) - return vms_data - - def create_network_resources_for_ipv6_test(self, tenant): - """Create network resources: two dualstack network IPv6 subnets - (should be in SLAAC mode, address space should not intersect), - virtual router and set gateway. 
- - :param tenant: obj, object of keystone tenant - """ - net1 = self.create_network( - network_name='net1', - tenant_id=tenant.id)['network'] - net2 = self.create_network( - network_name='net2', - tenant_id=tenant.id)['network'] - - subnet_1_v4 = self.create_subnet( - subnet_name='subnet_1_v4', - network_id=net1['id'], - cidr='192.168.100.0/24', - ip_version=4) - - subnet_1_v6 = self.create_subnet( - subnet_name='subnet_1_v6', - network_id=net1['id'], - ip_version=6, - cidr="2001:db8:100::/64", - gateway_ip="2001:db8:100::1", - ipv6_ra_mode="slaac", - ipv6_address_mode="slaac") - - subnet_2_v4 = self.create_subnet( - subnet_name='subnet_2_v4', - network_id=net2['id'], - cidr='192.168.200.0/24', - ip_version=4) - - subnet_2_v6 = self.create_subnet( - subnet_name='subnet_2_v6', - network_id=net2['id'], - ip_version=6, - cidr="2001:db8:200::/64", - gateway_ip="2001:db8:200::1", - ipv6_ra_mode="slaac", - ipv6_address_mode="slaac") - - router = self.create_router('test_router', tenant=tenant) - - self.add_router_interface( - router_id=router["id"], - subnet_id=subnet_1_v4["id"]) - - self.add_router_interface( - router_id=router["id"], - subnet_id=subnet_1_v6["id"]) - - self.add_router_interface( - router_id=router["id"], - subnet_id=subnet_2_v4["id"]) - - self.add_router_interface( - router_id=router["id"], - subnet_id=subnet_2_v6["id"]) - return net1, net2 diff --git a/fuelweb_test/helpers/ovs.py b/fuelweb_test/helpers/ovs.py deleted file mode 100644 index 54f1a121d..000000000 --- a/fuelweb_test/helpers/ovs.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuelweb_test import logger - - -def ovs_get_data(remote, table, columns=None): - """Get data from a specified OpenVSwitch table - - :param SSHClient remote: fuel-devops.helpers.helpers object - :param str table: ovs table name (see `ovsdb-client list-tables`) - :param list columns: - list of strings to get specified columns. if None - all columns - will be requested. 
- :return dict: data from JSON object - """ - if columns: - col = '--columns=' + ','.join(columns) - else: - col = '' - cmd = ('ovs-vsctl --oneline --format=json {columns} list {table}' - .format(columns=col, table=table)) - res = remote.check_call(cmd).stdout_json - logger.debug("OVS output of the command '{0}': {1}".format(cmd, res)) - return res - - -def ovs_decode_columns(ovs_data): - """Decode columns from OVS data format to a python dict - :param str ovs_data: data from JSON object - :return list: list of decoded dicts - """ - data = ovs_data['data'] - headings = ovs_data['headings'] - res = [] - for fields in data: - res_fields = {} - for i, field in enumerate(fields): - if isinstance(field, list): - if field[0] == 'map': - d = {} - for f in field[1]: - d[f[0]] = f[1] - res_fields[headings[i]] = d - elif field[0] == 'uuid': - res_fields[headings[i]] = {'uuid': field[1]} - else: - res_fields[headings[i]] = field - else: - res_fields[headings[i]] = field - res.append(res_fields) - return res - - -def ovs_get_tag_by_port(remote, port): - """Get the tag used for OVS interface by Neutron port ID - - :param SSHClient remote: fuel-devops.helpers.helpers object - :param str port: Neutron port ID - :return str: tag number - """ - interfaces_raw = ovs_get_data(remote, - table='Interface', - columns=['external_ids', 'name']) - interfaces = ovs_decode_columns(interfaces_raw) - - ports_ifaces = {x['external_ids']['iface-id']: x['name'] - for x in interfaces if 'iface-id' in x['external_ids']} - logger.debug("OVS interfaces: {0}".format(ports_ifaces)) - if port not in ports_ifaces: - raise ValueError("Neutron port {0} not found in OVS interfaces." - .format(port)) - - iface_id = ports_ifaces[port] - - ovs_port_raw = ovs_get_data(remote, - table='Port {0}'.format(iface_id), - columns=['tag']) - ovs_port = ovs_decode_columns(ovs_port_raw) - logger.debug("OVS tag for port {0}: {1}".format(iface_id, ovs_port)) - ovs_tag = ovs_port[0]['tag'] - - return str(ovs_tag) diff --git a/fuelweb_test/helpers/pacemaker.py b/fuelweb_test/helpers/pacemaker.py deleted file mode 100644 index 3eca644a1..000000000 --- a/fuelweb_test/helpers/pacemaker.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from xml.etree import ElementTree - -from fuelweb_test.helpers.ssh_manager import SSHManager - -ssh_manager = SSHManager() - - -def get_pacemaker_nodes_attributes(cibadmin_status_xml): - """Parse 'cibadmin --query --scope status'. 
- :param cibadmin_status_xml: stdout from 'cibadmin --query --scope status' - :return: nested dictionary with node-fqdn and attribute name as keys - """ - - """ Get cibadmin_status to a python dict: - return: - { - fqdn: { - 'arch': - 'cpu_cores': - 'cpu_info': - 'cpu_load': - 'cpu_speed': - 'free_swap': - 'gtidd': - 'master-p_conntrackd': - 'master-p_rabbitmq-server': - 'os': - '#health_disk': # only on master if root_free < 100M - 'pingd': - 'rabbit-master': # only on master - 'rabbit-start-time': - 'rabbit_get_alarms_timeouts': - 'rabbit_list_channels_timeouts': - 'ram_free': - 'ram_total': - 'root_free': - 'var_lib_glance_free': - 'var_lib_mysql_free': - 'var_log_free': - }, - ... - } - """ - root = ElementTree.fromstring(cibadmin_status_xml) - nodes = {} - for node_state in root.iter('node_state'): - node_name = node_state.get('uname') - nodes[node_name] = {} - for instance_attribute in node_state.iter('nvpair'): - nodes[node_name][instance_attribute.get( - 'name')] = instance_attribute.get('value') - return nodes - - -def get_pcs_nodes(pcs_status_xml): - """Parse 'pcs status xml'. section - :param pcs_status_xml: stdout from 'pcs status xml' - :return: nested dictionary with node-fqdn and attribute name as keys - """ - """ Get crm node attributes to a python dict: - return: - { - fqdn: { - 'node name': - 'id': - 'online': - 'standby': - 'standby_on_fail': - 'maintenance': - 'pending': - 'unclean': - 'shutdown': - 'expected_up': - 'is_dc': - 'resources_running': - 'type': - }, - ... - } - """ - - root = ElementTree.fromstring(pcs_status_xml) - nodes = {} - for nodes_group in root.iter('nodes'): - for node in nodes_group: - nodes[node.get('name')] = node.attrib - return nodes - - -def parse_pcs_status_xml(remote_ip): - """Parse 'pcs status xml'. section - :param remote_ip: remote IP address - :return: nested dictionary with node-fqdn and attribute name as keys - """ - pcs_status_dict = ssh_manager.execute_on_remote( - remote_ip, 'pcs status xml')['stdout_str'] - return pcs_status_dict - - -def get_pacemaker_resource_name(remote_ip, resource_name): - """ Parse 'cibadmin -Q --scope resources' and check whether the resource - is multistate. Return parent resource name if it is, resource name - otherwise - :param remote_ip: remote IP address - :param resource_name: resource name string - :return: string with proper resource name - """ - cib = ssh_manager.execute_on_remote( - remote_ip, 'cibadmin -Q --scope resources')['stdout_str'] - root = ElementTree.fromstring(cib) - - resource_parent = root.find( - ".//primitive[@id='{0}']/..".format(resource_name)) - - if resource_parent.tag in ['master', 'clone']: - return resource_parent.attrib['id'] - else: - return resource_name diff --git a/fuelweb_test/helpers/patching.py b/fuelweb_test/helpers/patching.py deleted file mode 100644 index 1394ce36e..000000000 --- a/fuelweb_test/helpers/patching.py +++ /dev/null @@ -1,622 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
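For reference: the nested dict shape produced by get_pacemaker_nodes_attributes above is easiest to see on a toy status document. A self-contained sketch with invented attribute values; the real cibadmin XML nests nvpair elements under transient_attributes/instance_attributes, but iter('nvpair') picks them up regardless of depth:

    from xml.etree import ElementTree

    cib_status = '''
    <status>
      <node_state uname="node-1.test.domain.local">
        <nvpair name="pingd" value="1"/>
        <nvpair name="master-p_rabbitmq-server" value="10"/>
      </node_state>
    </status>
    '''

    root = ElementTree.fromstring(cib_status)
    nodes = {}
    for node_state in root.iter('node_state'):
        # Collect every attribute nvpair under this node into one dict.
        nodes[node_state.get('uname')] = {
            nv.get('name'): nv.get('value')
            for nv in node_state.iter('nvpair')}
    print(nodes)
    # {'node-1.test.domain.local': {'pingd': '1',
    #                               'master-p_rabbitmq-server': '10'}}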
- -import os -import re -import sys -import traceback -import zlib -from xml.dom.minidom import parseString - -from proboscis import register -from proboscis import TestProgram -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_is_not_none -from proboscis.asserts import assert_not_equal -from proboscis.asserts import assert_true -# pylint: disable=import-error,wrong-import-order -# noinspection PyUnresolvedReferences -from six.moves.urllib.request import urlopen -# noinspection PyUnresolvedReferences -from six.moves.urllib.parse import urlparse -# pylint: enable=import-error,wrong-import-order -import yaml - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.ssh_manager import SSHManager - -patching_validation_schema = { - 'type': { - 'required': True, - 'values': ['service_stop', 'service_start', 'service_restart', - 'server_down', 'server_up', 'server_reboot', - 'run_command', 'upload_script', 'run_tasks'], - 'data_type': str - }, - 'target': { - 'required': True, - 'values': {'master', 'slaves', 'controller_role', 'compute_role', - 'cinder_role', 'ceph-osd_role', 'mongo_role', - 'zabbix-server_role', 'base-os_role'}, - 'data_type': list - }, - 'service': { - 'required': False, - 'data_type': str - }, - 'command': { - 'required': False, - 'data_type': str - }, - 'script': { - 'required': False, - 'data_type': str - }, - 'upload_path': { - 'required': False, - 'data_type': str - }, - 'id': { - 'required': True, - 'data_type': int - }, - 'tasks': { - 'required': False, - 'data_type': list - }, - 'tasks_timeout': { - 'required': False, - 'data_type': int - }, -} - - -def map_test(target): - assert_is_not_none(settings.PATCHING_BUG_ID, - "Bug ID wasn't specified, can't start patching tests!") - errata = get_errata(path=settings.PATCHING_APPLY_TESTS, - bug_id=settings.PATCHING_BUG_ID) - verify_errata(errata) - if not any(target == e_target['type'] for e_target in errata['targets']): - skip_patching_test(target, errata['target']) - env_distro = settings.OPENSTACK_RELEASE - master_distro = settings.OPENSTACK_RELEASE_CENTOS - if 'affected-pkgs' in errata.keys(): - if target == 'master': - settings.PATCHING_PKGS = set( - [re.split('=|<|>', package)[0] for package - in errata['affected-pkgs'][master_distro.lower()]]) - else: - settings.PATCHING_PKGS = set( - [re.split('=|<|>', package)[0] for package - in errata['affected-pkgs'][env_distro.lower()]]) - available_env_packages = set() - available_master_packages = set() - for repo in settings.PATCHING_MIRRORS: - logger.debug( - 'Checking packages from "{0}" repository'.format(repo)) - available_env_packages.update(get_repository_packages(repo, - env_distro)) - for repo in settings.PATCHING_MASTER_MIRRORS: - logger.debug( - 'Checking packages from "{0}" repository'.format(repo)) - available_master_packages.update(get_repository_packages( - repo, master_distro)) - available_packages = available_env_packages | available_master_packages - if not settings.PATCHING_PKGS: - if target == 'master': - settings.PATCHING_PKGS = available_master_packages - else: - settings.PATCHING_PKGS = available_env_packages - else: - assert_true(settings.PATCHING_PKGS <= available_packages, - "Patching repositories don't contain all packages need" - "ed for tests. Need: {0}, available: {1}, missed: {2}." 
- "".format(settings.PATCHING_PKGS, - available_packages, - settings.PATCHING_PKGS - available_packages)) - assert_not_equal(len(settings.PATCHING_PKGS), 0, - "No packages found in repository(s) for patching:" - " '{0} {1}'".format(settings.PATCHING_MIRRORS, - settings.PATCHING_MASTER_MIRRORS)) - if target == 'master': - tests_groups = get_packages_tests(settings.PATCHING_PKGS, - master_distro, - target) - else: - tests_groups = get_packages_tests(settings.PATCHING_PKGS, - env_distro, - target) - - if 'rally' in errata.keys(): - if len(errata['rally']) > 0: - settings.PATCHING_RUN_RALLY = True - settings.RALLY_TAGS = errata['rally'] - - if settings.PATCHING_CUSTOM_TEST: - deployment_test = settings.PATCHING_CUSTOM_TEST - settings.PATCHING_SNAPSHOT = \ - 'patching_after_{0}'.format(deployment_test) - register(groups=['prepare_patching_environment'], - depends_on_groups=[deployment_test]) - register(groups=['prepare_patching_master_environment'], - depends_on_groups=[deployment_test]) - else: - program = TestProgram(argv=['none']) - deployment_test = None - for my_test in program.plan.tests: - if all(patching_group in my_test.entry.info.groups for - patching_group in tests_groups): - deployment_test = my_test - break - if deployment_test: - settings.PATCHING_SNAPSHOT = 'patching_after_{0}'.format( - deployment_test.entry.method.im_func.func_name) - if target == 'master': - register(groups=['prepare_patching_master_environment'], - depends_on=[deployment_test.entry.home]) - else: - register(groups=['prepare_patching_environment'], - depends_on=[deployment_test.entry.home]) - else: - raise Exception( - "Test with groups {0} not found.".format(tests_groups)) - - -def get_repository_packages(remote_repo_url, repo_type): - repo_url = urlparse(remote_repo_url) - packages = [] - if repo_type == settings.OPENSTACK_RELEASE_UBUNTU: - packages_url = '{0}/Packages'.format(repo_url.geturl()) - pkgs_raw = urlopen(packages_url).read() - for pkg in pkgs_raw.split('\n'): - match = re.search(r'^Package: (\S+)\s*$', pkg) - if match: - packages.append(match.group(1)) - else: - packages_url = '{0}/repodata/primary.xml.gz'.format(repo_url.geturl()) - pkgs_xml = parseString(zlib.decompressobj(zlib.MAX_WBITS | 32). 
- decompress(urlopen(packages_url).read())) - for pkg in pkgs_xml.getElementsByTagName('package'): - packages.append( - pkg.getElementsByTagName('name')[0].firstChild.nodeValue) - return packages - - -def _get_target_and_project(_pkg, _all_pkgs): - for _installation_target in _all_pkgs.keys(): - for _project in _all_pkgs[_installation_target]['projects']: - if _pkg in _project['packages']: - return _installation_target, _project['name'] - - -def get_package_test_info(package, pkg_type, tests_path, patch_target): - packages_path = "{0}/{1}/packages.yaml".format(tests_path, pkg_type) - tests = set() - tests_file = 'test.yaml' - all_packages = yaml.load(open(packages_path).read()) - assert_is_not_none(_get_target_and_project(package, all_packages), - "Package '{0}' doesn't belong to any installation " - "target / project".format(package)) - target, project = _get_target_and_project(package, all_packages) - if patch_target == 'master': - if target not in ['master', 'bootstrap']: - return {None} - if patch_target == 'environment': - if target not in ['deployment', 'provisioning']: - return {None} - target_tests_path = "/".join((tests_path, pkg_type, target, tests_file)) - project_tests_path = "/".join((tests_path, pkg_type, target, project, - tests_file)) - package_tests_path = "/".join((tests_path, pkg_type, target, project, - package, tests_file)) - for path in (target_tests_path, project_tests_path, package_tests_path): - try: - test = yaml.load(open(path).read()) - if 'system_tests' in test.keys(): - tests.update(test['system_tests']['tags']) - except IOError as e: - logger.warning('Ignoring exception: {!r}'.format(e)) - logger.debug(traceback.format_exc()) - return tests - - -def get_packages_tests(packages, distro, target): - assert_true(os.path.isdir(settings.PATCHING_PKGS_TESTS), - "Path for packages tests doesn't exist: '{0}'".format( - settings.PATCHING_PKGS_TESTS)) - if distro == settings.OPENSTACK_RELEASE_UBUNTU: - pkg_type = 'deb' - else: - pkg_type = 'rpm' - packages_tests = set() - for package in packages: - tests = get_package_test_info(package, - pkg_type, - settings.PATCHING_PKGS_TESTS, - target) - assert_true(len(tests) > 0, - "Tests for package {0} not found".format(package)) - if None in tests: - continue - packages_tests.update(tests) - return packages_tests - - -def mirror_remote_repository(admin_remote, remote_repo_url, local_repo_path): - repo_url = urlparse(remote_repo_url) - cut_dirs = len(repo_url.path.strip('/').split('/')) - download_cmd = ('wget --recursive --no-parent --no-verbose --reject "index' - '.html*,*.gif" --exclude-directories "{pwd}/repocache" ' - '--directory-prefix {path} -nH --cut-dirs={cutd} {url}').\ - format(pwd=repo_url.path.rstrip('/'), path=local_repo_path, - cutd=cut_dirs, url=repo_url.geturl()) - result = admin_remote.execute(download_cmd) - assert_equal(result['exit_code'], 0, 'Mirroring of remote packages ' - 'repository failed: {0}'.format( - result)) - - -def add_remote_repositories(environment, mirrors, prefix_name='custom_repo'): - repositories = set() - for mir in mirrors: - name = '{0}_{1}'.format(prefix_name, mirrors.index(mir)) - local_repo_path = '/'.join([settings.PATCHING_WEB_DIR, name]) - remote_repo_url = mir - with environment.d_env.get_admin_remote() as remote: - mirror_remote_repository( - admin_remote=remote, - remote_repo_url=remote_repo_url, - local_repo_path=local_repo_path) - repositories.add(name) - return repositories - - -def connect_slaves_to_repo(environment, nodes, repo_name): - repo_ip = 
environment.get_admin_node_ip() - repo_port = '8080' - repourl = 'http://{master_ip}:{repo_port}/{repo_name}/'.format( - master_ip=repo_ip, repo_name=repo_name, repo_port=repo_port) - if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_UBUNTU: - cmds = [ - "echo -e '\ndeb {repourl} /' > /etc/apt/sources.list.d/{repo_name}" - ".list".format(repourl=repourl, repo_name=repo_name), - "apt-key add <(curl -s '{repourl}/Release.key') || :".format( - repourl=repourl), - # Set highest priority to all repositories located on master node - "echo -e 'Package: *\nPin: origin {0}\nPin-Priority: 1060' > " - "/etc/apt/preferences.d/custom_repo".format( - environment.get_admin_node_ip()), - "apt-get update" - ] - else: - cmds = [ - "yum-config-manager --add-repo {url}".format(url=repourl), - "echo -e 'gpgcheck=0\npriority=20' >>/etc/yum.repos.d/{ip}_{port}_" - "{repo}_.repo".format(ip=repo_ip, repo=repo_name, port=repo_port), - "yum -y clean all", - ] - - for slave in nodes: - for cmd in cmds: - SSHManager().execute_on_remote( - ip=slave['ip'], - cmd=cmd - ) - - -def connect_admin_to_repo(environment, repo_name): - repo_ip = environment.get_admin_node_ip() - repo_port = '8080' - repourl = 'http://{master_ip}:{repo_port}/{repo_name}/'.format( - master_ip=repo_ip, repo_name=repo_name, repo_port=repo_port) - - cmds = [ - "yum-config-manager --add-repo {url}".format(url=repourl), - "echo -e 'gpgcheck=0\npriority=20' >>/etc/yum.repos.d/{ip}_{port}_" - "{repo}_.repo".format(ip=repo_ip, repo=repo_name, port=repo_port), - "yum -y clean all", - # FIXME(apanchenko): - # Temporary disable this check in order to test packages update - # inside Docker containers. When building of new images for containers - # is implemented, we should check here that `yum check-update` returns - # ONLY `100` exit code (updates are available for master node). - "yum check-update; [[ $? -eq 100 || $? 
-eq 0 ]]" - ] - - for cmd in cmds: - SSHManager().execute_on_remote( - ip=SSHManager().admin_ip, - cmd=cmd - ) - - -def update_packages(environment, remote, packages, exclude_packages=None): - if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_UBUNTU: - cmds = [ - 'apt-get -o Dpkg::Options::="--force-confdef" ' - '-o Dpkg::Options::="--force-confold" -y install ' - '--only-upgrade {0}'.format(' '.join(packages)) - ] - if exclude_packages: - exclude_commands = ["apt-mark hold {0}".format(pkg) - for pkg in exclude_packages] - cmds = exclude_commands + cmds - else: - cmds = [ - "yum -y update --nogpgcheck {0} -x '{1}'".format( - ' '.join(packages), ','.join(exclude_packages or [])) - ] - for cmd in cmds: - remote.check_call(cmd) - - -def update_packages_on_slaves(environment, slaves, packages=None, - exclude_packages=None): - if not packages: - # Install all updates - packages = ' ' - for slave in slaves: - with environment.d_env.get_ssh_to_remote(slave['ip']) as remote: - update_packages(environment, remote, packages, exclude_packages) - - -def get_slaves_ips_by_role(slaves, role=None): - if role: - return [slave['ip'] for slave in slaves if role in slave['roles']] - return [slave['ip'] for slave in slaves] - - -def get_devops_slaves_by_role(env, slaves, role=None): - if role: - return [env.fuel_web.find_devops_node_by_nailgun_fqdn(slave['fqdn'], - env.d_env.nodes().slaves) - for slave in slaves if role in slave['roles']] - return [env.fuel_web.find_devops_node_by_nailgun_fqdn(slave['fqdn'], - env.d_env.nodes().slaves) for slave in slaves] - - -def get_slaves_ids_by_role(slaves, role=None): - if role: - return [slave['id'] for slave in slaves if role in slave['roles']] - return [slave['id'] for slave in slaves] - - -def verify_fix_apply_step(apply_step): - validation_schema = patching_validation_schema - for key in validation_schema: - if key in apply_step.keys(): - is_exists = apply_step[key] is not None - else: - is_exists = False - if validation_schema[key]['required']: - assert_true(is_exists, "Required field '{0}' not found in patch " - "apply scenario step".format(key)) - if not is_exists: - continue - is_valid = True - if 'values' in validation_schema[key].keys(): - if validation_schema[key]['data_type'] == str: - is_valid = apply_step[key] in validation_schema[key]['values'] - elif validation_schema[key]['data_type'] in (list, set): - is_valid = set(apply_step[key]) <= \ - validation_schema[key]['values'] - - assert_true(is_valid, 'Step in patch apply actions scenario ' - 'contains incorrect data: "{key}": "{value}"' - '. 
Supported values for "{key}" are '
-                    '"{valid}"'.format(
-                        key=key,
-                        value=apply_step[key],
-                        valid=validation_schema[key]['values']))
-        if 'data_type' in validation_schema[key].keys():
-            assert_true(
-                isinstance(
-                    apply_step[key], validation_schema[key]['data_type']),
-                "Unexpected data type in patch apply scenario step: '"
-                "{key}' is '{type}', but expecting '{expect}'.".format(
-                    key=key,
-                    type=type(apply_step[key]),
-                    expect=validation_schema[key]['data_type']))
-
-
-def validate_fix_apply_step(apply_step, environment, slaves):
-    verify_fix_apply_step(apply_step)
-    slaves = [] if not slaves else slaves
-    command = ''
-    remotes_ips = set()
-    devops_action = ''
-    devops_nodes = set()
-    nodes_ids = set()
-
-    if apply_step['type'] == 'run_tasks':
-        remotes_ips.add(environment.get_admin_node_ip())
-        assert_true('master' not in apply_step['target'],
-                    "Action type 'run_tasks' accepts only slaves (roles) "
-                    "as target value, but 'master' is specified!")
-
-        for target in apply_step['target']:
-            if target == 'slaves':
-                nodes_ids.update(get_slaves_ids_by_role(slaves, role=None))
-            else:
-                role = target.split('_role')[0]
-                nodes_ids.update(get_slaves_ids_by_role(slaves, role=role))
-    else:
-        for target in apply_step['target']:
-            if target == 'master':
-                remotes_ips.add(environment.get_admin_node_ip())
-                devops_nodes.add(
-                    environment.d_env.nodes().admin)
-            elif target == 'slaves':
-                remotes_ips.update(get_slaves_ips_by_role(slaves, role=None))
-                devops_nodes.update(get_devops_slaves_by_role(environment,
-                                                              slaves))
-            else:
-                role = target.split('_role')[0]
-                remotes_ips.update(get_slaves_ips_by_role(slaves, role))
-                devops_nodes.update(get_devops_slaves_by_role(environment,
-                                                              slaves,
-                                                              role=role))
-    if apply_step['type'] in ('service_stop', 'service_start',
-                              'service_restart'):
-        assert_true(len(apply_step['service'] or '') > 0,
-                    "Step #{0} in apply patch scenario performs '{1}', but "
-                    "service isn't specified".format(apply_step['id'],
-                                                     apply_step['type']))
-        action = apply_step['type'].split('service_')[1]
-        command = (
-            "find /etc/init.d/ -regex '/etc/init.d/{service}' -printf "
-            "'%f\n' -quit | xargs -i service {{}} {action}".format(
-                service=apply_step['service'], action=action), )
-    elif apply_step['type'] in ('server_down', 'server_up', 'server_reboot'):
-        assert_true('master' not in apply_step['target'],
-                    'Action type "{0}" doesn\'t accept "master" node as '
-                    'target! Use action "run_command" instead.'.format(
-                        apply_step['type']))
-        devops_action = apply_step['type'].split('server_')[1]
-    elif apply_step['type'] == 'upload_script':
-        assert_true(len(apply_step['script'] or '') > 0,
-                    "Step #{0} in apply patch scenario performs '{1}', but "
-                    "script isn't specified".format(apply_step['id'],
-                                                    apply_step['type']))
-        assert_true(len(apply_step['upload_path'] or '') > 0,
-                    "Step #{0} in apply patch scenario performs '{1}', but "
-                    "upload path isn't specified".format(apply_step['id'],
-                                                         apply_step['type']))
-        command = ('UPLOAD', apply_step['script'], apply_step['upload_path'])
-    elif apply_step['type'] == 'run_tasks':
-        assert_true(len(apply_step['tasks'] or '') > 0,
-                    "Step #{0} in apply patch scenario performs '{1}', but "
-                    "tasks aren't specified".format(apply_step['id'],
-                                                    apply_step['type']))
-        tasks_timeout = apply_step['tasks_timeout'] if 'tasks_timeout' in \
-            apply_step.keys() else 60 * 30
-        command = (
-            'RUN_TASKS',
-            nodes_ids,
-            apply_step['tasks'],
-            tasks_timeout
-        )
-    else:
-        assert_true(len(apply_step['command'] or '') > 0,
-                    "Step #{0} in apply patch scenario performs '{1}', but "
-                    "command isn't specified".format(apply_step['id'],
-                                                     apply_step['type']))
-        command = apply_step['command']
-    # remotes sessions .clear() placed in run_actions()
-    remotes = [environment.d_env.get_ssh_to_remote(ip) for ip in remotes_ips] \
-        if command else []
-    devops_nodes = devops_nodes if devops_action else []
-    return command, remotes, devops_action, devops_nodes
-
-
-def get_errata(path, bug_id):
-    scenario_path = '{0}/bugs/{1}/erratum.yaml'.format(path, bug_id)
-    assert_true(os.path.exists(scenario_path),
-                "Erratum for bug #{0} is not found in '{1}' "
-                "directory".format(bug_id, path))
-    with open(scenario_path) as f:
-        return yaml.safe_load(f)
-
-
-def verify_errata(errata):
-    actions_types = ('patch-scenario', 'verify-scenario')
-    distro = settings.OPENSTACK_RELEASE.lower()
-    for target in errata['targets']:
-        for action_type in actions_types:
-            assert_true(distro in target[action_type].keys(),
-                        "Steps for '{0}' not found for '{1}' distro!".format(
-                            action_type, distro))
-            scenario = sorted(target[action_type][distro],
-                              key=lambda k: k['id'])
-            for step in scenario:
-                verify_fix_apply_step(step)
-
-
-def run_actions(environment, target, slaves, action_type='patch-scenario'):
-    errata = get_errata(path=settings.PATCHING_APPLY_TESTS,
-                        bug_id=settings.PATCHING_BUG_ID)
-    distro = settings.OPENSTACK_RELEASE.lower()
-    target_scenarios = [e_target for e_target in errata['targets']
-                        if target == e_target['type']]
-    assert_true(len(target_scenarios) > 0,
-                "Can't find patch scenario for '{0}' target in erratum "
-                "for bug #{1}!".format(target, settings.PATCHING_BUG_ID))
-    scenario = sorted(target_scenarios[0][action_type][distro],
-                      key=lambda k: k['id'])
-
-    for step in scenario:
-        command, remotes, devops_action, devops_nodes = \
-            validate_fix_apply_step(step, environment, slaves)
-        if 'UPLOAD' in command:
-            file_name = command[1]
-            upload_path = command[2]
-            source_path = '{0}/bugs/{1}/tests/{2}'.format(
-                settings.PATCHING_APPLY_TESTS,
-                settings.PATCHING_BUG_ID,
-                file_name)
-            assert_true(os.path.exists(source_path),
-                        'File for uploading "{0}" doesn\'t exist!'.format(
-                            source_path))
-            for remote in remotes:
-                remote.upload(source_path, upload_path)
-            continue
-        elif 'RUN_TASKS' in command:
-            nodes_ids = command[1]
-            tasks = command[2]
-            timeout = command[3]
-            nodes = [node for node in
-                     environment.fuel_web.client.list_nodes()
-                     if node['id'] in nodes_ids]
-            assert_true(len(nodes_ids) == len(nodes),
-                        'Expected nodes with ids {0} for deployment tasks, '
-                        'but found {1}!'.format(nodes_ids,
-                                                [n['id'] for n in nodes]))
-            assert_true(len(set([node['cluster'] for node in nodes])) == 1,
-                        'Slaves for patching actions belong to different '
-                        'environments, can\'t run deployment tasks!')
-            cluster_id = nodes[0]['cluster']
-            environment.fuel_web.wait_deployment_tasks(cluster_id, nodes_ids,
-                                                       tasks, timeout)
-            continue
-        for remote in remotes:
-            remote.check_call(command)
-        if devops_action == 'down':
-            environment.fuel_web.warm_shutdown_nodes(devops_nodes)
-        elif devops_action == 'up':
-            environment.fuel_web.warm_start_nodes(devops_nodes)
-        elif devops_action == 'reboot':
-            environment.fuel_web.warm_restart_nodes(devops_nodes)
-
-    # clear connections
-    for remote in remotes:
-        remote.clear()
-
-
-def apply_patches(environment, target, slaves=None):
-    run_actions(environment, target, slaves, action_type='patch-scenario')
-
-
-def verify_fix(environment, target, slaves=None):
-    run_actions(environment, target, slaves, action_type='verify-scenario')
-
-
-def skip_patching_test(target, errata_target):
-    # TODO(apanchenko):
-    # If 'target' from erratum doesn't match 'target' from tests we need to
-    # skip tests and return special exit code, so Jenkins is able to recognize
-    # tests were skipped and it shouldn't vote to CRs (just leave comment)
-    logger.error('Tests for "{0}" were started, but patches are targeted to '
-                 '"{1}" according to erratum.'.format(target, errata_target))
-    sys.exit(123)
diff --git a/fuelweb_test/helpers/rally.py b/fuelweb_test/helpers/rally.py
deleted file mode 100644
index 90269555c..000000000
--- a/fuelweb_test/helpers/rally.py
+++ /dev/null
@@ -1,427 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
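For orientation, the steps consumed by the patching helpers deleted above are plain dicts loaded from an erratum.yaml. The sketch below is hypothetical: the field values are illustrative and do not come from a real erratum; only the keys mirror what verify_fix_apply_step() and validate_fix_apply_step() check.

    # Hypothetical apply-step dicts, shaped like the entries the helpers
    # above expect under target[action_type][distro] in erratum.yaml.
    upload_step = {
        'id': 1,
        'type': 'upload_script',        # dispatched to the 'UPLOAD' command
        'target': ['master'],
        'script': 'fix_bug.sh',         # illustrative file name
        'upload_path': '/tmp/fix_bug.sh',
    }
    restart_step = {
        'id': 2,
        'type': 'service_restart',      # becomes a find/xargs service call
        'target': ['controller_role'],  # '<role>_role' selects slaves by role
        'service': 'neutron-server',
    }
    # validate_fix_apply_step() turns each step into a
    # (command, remotes, devops_action, devops_nodes) tuple,
    # which run_actions() then executes in ascending 'id' order.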
- -from __future__ import division - -import json -import os - -from devops.helpers.helpers import wait -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true - -from fuelweb_test import logger - - -class RallyEngine(object): - def __init__(self, - admin_remote, - container_repo, - proxy_url=None, - user_id=0, - dir_for_home='/var/rally_home', - home_bind_path='/home/rally'): - self.admin_remote = admin_remote - self.container_repo = container_repo - self.repository_tag = 'latest' - self.proxy_url = proxy_url or "" - self.user_id = user_id - self.dir_for_home = dir_for_home - self.home_bind_path = home_bind_path - self.setup() - - def image_exists(self, tag='latest'): - cmd = "docker images | awk 'NR > 1{print $1\" \"$2}'" - logger.debug('Checking Docker images...') - result = self.admin_remote.execute(cmd) - logger.debug(result) - existing_images = [line.strip().split() for line in result['stdout']] - return [self.container_repo, tag] in existing_images - - def pull_image(self): - # TODO(apanchenko): add possibility to load image from local path or - # remote link provided in settings, in order to speed up downloading - cmd = 'docker pull {0}'.format(self.container_repo) - logger.debug('Downloading Rally repository/image from registry...') - result = self.admin_remote.execute(cmd) - logger.debug(result) - return self.image_exists() - - def run_container_command(self, command, in_background=False): - command = str(command).replace(r"'", r"'\''") - options = '' - if in_background: - options = '{0} -d'.format(options) - cmd = ("docker run {options} --user {user_id} --net=\"host\" -e " - "\"http_proxy={proxy_url}\" -e \"https_proxy={proxy_url}\" " - "-v {dir_for_home}:{home_bind_path} {container_repo}:{tag} " - "/bin/bash -c '{command}'".format( - options=options, - user_id=self.user_id, - proxy_url=self.proxy_url, - dir_for_home=self.dir_for_home, - home_bind_path=self.home_bind_path, - container_repo=self.container_repo, - tag=self.repository_tag, - command=command)) - logger.debug('Executing command "{0}" in Rally container {1}..'.format( - cmd, self.container_repo)) - result = self.admin_remote.execute(cmd) - logger.debug(result) - return result - - def setup_utils(self): - utils = ['gawk', 'vim', 'curl'] - cmd = ('unset http_proxy https_proxy; apt-get update; ' - 'apt-get install -y {0}'.format(' '.join(utils))) - logger.debug('Installing utils "{0}" to the Rally container...'.format( - utils)) - result = self.run_container_command(cmd) - assert_equal(result['exit_code'], 0, - 'Utils installation failed in Rally container: ' - '{0}'.format(result)) - - def create_database(self): - check_rally_db_cmd = 'test -s .rally.sqlite' - result = self.run_container_command(check_rally_db_cmd) - if result['exit_code'] == 0: - return - logger.debug('Recreating Database for Rally...') - create_rally_db_cmd = 'rally-manage db recreate' - result = self.run_container_command(create_rally_db_cmd) - assert_equal(result['exit_code'], 0, - 'Rally Database creation failed: {0}!'.format(result)) - result = self.run_container_command(check_rally_db_cmd) - assert_equal(result['exit_code'], 0, 'Failed to create Database for ' - 'Rally: {0} !'.format(result)) - - def prepare_image(self): - self.create_database() - self.setup_utils() - last_container_cmd = "docker ps -lq" - result = self.admin_remote.execute(last_container_cmd) - assert_equal(result['exit_code'], 0, - "Unable to get last container ID: {0}!".format(result)) - last_container = ''.join([line.strip() for line in 
result['stdout']]) - commit_cmd = 'docker commit {0} {1}:ready'.format(last_container, - self.container_repo) - result = self.admin_remote.execute(commit_cmd) - assert_equal(result['exit_code'], 0, - 'Commit to Docker image "{0}" failed: {1}.'.format( - self.container_repo, result)) - return self.image_exists(tag='ready') - - def setup_bash_alias(self): - alias_name = 'rally_docker' - check_alias_cmd = '. /root/.bashrc && alias {0}'.format(alias_name) - result = self.admin_remote.execute(check_alias_cmd) - if result['exit_code'] == 0: - return - logger.debug('Creating bash alias for Rally inside container...') - create_alias_cmd = ("alias {alias_name}='docker run --user {user_id} " - "--net=\"host\" -e \"http_proxy={proxy_url}\" -t " - "-i -v {dir_for_home}:{home_bind_path} " - "{container_repo}:{tag} rally'".format( - alias_name=alias_name, - user_id=self.user_id, - proxy_url=self.proxy_url, - dir_for_home=self.dir_for_home, - home_bind_path=self.home_bind_path, - container_repo=self.container_repo, - tag=self.repository_tag)) - result = self.admin_remote.execute('echo "{0}">> /root/.bashrc'.format( - create_alias_cmd)) - assert_equal(result['exit_code'], 0, - "Alias creation for running Rally from container failed: " - "{0}.".format(result)) - result = self.admin_remote.execute(check_alias_cmd) - assert_equal(result['exit_code'], 0, - "Alias creation for running Rally from container failed: " - "{0}.".format(result)) - - def setup(self): - if not self.image_exists(): - assert_true(self.pull_image(), - "Docker image for Rally not found!") - if not self.image_exists(tag='ready'): - assert_true(self.prepare_image(), - "Docker image for Rally is not ready!") - self.repository_tag = 'ready' - self.setup_bash_alias() - - def list_deployments(self): - cmd = (r"rally deployment list | awk -F " - r"'[[:space:]]*\\\\|[[:space:]]*' '/\ydeploy\y/{print $2}'") - result = self.run_container_command(cmd) - logger.debug('Rally deployments list: {0}'.format(result)) - return [line.strip() for line in result['stdout']] - - def show_deployment(self, deployment_uuid): - cmd = ("rally deployment show {0} | awk -F " - "'[[:space:]]*\\\\|[[:space:]]*' '/\w/{{print $2\",\"$3\",\"$4" - "\",\"$5\",\"$6\",\"$7\",\"$8}}'").format(deployment_uuid) - result = self.run_container_command(cmd) - assert_equal(len(result['stdout']), 2, - "Command 'rally deployment show' returned unexpected " - "value: expected 2 lines, got {0}: ".format(result)) - keys = [k for k in result['stdout'][0].strip().split(',') if k != ''] - values = [v for v in result['stdout'][1].strip().split(',') if v != ''] - return {keys[i]: values[i] for i in range(0, len(keys))} - - def list_tasks(self): - cmd = "rally task list --uuids-only" - result = self.run_container_command(cmd) - logger.debug('Rally tasks list: {0}'.format(result)) - return [line.strip() for line in result['stdout']] - - def get_task_status(self, task_uuid): - cmd = "rally task status {0}".format(task_uuid) - result = self.run_container_command(cmd) - assert_equal(result['exit_code'], 0, - "Getting Rally task status failed: {0}".format(result)) - task_status = ''.join(result['stdout']).strip().split()[-1] - logger.debug('Rally task "{0}" has status "{1}".'.format(task_uuid, - task_status)) - return task_status - - -class RallyDeployment(object): - def __init__(self, rally_engine, cluster_vip, username, password, tenant, - key_port=5000, proxy_url=''): - self.rally_engine = rally_engine - self.cluster_vip = cluster_vip - self.username = username - self.password = password - 
self.tenant_name = tenant
-        self.keystone_port = str(key_port)
-        self.proxy_url = proxy_url
-        self.auth_url = "http://{0}:{1}/v2.0/".format(self.cluster_vip,
-                                                      self.keystone_port)
-        self.set_proxy = not self.is_proxy_set
-        self._uuid = None
-        self.create_deployment()
-
-    @property
-    def uuid(self):
-        if self._uuid is None:
-            for d_uuid in self.rally_engine.list_deployments():
-                deployment = self.rally_engine.show_deployment(d_uuid)
-                logger.debug("Deployment info: {0}".format(deployment))
-                if self.auth_url in deployment['auth_url'] and \
-                        self.username == deployment['username'] and \
-                        self.tenant_name == deployment['tenant_name']:
-                    self._uuid = d_uuid
-                    break
-        return self._uuid
-
-    @property
-    def is_proxy_set(self):
-        cmd = '[ "${{http_proxy}}" == "{0}" ]'.format(self.proxy_url)
-        return self.rally_engine.run_container_command(cmd)['exit_code'] == 0
-
-    @property
-    def is_deployment_exist(self):
-        return self.uuid is not None
-
-    def create_deployment(self):
-        if self.is_deployment_exist:
-            return
-        cmd = ('rally deployment create --name "{0}" --filename '
-               '<(echo \'{{ "admin": {{ "password": "{1}", "tenant_name": "{2}'
-               '", "username": "{3}" }}, "auth_url": "{4}", "endpoint": null, '
-               '"type": "ExistingCloud", "https_insecure": true }}\')').format(
-            self.cluster_vip, self.password, self.tenant_name, self.username,
-            self.auth_url)
-        result = self.rally_engine.run_container_command(cmd)
-        assert_true(self.is_deployment_exist,
-                    'Rally deployment creation failed: {0}'.format(result))
-        logger.debug('Rally deployment created: {0}'.format(result))
-        assert_true(self.check_deployment(),
-                    "Rally deployment check failed.")
-
-    def check_deployment(self, deployment_uuid=''):
-        cmd = 'rally deployment check {0}'.format(deployment_uuid)
-        result = self.rally_engine.run_container_command(cmd)
-        if result['exit_code'] == 0:
-            return True
-        else:
-            logger.error('Rally deployment check failed: {0}'.format(result))
-            return False
-
-
-class RallyTask(object):
-    def __init__(self, rally_deployment, test_type):
-        self.deployment = rally_deployment
-        self.engine = self.deployment.rally_engine
-        self.test_type = test_type
-        self.uuid = None
-        self._status = None
-
-    @property
-    def status(self):
-        if self.uuid is None:
-            self._status = None
-        else:
-            self._status = self.engine.get_task_status(self.uuid)
-        return self._status
-
-    def prepare_scenario(self):
-        scenario_file = '{0}/fuelweb_test/rally/scenarios/{1}.json'.format(
-            os.environ.get("WORKSPACE", "./"), self.test_type)
-        remote_path = '{0}/{1}.json'.format(self.engine.dir_for_home,
-                                            self.test_type)
-        self.engine.admin_remote.upload(scenario_file, remote_path)
-        result = self.engine.admin_remote.execute('test -f {0}'.format(
-            remote_path))
-        assert_equal(result['exit_code'], 0,
-                     "Scenario upload failed: {0}".format(result))
-        return '{0}.json'.format(self.test_type)
-
-    def start(self):
-        scenario = self.prepare_scenario()
-        temp_file = '{0}_results.tmp.txt'.format(scenario)
-        cmd = 'rally task start {0} &> {1}'.format(scenario, temp_file)
-        result = self.engine.run_container_command(cmd, in_background=True)
-        logger.debug('Started Rally task: {0}'.format(result))
-        cmd = ("awk 'BEGIN{{retval=1}};/^Using task:/{{print $NF; retval=0}};"
-               "END {{exit retval}}' {0}").format(temp_file)
-        wait(lambda: self.engine.run_container_command(cmd)['exit_code'] == 0,
-             timeout=30, timeout_msg='Rally task {!r} creation timeout'
-                                     ''.format(result))
-        result = self.engine.run_container_command(cmd)
-        task_uuid = ''.join(result['stdout']).strip()
assert_true(task_uuid in self.engine.list_tasks(), - "Rally task creation failed: {0}".format(result)) - self.uuid = task_uuid - - def abort(self, task_id): - logger.debug('Stop Rally task {0}'.format(task_id)) - cmd = 'rally task abort {0}'.format(task_id) - self.engine.run_container_command(cmd) - assert_true( - self.status in ('finished', 'aborted'), - "Rally task {0} was not aborted; current task status " - "is {1}".format(task_id, self.status)) - - def get_results(self): - if self.status == 'finished': - cmd = 'rally task results {0}'.format(self.uuid) - result = self.engine.run_container_command(cmd) - assert_equal(result['exit_code'], 0, - "Getting task results failed: {0}".format(result)) - logger.debug("Rally task {0} result: {1}".format(self.uuid, - result)) - return ''.join(result['stdout']) - - -class RallyResult(object): - def __init__(self, json_results): - self.values = { - 'full_duration': 0.00, - 'load_duration': 0.00, - 'errors': 0 - } - self.raw_data = [] - self.parse_raw_results(json_results) - - def parse_raw_results(self, raw_results): - data = json.loads(raw_results) - assert_equal(len(data), 1, - "Current implementation of RallyResult class doesn't " - "support results with length greater than '1'!") - self.raw_data = data[0] - self.values['full_duration'] = data[0]['full_duration'] - self.values['load_duration'] = data[0]['load_duration'] - self.values['errors'] = sum([len(result['error']) - for result in data[0]['result']]) - - @staticmethod - def compare(first_result, second_result, deviation=0.1): - """ - Compare benchmark results - :param first_result: RallyResult - :param second_result: RallyResult - :param deviation: float - :return: bool - """ - message = '' - equal = True - for val in first_result.values.keys(): - logger.debug('Comparing {2}: {0} and {1}'.format( - first_result.values[val], second_result.values[val], - val - )) - if first_result.values[val] == 0 or second_result.values[val] == 0: - if first_result.values[val] != second_result.values[val]: - message += "Values of '{0}' are: {1} and {2}. ".format( - val, - first_result.values[val], - second_result.values[val]) - equal = False - continue - diff = abs( - first_result.values[val] / second_result.values[val] - 1) - if diff > deviation: - message += "Values of '{0}' are: {1} and {2}. 
".format( - val, first_result.values[val], second_result.values[val]) - equal = False - if not equal: - logger.info("Rally benchmark results aren't equal: {0}".format( - message)) - return equal - - def show(self): - return json.dumps(self.raw_data) - - -class RallyBenchmarkTest(object): - def __init__(self, container_repo, environment, cluster_id, - test_type): - self.admin_remote = environment.d_env.get_admin_remote() - self.cluster_vip = environment.fuel_web.get_mgmt_vip(cluster_id) - self.cluster_credentials = \ - environment.fuel_web.get_cluster_credentials(cluster_id) - self.proxy_url = environment.fuel_web.get_alive_proxy(cluster_id) - logger.debug('Rally proxy URL is: {0}'.format(self.proxy_url)) - self.container_repo = container_repo - self.home_dir = 'rally-{0}'.format(cluster_id) - self.test_type = test_type - self.engine = RallyEngine( - admin_remote=self.admin_remote, - container_repo=self.container_repo, - proxy_url=self.proxy_url, - dir_for_home='/var/{0}/'.format(self.home_dir) - ) - self.deployment = RallyDeployment( - rally_engine=self.engine, - cluster_vip=self.cluster_vip, - username=self.cluster_credentials['username'], - password=self.cluster_credentials['password'], - tenant=self.cluster_credentials['tenant'], - proxy_url=self.proxy_url - ) - self.current_task = None - - def run(self, timeout=60 * 10, result=True): - self.current_task = RallyTask(self.deployment, self.test_type) - logger.info('Starting Rally benchmark test...') - self.current_task.start() - assert_equal(self.current_task.status, 'running', - 'Rally task was started, but it is not running, status: ' - '{0}'.format(self.current_task.status)) - if result: - wait(lambda: self.current_task.status == 'finished', - timeout=timeout, timeout_msg='Rally benchmark test timeout') - logger.info('Rally benchmark test is finished.') - return RallyResult(json_results=self.current_task.get_results()) diff --git a/fuelweb_test/helpers/regenerate_centos_repo b/fuelweb_test/helpers/regenerate_centos_repo deleted file mode 100644 index c4d263e34..000000000 --- a/fuelweb_test/helpers/regenerate_centos_repo +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -REPO_PATH=$1 - -createrepo --update ${REPO_PATH} 2>/dev/null diff --git a/fuelweb_test/helpers/regenerate_repo.py b/fuelweb_test/helpers/regenerate_repo.py deleted file mode 100644 index 3efab997d..000000000 --- a/fuelweb_test/helpers/regenerate_repo.py +++ /dev/null @@ -1,388 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import traceback -import os -import re -from xml.etree import ElementTree -import zlib - -from proboscis.asserts import assert_equal -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves.urllib.request import urlopen -# noinspection PyUnresolvedReferences -from six.moves.urllib.error import HTTPError -# noinspection PyUnresolvedReferences -from six.moves.urllib.error import URLError -# pylint: enable=import-error - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.utils import install_pkg_2 -from fuelweb_test.helpers.ssh_manager import SSHManager - - -def regenerate_ubuntu_repo(path): - # Ubuntu - cr = CustomRepo() - cr.install_tools(['dpkg', 'dpkg-devel', 'dpkg-dev']) - cr.regenerate_repo('regenerate_ubuntu_repo', path) - - -def regenerate_centos_repo(path): - # CentOS - cr = CustomRepo() - cr.install_tools(['createrepo']) - cr.regenerate_repo('regenerate_centos_repo', path) - - -class CustomRepo(object): - """CustomRepo.""" # TODO documentation - - def __init__(self): - self.ssh_manager = SSHManager() - self.ip = self.ssh_manager.admin_ip - self.path_scripts = ('{0}/fuelweb_test/helpers/' - .format(os.environ.get("WORKSPACE", "./"))) - self.remote_path_scripts = '/tmp/' - self.ubuntu_script = 'regenerate_ubuntu_repo' - self.centos_script = 'regenerate_centos_repo' - self.local_mirror_ubuntu = settings.LOCAL_MIRROR_UBUNTU - self.local_mirror_centos = settings.LOCAL_MIRROR_CENTOS - self.ubuntu_release = settings.UBUNTU_RELEASE - self.centos_supported_archs = ['noarch', 'x86_64'] - self.pkgs_list = [] - - self.custom_pkgs_mirror_path = '' - if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE: - # Trying to determine the root of Ubuntu repository - pkgs_path = settings.CUSTOM_PKGS_MIRROR.split('/dists/') - if len(pkgs_path) == 2: - self.custom_pkgs_mirror = pkgs_path[0] - self.custom_pkgs_mirror_path = '/dists/{}'.format(pkgs_path[1]) - else: - self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR - else: - self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR - - def prepare_repository(self): - """Prepare admin node to packages testing - - Scenario: - 1. Temporary set nameserver to local router on admin node - 2. Install tools to manage rpm/deb repository - 3. Retrieve list of packages from custom repository - 4. Download packages to local rpm/deb repository - 5. Update .yaml file with new packages version - 6. 
Re-generate repo using shell scripts on admin node
-
-        """
-        # Check necessary settings and revert a snapshot
-        if not self.custom_pkgs_mirror:
-            return
-        logger.info("Custom mirror with new packages: {0}"
-                    .format(settings.CUSTOM_PKGS_MIRROR))
-
-        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
-            # Ubuntu
-            master_tools = ['dpkg', 'dpkg-devel', 'dpkg-dev']
-            self.install_tools(master_tools)
-            self.get_pkgs_list_ubuntu()
-            pkgs_local_path = ('{0}/pool/'
-                               .format(self.local_mirror_ubuntu))
-            self.download_pkgs(pkgs_local_path)
-            self.regenerate_repo(self.ubuntu_script, self.local_mirror_ubuntu)
-        else:
-            # CentOS
-            master_tools = ['createrepo']
-            self.install_tools(master_tools)
-            self.get_pkgs_list_centos()
-            pkgs_local_path = '{0}/Packages/'.format(self.local_mirror_centos)
-            self.download_pkgs(pkgs_local_path)
-            self.regenerate_repo(self.centos_script, self.local_mirror_centos)
-
-    # Install tools to master node
-    def install_tools(self, master_tools=None):
-        if master_tools is None:
-            master_tools = []
-        logger.info("Installing necessary tools for {0}"
-                    .format(settings.OPENSTACK_RELEASE))
-        for master_tool in master_tools:
-            exit_code = install_pkg_2(
-                ip=self.ip,
-                pkg_name=master_tool
-            )
-            assert_equal(0, exit_code, 'Cannot install package {0} '
-                         'on admin node.'.format(master_tool))
-
-    # Ubuntu: Creating list of packages from the additional mirror
-    def get_pkgs_list_ubuntu(self):
-        url = "{0}/{1}/Packages".format(self.custom_pkgs_mirror,
-                                        self.custom_pkgs_mirror_path)
-        logger.info("Retrieving additional packages from the custom mirror:"
-                    " {0}".format(url))
-        try:
-            pkgs_release = urlopen(url).read()
-        except (HTTPError, URLError):
-            logger.error(traceback.format_exc())
-            url_gz = '{0}.gz'.format(url)
-            logger.info(
-                "Retrieving additional packages from the custom mirror:"
-                " {0}".format(url_gz))
-            try:
-                pkgs_release_gz = urlopen(url_gz).read()
-            except (HTTPError, URLError):
-                logger.error(traceback.format_exc())
-                raise
-            try:
-                d = zlib.decompressobj(zlib.MAX_WBITS | 32)
-                pkgs_release = d.decompress(pkgs_release_gz)
-            except Exception:
-                logger.error('Ubuntu mirror error: Could not decompress {0}\n'
-                             '{1}'.format(url_gz, traceback.format_exc()))
-                raise
-
-        packages = (pkg for pkg in pkgs_release.split("\n\n") if pkg)
-        for package in packages:
-            upkg = {pstr.split()[0].lower(): ''.join(pstr.split()[1:])
-                    for pstr in package.split("\n") if pstr[:1].strip()}
-
-            upkg_keys = ["package:", "version:", "filename:"]
-            assert_equal(True, all(x in upkg for x in upkg_keys),
-                         'Missing one of the statements ["Package:", '
-                         '"Version:", "Filename:"] in {0}'.format(url))
-            # TODO: add dependencies list to upkg
-            self.pkgs_list.append(upkg)
-
-    # Centos: Creating list of packages from the additional mirror
-    def get_pkgs_list_centos(self):
-        logger.info("Retrieving additional packages from the custom mirror:"
-                    " {0}".format(self.custom_pkgs_mirror))
-        url = "{0}/repodata/repomd.xml".format(self.custom_pkgs_mirror)
-        try:
-            repomd_data = urlopen(url).read()
-        except (HTTPError, URLError):
-            logger.error(traceback.format_exc())
-            raise
-        # Remove namespace attribute before parsing XML
-        repomd_data = re.sub(' xmlns="[^"]+"', '', repomd_data, count=1)
-        tree_repomd_data = ElementTree.fromstring(repomd_data)
-        lists_location = ''
-        for repomd in tree_repomd_data.findall('data'):
-            if repomd.get('type') == 'primary':
-                repomd_location = repomd.find('location')
-                lists_location = repomd_location.get('href')
-
-        assert_equal(True, lists_location != '',
'CentOS mirror error:' - ' Could not parse {0}\nlists_location = "{1}"\n{2}' - .format(url, lists_location, traceback.format_exc())) - url = "{0}/{1}".format(self.custom_pkgs_mirror, lists_location) - try: - lists_data = urlopen(url).read() - except (HTTPError, URLError): - logger.error(traceback.format_exc()) - raise - if '.xml.gz' in lists_location: - try: - d = zlib.decompressobj(zlib.MAX_WBITS | 32) - lists_data = d.decompress(lists_data) - except Exception: - logger.error('CentOS mirror error: Could not decompress {0}\n' - '{1}'.format(url, traceback.format_exc())) - raise - - # Remove namespace attribute before parsing XML - lists_data = re.sub(' xmlns="[^"]+"', '', lists_data, count=1) - - tree_lists_data = ElementTree.fromstring(lists_data) - - for flist in tree_lists_data.findall('package'): - if flist.get('type') == 'rpm': - flist_arch = flist.find('arch').text - if flist_arch in self.centos_supported_archs: - flist_name = flist.find('name').text - flist_location = flist.find('location') - flist_file = flist_location.get('href') - flist_version = flist.find('version') - flist_ver = '{0}-{1}'.format(flist_version.get('ver'), - flist_version.get('rel')) - cpkg = {'package:': flist_name, - 'version:': flist_ver, - 'filename:': flist_file} - # TODO: add dependencies list to cpkg - self.pkgs_list.append(cpkg) - - # Download packages (local_folder) - def download_pkgs(self, pkgs_local_path): - # Process the packages list: - total_pkgs = len(self.pkgs_list) - logger.info('Found {0} custom package(s)'.format(total_pkgs)) - - for npkg, pkg in enumerate(self.pkgs_list): - # TODO: Previous versions of the updating packages must be removed - # to avoid unwanted packet manager dependencies resolution - # (when some package still depends on other package which - # is not going to be installed) - - logger.info('({0}/{1}) Downloading package: {2}/{3}' - .format(npkg + 1, total_pkgs, - self.custom_pkgs_mirror, - pkg["filename:"])) - - pkg_ext = pkg["filename:"].split('.')[-1] - if pkg_ext == 'deb': - path_suff = 'main/' - elif pkg_ext == 'udeb': - path_suff = 'debian-installer/' - else: - path_suff = '' - - wget_cmd = "wget --no-verbose --directory-prefix {0} {1}/{2}"\ - .format(pkgs_local_path + path_suff, - self.custom_pkgs_mirror, - pkg["filename:"]) - wget_result = self.ssh_manager.execute( - ip=self.ip, - cmd=wget_cmd - ) - assert_equal(0, wget_result['exit_code'], - self.assert_msg(wget_cmd, wget_result['stderr'])) - - # Upload regenerate* script to masternode (script name) - def regenerate_repo(self, regenerate_script, local_mirror_path): - # Uploading scripts that prepare local repositories: - # 'regenerate_centos_repo' and 'regenerate_ubuntu_repo' - try: - self.ssh_manager.upload_to_remote( - ip=self.ip, - source='{0}/{1}'.format(self.path_scripts, regenerate_script), - target=self.remote_path_scripts - ) - self.ssh_manager.execute_on_remote( - ip=self.ip, - cmd='chmod 755 {0}/{1}'.format(self.remote_path_scripts, - regenerate_script) - ) - except Exception: - logger.error('Could not upload scripts for updating repositories.' - '\n{0}'.format(traceback.format_exc())) - raise - - # Update the local repository using previously uploaded script. 
- script_cmd = '{0}/{1} {2} {3}'.format(self.remote_path_scripts, - regenerate_script, - local_mirror_path, - self.ubuntu_release) - script_result = self.ssh_manager.execute( - ip=self.ip, - cmd=script_cmd - ) - assert_equal(0, script_result['exit_code'], - self.assert_msg(script_cmd, script_result['stderr'])) - - logger.info('Local repository {0} has been updated successfully.' - .format(local_mirror_path)) - - @staticmethod - def assert_msg(cmd, err): - return 'Executing \'{0}\' on the admin node has failed with: {1}'\ - .format(cmd, err) - - def check_puppet_logs(self): - logger.info("Check puppet logs for packages with unmet dependencies.") - if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE: - err_deps = self.check_puppet_logs_ubuntu() - else: - err_deps = self.check_puppet_logs_centos() - - for err_deps_key in err_deps.keys(): - logger.info('Error: Package: {0} has unmet dependencies:' - .format(err_deps_key)) - for dep in err_deps[err_deps_key]: - logger.info(' {0}'.format(dep.strip())) - logger.info("Check puppet logs completed.") - - def check_puppet_logs_ubuntu(self): - """ Check puppet-agent.log files on all nodes for package - dependency errors during a cluster deployment (ubuntu)""" - - err_start = 'The following packages have unmet dependencies:' - err_end = ('Unable to correct problems,' - ' you have held broken packages.') - cmd = ('fgrep -h -e " Depends: " -e "{0}" -e "{1}" ' - '/var/log/remote/node-*/' - 'puppet*.log'.format(err_start, err_end)) - result = self.ssh_manager.execute( - ip=self.ip, - cmd=cmd - )['stdout'] - - err_deps = {} - err_deps_key = '' - err_deps_flag = False - - # Forming a dictionary of package names - # with sets of required packages. - for res_str in result: - if err_deps_flag: - if err_end in res_str: - err_deps_flag = False - elif ": Depends:" in res_str: - str0, str1, str2 = res_str.partition(': Depends:') - err_deps_key = ''.join(str0.split()[-1:]) - if err_deps_key not in err_deps: - err_deps[err_deps_key] = set() - if 'but it is not' in str2 or 'is to be installed' in str2: - err_deps[err_deps_key].add('Depends:{0}' - .format(str2)) - elif 'Depends:' in res_str and err_deps_key: - str0, str1, str2 = res_str.partition('Depends:') - if 'but it is not' in str2 or 'is to be installed' in str2: - err_deps[err_deps_key].add(str1 + str2) - else: - err_deps_key = '' - elif err_start in res_str: - err_deps_flag = True - - return err_deps - - def check_puppet_logs_centos(self): - """ Check puppet-agent.log files on all nodes for package - dependency errors during a cluster deployment (centos)""" - - cmd = ('fgrep -h -e "Error: Package: " -e " Requires: " /var/log/' - 'remote/node-*/puppet*.log') - result = self.ssh_manager.execute( - ip=self.ip, - cmd=cmd - )['stdout'] - - err_deps = {} - err_deps_key = '' - - # Forming a dictionary of package names - # with sets of required packages. 
- for res_str in result: - if 'Error: Package:' in res_str: - err_deps_key = res_str.partition('Error: Package: ')[2] - if err_deps_key not in err_deps: - err_deps[err_deps_key] = set() - elif ' Requires: ' in res_str and err_deps_key: - _, str1, str2 = res_str.partition(' Requires: ') - err_deps[err_deps_key].add(str1 + str2) - else: - err_deps_key = '' - - return err_deps diff --git a/fuelweb_test/helpers/regenerate_ubuntu_repo b/fuelweb_test/helpers/regenerate_ubuntu_repo deleted file mode 100644 index 31fd0ce8b..000000000 --- a/fuelweb_test/helpers/regenerate_ubuntu_repo +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# Based on the method described here: -# http://troubleshootingrange.blogspot.com/2012/09/hosting-simple-apt-repository-on-centos.html -# Please install 'dpkg' and 'dpkg-devel' packages before use. - -set -e - -ARCH=amd64 -REPO_PATH=$1 -SUITE=$2 -SECTION=main - -BINDIR=${REPO_PATH}/dists/${SUITE}/${SECTION} - -# Validate structure of the repo -mkdir -p "${BINDIR}/binary-${ARCH}/" -mkdir -p "${REPO_PATH}/pool/${SECTION}" -RELEASE="${REPO_PATH}/dists/${SUITE}/Release" -touch ${RELEASE} - -release_header=`sed '/MD5Sum:/,$d' ${RELEASE}` - -override_main="indices/override.${SUITE}.${SECTION}" -override_udeb="indices/override.${SUITE}.${SECTION}.debian-installer" -override_extra="indices/override.${SUITE}.extra.${SECTION}" - -if [ -f "${REPO_PATH}/${override_main}" ]; then - binoverride="${override_main}" -else - binoverride="" -fi -if [ -f "${REPO_PATH}/${override_udeb}" ]; then - binoverride_udeb="${override_udeb}" -else - binoverride_udeb="" -fi -if [ -f "${REPO_PATH}/${override_extra}" ]; then - extraoverride="--extra-override ${override_extra}" -else - extraoverride="" -fi - -package_deb=${BINDIR}/binary-${ARCH}/Packages -package_udeb=${BINDIR}/debian-installer/binary-${ARCH}/Packages - -cd ${REPO_PATH} - -# Scan *.deb packages -dpkg-scanpackages -m ${extraoverride} -a ${ARCH} pool/${SECTION} ${binoverride} > ${package_deb}.tmp 2>/dev/null - -gzip -9c ${package_deb}.tmp > ${package_deb}.gz.tmp -bzip2 -ckz ${package_deb}.tmp > ${package_deb}.bz2.tmp - -# Replace original files with new ones -mv --backup -f ${package_deb}.tmp ${package_deb} -mv --backup -f ${package_deb}.gz.tmp ${package_deb}.gz -mv --backup -f ${package_deb}.bz2.tmp ${package_deb}.bz2 - -# Scan *.udeb packages -if [ -d "${BINDIR}/debian-installer/binary-${ARCH}/" ]; then - dpkg-scanpackages --udeb -m -a ${ARCH} pool/debian-installer ${binoverride_udeb} > ${package_udeb}.tmp 2>/dev/null - - gzip -9c ${package_udeb}.tmp > ${package_udeb}.gz.tmp - bzip2 -ckz ${package_udeb}.tmp > ${package_udeb}.bz2.tmp - - # Replace original files with new ones - mv --backup -f ${package_udeb}.tmp ${package_udeb} - mv --backup -f ${package_udeb}.gz.tmp ${package_udeb}.gz - mv --backup -f ${package_udeb}.bz2.tmp ${package_udeb}.bz2 -fi - -# Generate release file -cd ${REPO_PATH}/dists/${SUITE} -echo "$release_header" > Release.tmp - -# Generate hashes -c1=(MD5Sum: SHA1: SHA256: SHA512:) -c2=(md5 sha1 sha256 sha512) - -i=0 -while [ ${i} -lt ${#c1[*]} ]; do - echo ${c1[i]} - for hashme in `find ${SECTION} -type f \( -not -name "*~" -name "Package*" -o -name "Release*" \)`; do - ohash=`openssl dgst -${c2[$i]} ${hashme}` - chash="${ohash##* }" - size=`stat -c %s ${hashme}` - echo " ${chash} ${size} ${hashme}" - done - i=$(( $i + 1)); -done >> Release.tmp - -mv --backup -f Release.tmp Release diff --git a/fuelweb_test/helpers/replace_repos.py b/fuelweb_test/helpers/replace_repos.py deleted file mode 100644 index 78caebf32..000000000 
--- a/fuelweb_test/helpers/replace_repos.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from fuelweb_test import logger -import fuelweb_test.settings as help_data - - -def replace_ubuntu_repos(repos_attr, upstream_host): - # Walk thru repos_attr and replace/add extra Ubuntu mirrors - if help_data.MIRROR_UBUNTU: - logger.debug("Adding new mirrors: '{0}'" - .format(help_data.MIRROR_UBUNTU)) - repos = add_ubuntu_mirrors() - # Keep other (not upstream) repos, skip previously added ones - for repo_value in repos_attr['value']: - if upstream_host not in repo_value['uri']: - if check_new_ubuntu_repo(repos, repo_value): - repos.append(repo_value) - else: - logger.debug("Removing mirror: '{0} {1}'" - .format(repo_value['name'], repo_value['uri'])) - else: - # Use defaults from Nailgun if MIRROR_UBUNTU is not set - repos = repos_attr['value'] - if help_data.EXTRA_DEB_REPOS: - repos = add_ubuntu_extra_mirrors(repos=repos) - if help_data.PATCHING_DISABLE_UPDATES: - repos = [repo for repo in repos if repo['name'] - not in ('mos-updates', 'mos-security')] - - return repos - - -def replace_centos_repos(repos_attr, upstream_host): - # Walk thru repos_attr and replace/add extra Centos mirrors - if help_data.MIRROR_CENTOS: - logger.debug("Adding new mirrors: '{0}'" - .format(help_data.MIRROR_CENTOS)) - repos = add_centos_mirrors() - # Keep other (not upstream) repos, skip previously added ones - for repo_value in repos_attr['value']: - # self.admin_node_ip while repo is located on master node - if upstream_host not in repo_value['uri']: - if check_new_centos_repo(repos, repo_value): - repos.append(repo_value) - else: - logger.debug("Removing mirror: '{0} {1}'" - .format(repo_value['name'], repo_value['uri'])) - else: - # Use defaults from Nailgun if MIRROR_CENTOS is not set - repos = repos_attr['value'] - if help_data.EXTRA_RPM_REPOS: - repos = add_centos_extra_mirrors(repos=repos) - if help_data.PATCHING_DISABLE_UPDATES: - repos = [repo for repo in repos if repo['name'] - not in ('mos-updates', 'mos-security')] - - return repos - - -def report_repos(repos_attr, release=help_data.OPENSTACK_RELEASE): - """Show list of repositories for specified cluster""" - if help_data.OPENSTACK_RELEASE_UBUNTU in release: - report_ubuntu_repos(repos_attr['value']) - else: - report_centos_repos(repos_attr['value']) - - -def report_ubuntu_repos(repos): - for x, rep in enumerate(repos): - logger.info( - "Ubuntu repo {0} '{1}': '{2} {3} {4} {5}', priority:{6}" - .format(x, rep['name'], rep['type'], rep['uri'], - rep['suite'], rep['section'], rep['priority'])) - - -def report_centos_repos(repos): - for x, rep in enumerate(repos): - logger.info( - "Centos repo {0} '{1}': '{2} {3}', priority:{4}" - .format(x, rep['name'], rep['type'], rep['uri'], - rep['priority'])) - - -def add_ubuntu_mirrors(repos=None, mirrors=help_data.MIRROR_UBUNTU, - priority=help_data.MIRROR_UBUNTU_PRIORITY): - if not repos: - repos = [] - # Add external Ubuntu repositories - 
for x, repo_str in enumerate(mirrors.split('|')): - repo_value = parse_ubuntu_repo( - repo_str, 'ubuntu-{0}'.format(x), priority) - if repo_value and check_new_ubuntu_repo(repos, repo_value): - repos.append(repo_value) - return repos - - -def add_centos_mirrors(repos=None, mirrors=help_data.MIRROR_CENTOS, - priority=help_data.MIRROR_CENTOS_PRIORITY): - if not repos: - repos = [] - # Add external Centos repositories - for repo_str in mirrors.split('|'): - repo_value = parse_centos_repo(repo_str, priority) - if repo_value and check_new_centos_repo(repos, repo_value): - repos.append(repo_value) - return repos - - -def add_ubuntu_extra_mirrors(repos=None, prefix='extra', - mirrors=help_data.EXTRA_DEB_REPOS, - priority=help_data.EXTRA_DEB_REPOS_PRIORITY): - if not repos: - repos = [] - # Add extra Ubuntu repositories with higher priority - for x, repo_str in enumerate(mirrors.split('|')): - repo_value = parse_ubuntu_repo( - repo_str, '{0}-{1}'.format(prefix, x), priority) - - if repo_value and check_new_ubuntu_repo(repos, repo_value): - # Remove repos that use the same name - repos = [repo for repo in repos - if repo["name"] != repo_value["name"]] - repos.append(repo_value) - - return repos - - -def add_centos_extra_mirrors(repos=None, - mirrors=help_data.EXTRA_RPM_REPOS, - priority=help_data.EXTRA_RPM_REPOS_PRIORITY): - if not repos: - repos = [] - # Add extra Centos repositories - for repo_str in mirrors.split('|'): - repo_value = parse_centos_repo(repo_str, priority) - if repo_value and check_new_centos_repo(repos, repo_value): - # Remove repos that use the same name - repos = [repo for repo in repos - if repo["name"] != repo_value["name"]] - repos.append(repo_value) - - return repos - - -def check_new_ubuntu_repo(repos, repo_value): - # Checks that 'repo_value' is a new unique record for Ubuntu 'repos' - for repo in repos: - if (repo["type"] == repo_value["type"] and - repo["uri"] == repo_value["uri"] and - repo["suite"] == repo_value["suite"] and - repo["section"] == repo_value["section"]): - return False - return True - - -def check_new_centos_repo(repos, repo_value): - # Checks that 'repo_value' is a new unique record for Centos 'repos' - for repo in repos: - if repo["uri"] == repo_value["uri"]: - return False - return True - - -def parse_ubuntu_repo(repo_string, name, priority): - # Validate DEB repository string format - results = re.search(""" - ^ # [beginning of the string] - ([\w\-\.\/]+)? # group 1: optional repository name (for Nailgun) - ,? # [optional comma separator] - (deb|deb-src) # group 2: type; search for 'deb' or 'deb-src' - \s+ # [space separator] - ( # group 3: uri; - \w+:\/\/ # - protocol, i.e. 'http://' - [\w\-\.\/]+ # - hostname - (?::\d+) # - port, i.e. ':8080', if exists - ?[\w\-\.\/]+ # - rest of the path, if exists - ) # - end of group 2 - \s+ # [space separator] - ([\w\-\.\/]+) # group 4: suite; - \s* # [space separator], if exists - ( # group 5: section; - [\w\-\.\/\s]* # - several space-separated names, or None - ) # - end of group 4 - ,? # [optional comma separator] - (\d+)? 
# group 6: optional priority of the repository - $ # [ending of the string]""", - repo_string.strip(), re.VERBOSE) - if results: - return {"name": results.group(1) or name, - "priority": int(results.group(6) or priority), - "type": results.group(2), - "uri": results.group(3), - "suite": results.group(4), - "section": results.group(5) or ''} - else: - logger.error("Provided DEB repository has incorrect format: {}" - .format(repo_string)) - - -def parse_centos_repo(repo_string, priority): - # Validate RPM repository string format - results = re.search(""" - ^ # [beginning of the string] - ([\w\-\.\/]+) # group 1: repo name - , # [comma separator] - ( # group 2: uri; - \w+:\/\/ # - protocol, i.e. 'http://' - [\w\-\.\/]+ # - hostname - (?::\d+) # - port, i.e. ':8080', if exists - ?[\w\-\.\/]+ # - rest of the path, if exists - ) # - end of group 2 - \s* # [space separator] - ,? # [optional comma separator] - (\d+)? # group 3: optional priority of the repository - $ # [ending of the string]""", - repo_string.strip(), re.VERBOSE) - if results: - return {"name": results.group(1), - "priority": int(results.group(3) or priority), - "type": 'rpm', - "uri": results.group(2)} - else: - logger.error("Provided RPM repository has incorrect format: {}" - .format(repo_string)) diff --git a/fuelweb_test/helpers/security.py b/fuelweb_test/helpers/security.py deleted file mode 100644 index 2d6922f73..000000000 --- a/fuelweb_test/helpers/security.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from random import randrange - -from proboscis.asserts import assert_equal - -from core.helpers.log_helpers import logwrap - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import retry -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU - - -class SecurityChecks(object): - """SecurityChecks.""" # TODO documentation - - def __init__(self, nailgun_client, environment): - self.client = nailgun_client - self.environment = environment - super(SecurityChecks, self).__init__() - - @logwrap - def _listen_random_port(self, ip_address, protocol, tmp_file_path): - # Install socat - if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE: - cmd = '/usr/bin/apt-get install -y {pkg}'.format(pkg='socat') - else: - cmd = '/usr/bin/yum install -y {pkg}'.format(pkg='socat') - with self.environment.d_env.get_ssh_to_remote(ip_address) as remote: - result = remote.execute(cmd) - if not result['exit_code'] == 0: - raise Exception('Could not install package: {0}\n{1}'. 
- format(result['stdout'], result['stderr'])) - # Get all used ports - cmd = ('netstat -A inet -ln --{proto} | awk \'$4 ~ /^({ip}' - '|0\.0\.0\.0):[0-9]+/ {{split($4,port,":"); print ' - 'port[2]}}\'').format(ip=ip_address, proto=protocol) - with self.environment.d_env.get_ssh_to_remote(ip_address) as remote: - used_ports = [int(p.strip()) - for p in remote.execute(cmd)['stdout']] - - # Get list of opened ports - cmd = ('iptables -t filter -S INPUT | sed -rn -e \'s/^.*\s\-p\s+' - '{proto}\s.*\-\-(dport|ports|dports)\s+([0-9,\,,:]+)\s.*' - '-j\s+ACCEPT.*$/\\2/p\' | sed -r \'s/,/\\n/g; s/:/ /g\' |' - ' while read ports; do if [[ "$ports" =~ [[:digit:]]' - '[[:blank:]][[:digit:]] ]]; then seq $ports; else echo ' - '"$ports";fi; done').format(proto=protocol) - with self.environment.d_env.get_ssh_to_remote(ip_address) as remote: - allowed_ports = [int(p.strip()) - for p in remote.execute(cmd)['stdout']] - - test_port = randrange(10000) - while test_port in used_ports or test_port in allowed_ports: - test_port = randrange(10000) - - # Create dump of iptables rules - cmd = 'iptables-save > {0}.dump'.format(tmp_file_path) - with self.environment.d_env.get_ssh_to_remote(ip_address) as remote: - result = remote.execute(cmd) - assert_equal(result['exit_code'], 0, - 'Dumping of iptables rules failed on {0}: {1}; {2}'. - format(ip_address, result['stdout'], result['stderr'])) - - # Start listening for connections on test_port - cmd = ('socat {proto}4-LISTEN:{port},bind={ip} {file} ' - '&>/dev/null & pid=$! ; disown; sleep 1; kill -0 $pid').\ - format(proto=protocol, ip=ip_address, file=tmp_file_path, - port=test_port) - with self.environment.d_env.get_ssh_to_remote(ip_address) as remote: - result = remote.execute(cmd) - - assert_equal(result['exit_code'], 0, - 'Listening on {0}:{1}/{2} port failed: {3}'. - format(ip_address, test_port, protocol, - result['stderr'])) - return test_port - - @retry() - @logwrap - def verify_firewall(self, cluster_id): - # Install NetCat - if not self.environment.admin_install_pkg('nc') == 0: - raise Exception('Can not install package "nc".') - - cluster_nodes = self.client.list_cluster_nodes(cluster_id) - tmp_file_path = '/var/tmp/iptables_check_file' - check_string = 'FirewallHole' - - for node in cluster_nodes: - protocols_to_check = ['tcp', 'udp'] - for protocol in protocols_to_check: - port = self._listen_random_port(ip_address=node['ip'], - protocol=protocol, - tmp_file_path=tmp_file_path) - nc_opts = '' - if protocol == 'udp': - nc_opts = '{} -u'.format(nc_opts) - - cmd = 'echo {string} | nc {opts} {ip} {port}'.\ - format(opts=nc_opts, string=check_string, ip=node['ip'], - port=port) - with self.environment.d_env.get_admin_remote() as admin_remote: - admin_remote.execute(cmd) - with self.environment.d_env\ - .get_ssh_to_remote(node['ip']) as remote: - cmd = 'cat {0}; mv {0}{{,.old}}'.format(tmp_file_path) - result = remote.execute(cmd) - if ''.join(result['stdout']).strip() == check_string: - msg = ('Firewall vulnerability detected. Unused port ' - '{0}/{1} can be accessed on {2} (node-{3}) node. ' - 'Check {4}.old and {4}.dump files on the node for ' - 'details'.format(port, protocol, node['name'], - node['id'], tmp_file_path)) - raise Exception(msg) - logger.info('Firewall test passed') diff --git a/fuelweb_test/helpers/ssh_manager.py b/fuelweb_test/helpers/ssh_manager.py deleted file mode 100644 index 63612c19a..000000000 --- a/fuelweb_test/helpers/ssh_manager.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright 2016 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import posixpath -import re -import traceback -from warnings import warn - -from devops.helpers.metaclasses import SingletonMeta -from devops.helpers.ssh_client import SSHAuth -from devops.helpers.ssh_client import SSHClient -from paramiko import RSAKey -from paramiko import SSHException -import six - -from fuelweb_test import logger -from fuelweb_test.settings import SSH_FUEL_CREDENTIALS -from fuelweb_test.settings import SSH_SLAVE_CREDENTIALS - - -class SSHManager(six.with_metaclass(SingletonMeta, object)): - - def __init__(self): - logger.debug('SSH_MANAGER: Run constructor SSHManager') - self.__connections = {} # Disallow direct type change and deletion - self.admin_ip = None - self.admin_port = None - self.admin_login = None - self.__admin_password = None - self.slave_login = None - self.slave_fallback_login = 'root' - self.__slave_password = None - - @property - def connections(self): - return self.__connections - - def initialize(self, admin_ip, - admin_login=SSH_FUEL_CREDENTIALS['login'], - admin_password=SSH_FUEL_CREDENTIALS['password'], - slave_login=SSH_SLAVE_CREDENTIALS['login'], - slave_password=SSH_SLAVE_CREDENTIALS['password']): - """ It will be moved to __init__ - - :param admin_ip: ip address of admin node - :param admin_login: user name - :param admin_password: password for user - :param slave_login: user name - :param slave_password: password for user - :return: None - """ - self.admin_ip = admin_ip - self.admin_port = 22 - self.admin_login = admin_login - self.__admin_password = admin_password - self.slave_login = slave_login - self.__slave_password = slave_password - - def _get_keys(self): - keys = [] - admin_remote = self.get_remote(self.admin_ip) - key_string = '/root/.ssh/id_rsa' - with admin_remote.open(key_string) as f: - keys.append(RSAKey.from_private_key(f)) - return keys - - def connect(self, remote): - """ Check if connection is stable and return this one - - :param remote: - :return: - """ - try: - from fuelweb_test.helpers.utils import RunLimit - with RunLimit( - seconds=5, - error_message="Socket timeout! Forcing reconnection"): - remote.check_call("cd ~") - except Exception: - logger.debug(traceback.format_exc()) - logger.debug('SSHManager: Check for current connection fails. 
' - 'Trying to reconnect') - remote = self.reconnect(remote) - return remote - - def reconnect(self, remote): - """ Reconnect to remote or update connection - - :param remote: - :return: - """ - ip = remote.hostname - port = remote.port - try: - remote.reconnect() - except SSHException: - self.update_connection(ip=ip, port=port) - return self.connections[(ip, port)] - - def init_remote(self, ip, port=22, custom_creds=None): - """ Initialise connection to remote - - :param ip: IP of host - :type ip: str - :param port: port for SSH - :type port: int - :param custom_creds: custom creds - :type custom_creds: dict - """ - logger.debug('SSH_MANAGER: Create new connection for ' - '{ip}:{port}'.format(ip=ip, port=port)) - - keys = self._get_keys() if ip != self.admin_ip else [] - if ip == self.admin_ip: - ssh_client = SSHClient( - host=ip, - port=port, - auth=SSHAuth( - username=self.admin_login, - password=self.__admin_password, - keys=keys) - ) - ssh_client.sudo_mode = SSH_FUEL_CREDENTIALS['sudo'] - elif custom_creds: - ssh_client = SSHClient( - host=ip, - port=port, - auth=SSHAuth(**custom_creds)) - else: - try: - ssh_client = SSHClient( - host=ip, - port=port, - auth=SSHAuth( - username=self.slave_login, - password=self.__slave_password, - keys=keys) - ) - except SSHException: - ssh_client = SSHClient( - host=ip, - port=port, - auth=SSHAuth( - username=self.slave_fallback_login, - password=self.__slave_password, - keys=keys) - ) - ssh_client.sudo_mode = SSH_SLAVE_CREDENTIALS['sudo'] - - self.connections[(ip, port)] = ssh_client - logger.debug('SSH_MANAGER: New connection for ' - '{ip}:{port} is created'.format(ip=ip, port=port)) - - def get_remote(self, ip, port=22): - """ Function returns remote SSH connection to node by ip address - - :param ip: IP of host - :type ip: str - :param port: port for SSH - :type port: int - :rtype: SSHClient - """ - if (ip, port) in self.connections: - logger.debug('SSH_MANAGER: Return existed connection for ' - '{ip}:{port}'.format(ip=ip, port=port)) - else: - self.init_remote(ip=ip, port=port) - logger.debug('SSH_MANAGER: Connections {0}'.format(self.connections)) - return self.connect(self.connections[(ip, port)]) - - def update_connection(self, ip, port=22, login=None, password=None, - keys=None): - """Update existed connection - - :param ip: host ip string - :param port: ssh port int - :param login: login string - :param password: password string - :param keys: list of keys - :return: None - """ - if (ip, port) in self.connections: - logger.debug('SSH_MANAGER: Close connection for {ip}:{port}' - .format(ip=ip, port=port)) - ssh_client = self.connections.pop((ip, port)) - ssh_client.close() - if login and (password or keys): - custom_creds = { - 'username': login, - 'password': password, - 'keys': keys - } - else: - custom_creds = None - self.init_remote(ip=ip, port=port, custom_creds=custom_creds) - - def clean_all_connections(self): - for (ip, port), connection in self.connections.items(): - connection.clear() - logger.debug('SSH_MANAGER: Close connection for {ip}:{port}' - .format(ip=ip, port=port)) - - def execute(self, ip, cmd, port=22, sudo=None): - remote = self.get_remote(ip=ip, port=port) - with remote.sudo(enforce=sudo): - return remote.execute(cmd) - - def check_call( - self, - ip, - command, port=22, verbose=False, timeout=None, - error_info=None, - expected=None, raise_on_err=True, - sudo=None - ): - """Execute command and check for return code - - :type ip: str - :type command: str - :type port: int - :type verbose: bool - :type timeout: int - 
:type error_info: str - :type expected: list - :type raise_on_err: bool - :type sudo: bool - :rtype: ExecResult - :raises: DevopsCalledProcessError - """ - remote = self.get_remote(ip=ip, port=port) - with remote.sudo(enforce=sudo): - return remote.check_call( - command=command, - verbose=verbose, - timeout=timeout, - error_info=error_info, - expected=expected, - raise_on_err=raise_on_err - ) - - def execute_on_remote(self, ip, cmd, port=22, err_msg=None, - jsonify=False, assert_ec_equal=None, - raise_on_assert=True, yamlify=False, sudo=None): - """Execute ``cmd`` on ``remote`` and return result. - - :param ip: ip of host - :param port: ssh port - :param cmd: command to execute on remote host - :param err_msg: custom error message - :param jsonify: bool, conflicts with yamlify - :param assert_ec_equal: list of expected exit_code - :param raise_on_assert: Boolean - :param yamlify: bool, conflicts with jsonify - :param sudo: use sudo: bool or None for default value set in settings - :return: dict - :raise: Exception - """ - warn( - 'SSHManager().execute_on_remote is deprecated in favor of ' - 'SSHManager().check_call.\n' - 'Please, do not use this method in any new tests. ' - 'Old code will be updated later.', DeprecationWarning - ) - if assert_ec_equal is None: - assert_ec_equal = [0] - - if yamlify and jsonify: - raise ValueError('Conflicting arguments: yamlify and jsonify!') - - orig_result = self.check_call( - ip=ip, - command=cmd, - port=port, - error_info=err_msg, - expected=assert_ec_equal, - raise_on_err=raise_on_assert, - sudo=sudo - ) - - # Now create fallback result - # TODO(astepanov): switch to SSHClient output after tests adoptation - - result = { - 'stdout': orig_result['stdout'], - 'stderr': orig_result['stderr'], - 'exit_code': orig_result['exit_code'], - 'stdout_str': ''.join(orig_result['stdout']).strip(), - 'stderr_str': ''.join(orig_result['stderr']).strip(), - } - - if jsonify: - result['stdout_json'] = orig_result.stdout_json - elif yamlify: - result['stdout_yaml'] = orig_result.stdout_yaml - - return result - - def execute_async_on_remote(self, ip, cmd, port=22, sudo=None): - remote = self.get_remote(ip=ip, port=port) - with remote.sudo(enforce=sudo): - return remote.execute_async(cmd) - - def open_on_remote(self, ip, path, mode='r', port=22): - remote = self.get_remote(ip=ip, port=port) - return remote.open(path, mode) - - def upload_to_remote(self, ip, source, target, port=22, sudo=None): - remote = self.get_remote(ip=ip, port=port) - with remote.sudo(enforce=sudo): - return remote.upload(source, target) - - def download_from_remote(self, ip, destination, target, port=22): - remote = self.get_remote(ip=ip, port=port) - return remote.download(destination, target) - - def exists_on_remote(self, ip, path, port=22): - remote = self.get_remote(ip=ip, port=port) - return remote.exists(path) - - def isdir_on_remote(self, ip, path, port=22): - remote = self.get_remote(ip=ip, port=port) - return remote.isdir(path) - - def isfile_on_remote(self, ip, path, port=22): - remote = self.get_remote(ip=ip, port=port) - return remote.isfile(path) - - def mkdir_on_remote(self, ip, path, port=22, sudo=None): - remote = self.get_remote(ip=ip, port=port) - with remote.sudo(enforce=sudo): - return remote.mkdir(path) - - def rm_rf_on_remote(self, ip, path, port=22, sudo=None): - remote = self.get_remote(ip=ip, port=port) - with remote.sudo(enforce=sudo): - return remote.rm_rf(path) - - def cond_upload(self, ip, source, target, port=22, condition='', - clean_target=False, sudo=None): - """ 
Upload files only if condition in regexp matches filenames - - :param ip: host ip - :param source: source path - :param target: destination path - :param port: ssh port - :param condition: regexp condition - :param clean_target: drop whole target contents by target recreate - :param sudo: use sudo: bool or None for default value set in settings - :return: count of files - """ - - # remote = self.get_remote(ip=ip, port=port) - # maybe we should use SSHClient function. e.g. remote.isdir(target) - # we can move this function to some *_actions class - if self.isdir_on_remote(ip=ip, port=port, path=target): - target = posixpath.join(target, os.path.basename(source)) - - if clean_target: - self.rm_rf_on_remote(ip=ip, port=port, path=target, sudo=sudo) - self.mkdir_on_remote(ip=ip, port=port, path=target, sudo=sudo) - - source = os.path.expanduser(source) - if not os.path.isdir(source): - if re.match(condition, source): - self.upload_to_remote(ip=ip, port=port, - source=source, target=target, sudo=sudo) - logger.debug("File '{0}' uploaded to the remote folder" - " '{1}'".format(source, target)) - return 1 - else: - logger.debug("Pattern '{0}' doesn't match the file '{1}', " - "uploading skipped".format(condition, source)) - return 0 - - files_count = 0 - for rootdir, _, files in os.walk(source): - targetdir = os.path.normpath( - os.path.join( - target, - os.path.relpath(rootdir, source))).replace("\\", "/") - - self.mkdir_on_remote(ip=ip, port=port, path=targetdir, sudo=sudo) - - for entry in files: - local_path = os.path.join(rootdir, entry) - remote_path = posixpath.join(targetdir, entry) - if re.match(condition, local_path): - self.upload_to_remote(ip=ip, - port=port, - source=local_path, - target=remote_path, - sudo=sudo) - files_count += 1 - logger.debug("File '{0}' uploaded to the " - "remote folder '{1}'".format(source, target)) - else: - logger.debug("Pattern '{0}' doesn't match the file '{1}', " - "uploading skipped".format(condition, - local_path)) - return files_count diff --git a/fuelweb_test/helpers/ssl_helpers.py b/fuelweb_test/helpers/ssl_helpers.py deleted file mode 100644 index 963d272ca..000000000 --- a/fuelweb_test/helpers/ssl_helpers.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
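The SSHManager methods deleted above cache one SSHClient per (ip, port) pair, so repeated calls against the same node reuse a live connection. A minimal usage sketch, assuming the singleton was already initialized with admin credentials elsewhere and that 10.109.0.2 is a hypothetical node address:

from fuelweb_test.helpers.ssh_manager import SSHManager

ssh = SSHManager()
# the first call opens and caches the connection; later calls reuse it
result = ssh.check_call(ip='10.109.0.2', command='uptime', expected=[0])
print(result.stdout_str)
ssh.execute(ip='10.109.0.2', cmd='ls /var/log')  # same cached connection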
- -import os - -from OpenSSL import crypto - -from core.helpers.log_helpers import logwrap - -from fuelweb_test import logger -from fuelweb_test.settings import DISABLE_SSL -from fuelweb_test.settings import PATH_TO_CERT -from fuelweb_test.settings import PATH_TO_PEM -from fuelweb_test.settings import USER_OWNED_CERT - - -@logwrap -def generate_user_own_cert(cn, path_to_cert=PATH_TO_CERT, - path_to_pem=PATH_TO_PEM): - logger.debug("Trying to generate user certificate files") - k = crypto.PKey() - k.generate_key(crypto.TYPE_RSA, 2048) - cert = crypto.X509() - cert.get_subject().OU = 'Fuel-QA Team' - cert.get_subject().CN = cn - cert.set_serial_number(1000) - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(315360000) - cert.set_issuer(cert.get_subject()) - cert.set_pubkey(k) - cert.sign(k, 'sha1') - with open(path_to_pem, 'wt') as f: - f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) - f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)) - logger.debug("Generated PEM file {}".format(path_to_pem)) - with open(path_to_cert, 'wt') as f: - f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) - logger.debug("Generated PEM file {}".format(path_to_cert)) - - -@logwrap -def change_cluster_ssl_config(attributes, CN): - logger.debug("Trying to change cluster {} ssl configuration") - is_ssl_available = attributes['editable'].get('public_ssl', None) - if DISABLE_SSL and is_ssl_available: - attributes['editable']['public_ssl']['services'][ - 'value'] = False - attributes['editable']['public_ssl']['horizon'][ - 'value'] = False - elif not DISABLE_SSL and is_ssl_available: - attributes['editable']['public_ssl']['services'][ - 'value'] = True - attributes['editable']['public_ssl']['horizon'][ - 'value'] = True - attributes['editable']['public_ssl']['hostname'][ - 'value'] = CN - if USER_OWNED_CERT: - generate_user_own_cert(CN) - attributes['editable']['public_ssl'][ - 'cert_source']['value'] = 'user_uploaded' - cert_data = {} - with open(PATH_TO_PEM, 'r') as f: - cert_data['content'] = f.read() - cert_data['name'] = os.path.basename(PATH_TO_PEM) - attributes['editable']['public_ssl'][ - 'cert_data']['value'] = cert_data - - -@logwrap -def copy_cert_from_master(admin_remote, cluster_id, - path_to_store=PATH_TO_CERT): - path_to_cert = \ - '/var/lib/fuel/keys/{}/haproxy/public_haproxy.crt'.format( - cluster_id) - admin_remote.download(path_to_cert, path_to_store) - logger.debug("Copied cert from admin node to the {}".format( - path_to_store)) diff --git a/fuelweb_test/helpers/tempest.conf.template b/fuelweb_test/helpers/tempest.conf.template deleted file mode 100644 index 744f70b3e..000000000 --- a/fuelweb_test/helpers/tempest.conf.template +++ /dev/null @@ -1,76 +0,0 @@ -[DEFAULT] - -debug=true -verbose=true - -[identity] - -disable_ssl_certificate_validation=true - -uri=http://{{ management_vip }}:5000/v2.0/ -uri_v3=http://{{ management_vip }}:5000/v3.0/ - -username={{username}} -password={{password}} -tenant_name={{tenant_name}} - -admin_username={{admin_username}} -admin_tenant_name={{admin_tenant_name}} -admin_password={{admin_password}} - -alt_username={{alt_username}} -alt_password={{alt_password}} -alt_tenant_name={{alt_tenant_name}} - -[dashboard] - -dashboard_url=http://{{ management_vip }}/dashboard/ -login_url=http://{{ management_vip }}/dashboard/auth/login/ - -[network] - -tenant_network_cidr={{ internal_cidr }} -tenant_network_mask_bits={{ internal_mask }} -public_network_id={{ public_network }} -public_router_id={{ public_router}} - -[network-feature-enabled] - 
-ipv6=false
-
-[service_available]
-{% if net_provider == 'neutron' %}
-neutron=true
-{% endif %}
-
-[object-storage]
-
-operator_role=admin
-
-[compute]
-
-image_ref={{image_ref}}
-image_ssh_user=cirros
-image_ssh_password=cubswin:)
-
-{% if image_ref_alt %}
-image_ref_alt={{image_ref_alt}}
-{% else %}
-image_ref_alt={{image_ref}}
-{% endif %}
-
-{% if net_provider == 'nova_network' %}
-fixed_network_name=novanetwork_0
-network_for_ssh=fixed
-{% endif %}
-
-[compute-feature-enabled]
-
-api_v3=false
-
-[cli]
-
-#Dont provide full path - PATH variable will help us
-cli_dir=
-
-has_manage=false
\ No newline at end of file
diff --git a/fuelweb_test/helpers/uca.py b/fuelweb_test/helpers/uca.py
deleted file mode 100644
index e6d641e2e..000000000
--- a/fuelweb_test/helpers/uca.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import asserts
-
-from fuelweb_test import settings
-
-
-def change_cluster_uca_config(cluster_attributes):
-    """Returns cluster attributes with UCA repo configuration."""
-
-    # check attributes have uca options
-
-    for option in ["pin_haproxy", "pin_rabbitmq", "pin_ceph"]:
-        asserts.assert_true(
-            option in cluster_attributes["editable"]["repo_setup"],
-            "{0} is not in cluster attributes: {1}".
-            format(option, str(cluster_attributes["editable"]["repo_setup"])))
-
-    # enable UCA repository
-
-    uca_options = cluster_attributes["editable"]["repo_setup"]
-    uca_options["pin_haproxy"]["value"] = settings.UCA_PIN_HAPROXY
-    uca_options["pin_rabbitmq"]["value"] = settings.UCA_PIN_RABBITMQ
-    uca_options["pin_ceph"]["value"] = settings.UCA_PIN_CEPH
-
-    return cluster_attributes
diff --git a/fuelweb_test/helpers/utils.py b/fuelweb_test/helpers/utils.py
deleted file mode 100644
index c9a5856b5..000000000
--- a/fuelweb_test/helpers/utils.py
+++ /dev/null
@@ -1,1637 +0,0 @@
-# Copyright 2014 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
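The change_cluster_uca_config helper above only asserts that the three pin_* options exist and then flips their values; a rough sketch of the attribute structure it expects (field values are illustrative, real attributes come from the nailgun API):

attrs = {
    'editable': {
        'repo_setup': {
            'pin_haproxy': {'value': False},
            'pin_rabbitmq': {'value': False},
            'pin_ceph': {'value': False},
        }
    }
}
attrs = change_cluster_uca_config(attrs)
# each pin now mirrors the corresponding settings.UCA_PIN_* flag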
-
-from __future__ import division
-
-import copy
-# pylint: disable=no-name-in-module
-# noinspection PyUnresolvedReferences
-from distutils import version
-# pylint: enable=no-name-in-module
-import inspect
-import json
-import os
-import posixpath
-import random
-import re
-import signal
-import string
-import time
-import traceback
-from warnings import warn
-
-import netaddr
-from proboscis import asserts
-from proboscis.asserts import assert_true
-from proboscis.asserts import assert_equal
-# pylint: disable=import-error
-# noinspection PyUnresolvedReferences
-from six.moves import configparser
-# pylint: enable=import-error
-# pylint: disable=redefined-builtin
-# noinspection PyUnresolvedReferences
-from six.moves import xrange
-# pylint: enable=redefined-builtin
-import yaml
-
-from core.helpers.log_helpers import logwrap
-from core.models.value_objects import FuelAccessParams
-from fuelweb_test import logger
-from fuelweb_test import settings
-from fuelweb_test.helpers.ssh_manager import SSHManager
-from gates_tests.helpers import exceptions
-
-
-@logwrap
-def get_yaml_to_json(node_ssh, filename):
-    msg = (
-        'get_yaml_to_json helper is unused in fuel-qa and planned '
-        'for deletion on 14.09.2016')
-    warn(msg, DeprecationWarning)
-    traceback.print_stack()
-    logger.critical(msg)
-
-    cmd = ("python -c 'import sys, yaml, json; json.dump("
-           "yaml.load(sys.stdin),"
-           " sys.stdout)' < {0}").format(filename)
-    res = node_ssh.execute(cmd)
-    err_res = ''.join(res['stderr'])
-    asserts.assert_equal(
-        res['exit_code'], 0,
-        'Command {0} execution failed '
-        'with message {1}'.format(cmd, err_res))
-    return res['stdout']
-
-
-@logwrap
-def put_json_on_remote_from_dict(remote, src_dict, cluster_id):
-    msg = (
-        'put_json_on_remote_from_dict helper is unused in fuel-qa and planned '
-        'for deletion on 14.09.2016')
-    warn(msg, DeprecationWarning)
-    traceback.print_stack()
-    logger.critical(msg)
-
-    cmd = ('python -c "import json; '
-           'data=json.dumps({0}); print data"').format(src_dict)
-    result = remote.execute(
-        '{0} > /var/log/network_{1}.json'.format(cmd, cluster_id))
-    asserts.assert_equal(
-        result['exit_code'], 0,
-        'Failed to run cmd {0} with result {1}'.format(cmd, result))
-
-
-@logwrap
-def nova_service_get_pid(node_ssh, nova_services=None):
-    pid_dict = {}
-    for el in nova_services:
-        cmd = "pgrep {0}".format(el)
-        pid_dict[el] = node_ssh.execute(cmd)['stdout']
-        logger.debug('current dict is {0}'.format(pid_dict))
-    return pid_dict
-
-
-@logwrap
-def check_if_service_restarted(node_ssh, services_list=None,
-                               pattern='(re)?start', skip=0):
-    if services_list:
-        # from the log file {2}, scan all lines after line {0} with the
-        # pattern {1} to find restarted services, print their names to stdout
-        cmd = ("awk 'NR >= {0} && /{1}/ {{print $11}}' {2}"
               .format(skip, pattern, '/var/log/puppet.log'))
-        res = ''.join(node_ssh.execute(cmd)['stdout'])
-        logger.debug('Next services were restarted {0}'.format(res))
-        for service in services_list:
-            asserts.assert_true(
-                service in res,
-                'Seems service {0} was not restarted {1}'.format(service, res))
-
-
-@logwrap
-def pull_out_logs_via_ssh(admin_remote, name,
-                          logs_dirs=('/var/log/', '/root/', '/etc/fuel/')):
-    def _compress_logs(_dirs, _archive_path):
-        cmd = 'tar --absolute-names --warning=no-file-changed -czf {t} {d}'.\
-            format(t=_archive_path, d=' '.join(_dirs))
-        result = admin_remote.execute(cmd)
-        if result['exit_code'] != 0:
-            logger.error("Compressing of logs on master node failed: {0}".
- format(result)) - return False - return True - - archive_path = '/var/tmp/fail_{0}_diagnostic-logs_{1}.tgz'.format( - name, time.strftime("%Y_%m_%d__%H_%M_%S", time.gmtime())) - - try: - if _compress_logs(logs_dirs, archive_path): - if not admin_remote.download(archive_path, settings.LOGS_DIR): - logger.error(("Downloading of archive with logs failed, file" - "wasn't saved on local host")) - except Exception: - logger.error(traceback.format_exc()) - - -@logwrap -def store_astute_yaml(env): - func_name = get_test_method_name() - nailgun_nodes = env.fuel_web.client.list_nodes() - - def store_astute_yaml_for_one_node(nailgun_node): - ssh_manager = SSHManager() - if 'roles' not in nailgun_node: - return None - errmsg = 'Downloading "{0}.yaml" from the {1} failed' - msg = 'File "{0}.yaml" was downloaded from the {1}' - nodename = nailgun_node['name'] - ip = nailgun_node['ip'] - for role in nailgun_node['roles']: - filename = '{0}/{1}-{2}-{3}.yaml'.format(settings.LOGS_DIR, - func_name, - nodename, - role) - - if not ssh_manager.isfile_on_remote(ip, - '/etc/{0}.yaml'.format(role)): - role = 'primary-' + role - if ssh_manager.download_from_remote(ip, - '/etc/{0}.yaml'.format(role), - filename): - logger.info(msg.format(role, nodename)) - else: - logger.error(errmsg.format(role, nodename)) - if settings.DOWNLOAD_FACTS: - fact_filename = re.sub(r'-\w*\.', '-facts.', filename) - generate_facts(ip) - if ssh_manager.download_from_remote(ip, - '/tmp/facts.yaml', - fact_filename): - logger.info(msg.format('facts', nodename)) - else: - logger.error(errmsg.format('facts', nodename)) - - try: - for node in nailgun_nodes: - store_astute_yaml_for_one_node(node) - except Exception: - logger.error(traceback.format_exc()) - - -@logwrap -def generate_facts(ip): - ssh_manager = SSHManager() - facter_dir = '/var/lib/puppet/lib/facter' - exluded_facts = ['naily.rb'] - - if not ssh_manager.isdir_on_remote(ip, facter_dir): - ssh_manager.mkdir_on_remote(ip, facter_dir) - logger.debug('Directory {0} was created'.format(facter_dir)) - - ssh_manager.execute_on_remote(ip, 'rm -f {0}/*.rb'.format(facter_dir)) - logger.debug('rb files were removed from {0}'.format(facter_dir)) - - facts_files = ssh_manager.execute_on_remote( - ip, - 'find /etc/puppet/modules/ -wholename "*/lib/facter/*.rb"')['stdout'] - facts_files = [i.strip() for i in facts_files] - logger.debug('The following facts {0} will' - ' be copied to {1}'.format(facts_files, facter_dir)) - for fact in facts_files: - if not fact or re.sub(r'.*/', '', fact) in exluded_facts: - continue - ssh_manager.execute_on_remote(ip, - 'cp {0} {1}/'.format(fact, facter_dir)) - logger.debug('Facts were copied') - - ssh_manager.execute_on_remote(ip, 'facter -p -y > /tmp/facts.yaml') - logger.info('Facts yaml was created') - - ssh_manager.execute_on_remote(ip, 'rm -f {0}/*.rb'.format(facter_dir)) - logger.debug('rb files were removed from {0}'.format(facter_dir)) - - -@logwrap -def get_node_packages(remote, func_name, node_role, - packages_dict, release=settings.OPENSTACK_RELEASE): - if settings.OPENSTACK_RELEASE_UBUNTU in release: - cmd = "dpkg-query -W -f='${Package} ${Version}'\r" - else: - cmd = 'rpm -qa --qf "%{name} %{version}"\r' - node_packages = remote.execute(cmd)['stdout'][0].split('\r')[:-1] - - logger.debug("node packages are {0}".format(node_packages)) - packages_dict[func_name][node_role] = node_packages\ - if node_role not in packages_dict[func_name].keys()\ - else list(set(packages_dict[func_name][node_role]) | - set(node_packages)) - return packages_dict - - 
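get_node_packages above keys its result by test name and node role, and unions package lists when the same role appears on several nodes; the merge rule in isolation, with made-up package data:

packages = {'test_deploy': {'controller': ['nova-api 13.0.0']}}
# a second controller contributes its list; duplicates collapse via set union
new_node = ['nova-api 13.0.0', 'keystone 9.0.0']
packages['test_deploy']['controller'] = list(
    set(packages['test_deploy']['controller']) | set(new_node))
# -> ['nova-api 13.0.0', 'keystone 9.0.0'] (order not guaranteed)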
-@logwrap -def store_packages_json(env): - ssh_manager = SSHManager() - func_name = "".join(get_test_method_name()) - packages = {func_name: {}} - cluster_id = env.fuel_web.get_last_created_cluster() - for nailgun_node in env.fuel_web.client.list_cluster_nodes(cluster_id): - role = '_'.join(nailgun_node['roles']) - logger.debug('role is {0}'.format(role)) - remote = ssh_manager.get_remote(nailgun_node['ip']) - packages = get_node_packages(remote, func_name, role, packages) - packages_file = '{0}/packages.json'.format(settings.LOGS_DIR) - if os.path.isfile(packages_file): - with open(packages_file, 'r') as outfile: - try: - file_packages = json.load(outfile) - except: - file_packages = {} - packages.update(file_packages) - with open(packages_file, 'w') as outfile: - json.dump(packages, outfile) - - -@logwrap -def get_test_method_name(): - # Find the name of the current test in the stack. It can be found - # right under the class name 'NoneType' (when proboscis - # run the test method with unittest.FunctionTestCase) - stack = inspect.stack() - method = '' - for m in stack: - if 'self' in m[0].f_locals: - if m[0].f_locals['self'].__class__.__name__ == 'NoneType': - break - method = m[3] - return method - - -def get_current_env(args): - if args[0].__class__.__name__ == "EnvironmentModel": - return args[0] - elif args[0].__class__.__name__ in ("FuelWebClient", - "FuelWebClient29", - "FuelWebClient30"): - return args[0].environment - else: - try: - return args[0].env - except AttributeError as attr_err: - logger.error("Class '{0}' doesn't have 'env' attribute! {1}" - .format(args[0].__class__.__name__, attr_err.message)) - raise - - -@logwrap -def update_yaml(yaml_tree=None, yaml_value='', is_uniq=True, - yaml_file=settings.TIMESTAT_PATH_YAML): - """Store/update a variable in YAML file. - - yaml_tree - path to the variable in YAML file, will be created if absent, - yaml_value - value of the variable, will be overwritten if exists, - is_uniq - If false, add the unique two-digit suffix to the variable name. - """ - if yaml_tree is None: - yaml_tree = [] - yaml_data = {} - if os.path.isfile(yaml_file): - with open(yaml_file, 'r') as f: - yaml_data = yaml.load(f) - - # Walk through the 'yaml_data' dict, find or create a tree using - # sub-keys in order provided in 'yaml_tree' list - item = yaml_data - for n in yaml_tree[:-1]: - if n not in item: - item[n] = {} - item = item[n] - - if is_uniq: - last = yaml_tree[-1] - else: - # Create an uniq suffix in range '_00' to '_99' - for n in range(100): - last = str(yaml_tree[-1]) + '_' + str(n).zfill(2) - if last not in item: - break - - item[last] = yaml_value - with open(yaml_file, 'w') as f: - yaml.dump(yaml_data, f, default_flow_style=False) - - -class TimeStat(object): - """ Context manager for measuring the execution time of the code. - Usage: - with TimeStat([name],[is_uniq=True]): - """ - - def __init__(self, name=None, is_uniq=False): - if name: - self.name = name - else: - self.name = 'timestat' - self.is_uniq = is_uniq - self.begin_time = 0 - self.end_time = 0 - self.total_time = 0 - - def __enter__(self): - self.begin_time = time.time() - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self.end_time = time.time() - self.total_time = self.end_time - self.begin_time - - # Create a path where the 'self.total_time' will be stored. 
- yaml_path = [] - - # There will be a list of one or two yaml subkeys: - # - first key name is the method name of the test - method_name = get_test_method_name() - if method_name: - yaml_path.append(method_name) - - # - second (subkey) name is provided from the decorator (the name of - # the just executed function), or manually. - yaml_path.append(self.name) - - try: - update_yaml(yaml_path, '{:.2f}'.format(self.total_time), - self.is_uniq) - except Exception: - logger.error("Error storing time statistic for {0}" - " {1}".format(yaml_path, traceback.format_exc())) - raise - - @property - def spent_time(self): - return time.time() - self.begin_time - - -def install_pkg(remote, pkg_name): - """Install a package on node - :param remote: SSHClient to remote node - :param pkg_name: name of a package - :return: exit code of installation - """ - remote_status = remote.execute("rpm -q '{0}'".format(pkg_name)) - if remote_status['exit_code'] == 0: - logger.info("Package '{0}' already installed.".format(pkg_name)) - else: - logger.info("Installing package '{0}' ...".format(pkg_name)) - remote_status = remote.execute("yum -y install {0}" - .format(pkg_name)) - logger.info("Installation of the package '{0}' has been" - " completed with exit code {1}" - .format(pkg_name, remote_status['exit_code'])) - return remote_status['exit_code'] - - -def install_pkg_2(ip, pkg_name, port=22): - """Install a package on node - :param ip: ip of node - :param pkg_name: name of a package - :param port: ssh port - :return: exit code of installation - """ - ssh_manager = SSHManager() - remote_status = ssh_manager.execute( - ip=ip, - port=port, - cmd="rpm -q '{0}'".format(pkg_name) - ) - if remote_status['exit_code'] == 0: - logger.info("Package '{0}' already installed.".format(pkg_name)) - else: - logger.info("Installing package '{0}' ...".format(pkg_name)) - remote_status = ssh_manager.execute( - ip=ip, - port=port, - cmd="yum -y install {0}".format(pkg_name) - ) - logger.info("Installation of the package '{0}' has been" - " completed with exit code {1}" - .format(pkg_name, remote_status['exit_code'])) - return remote_status['exit_code'] - - -def cond_upload(remote, source, target, condition=''): - # Upload files only if condition in regexp matches filenames - if remote.isdir(target): - target = posixpath.join(target, os.path.basename(source)) - - source = os.path.expanduser(source) - if not os.path.isdir(source): - if re.match(condition, source): - remote.upload(source, target) - logger.debug("File '{0}' uploaded to the remote folder '{1}'" - .format(source, target)) - return 1 - else: - logger.debug("Pattern '{0}' doesn't match the file '{1}', " - "uploading skipped".format(condition, source)) - return 0 - - files_count = 0 - for rootdir, _, files in os.walk(source): - targetdir = os.path.normpath( - os.path.join( - target, - os.path.relpath(rootdir, source))).replace("\\", "/") - - remote.mkdir(targetdir) - - for entry in files: - local_path = os.path.join(rootdir, entry) - remote_path = posixpath.join(targetdir, entry) - if re.match(condition, local_path): - remote.upload(local_path, remote_path) - files_count += 1 - logger.debug("File '{0}' uploaded to the remote folder '{1}'" - .format(source, target)) - if 'deb' in entry: - continue - entry_name = entry[0:entry.rfind('-', 0, entry.rfind('-'))] - asserts.assert_true(compare_packages_version( - remote, entry_name, remote_path)) - else: - logger.debug("Pattern '{0}' doesn't match the file '{1}', " - "uploading skipped".format(condition, local_path)) - return 
files_count - - -def json_deserialize(json_string): - """ - Deserialize json_string and return object - - :param json_string: string or list with json - :return: obj - :raise: Exception - """ - msg = ( - 'put_json_on_remote_from_dict helper is unused in fuel-qa and planned ' - 'for deletion on 14.09.2016') - warn(msg, DeprecationWarning) - logger.critical(msg) - if isinstance(json_string, list): - json_string = ''.join(json_string) - - try: - obj = json.loads(json_string) - except Exception: - log_msg = "Unable to deserialize" - logger.error("{0}. Actual string:\n{1}".format(log_msg, json_string)) - raise Exception(log_msg) - return obj - - -def check_distribution(): - """Checks whether distribution is supported. - - :return: None - :raise: Exception - """ - if settings.OPENSTACK_RELEASE not in (settings.OPENSTACK_RELEASE_CENTOS, - settings.OPENSTACK_RELEASE_UBUNTU): - error_msg = ("{0} distribution is not supported!".format( - settings.OPENSTACK_RELEASE)) - logger.error(error_msg) - raise Exception(error_msg) - - -@logwrap -def get_network_template(template_name): - templates_path = ('{0}/fuelweb_test/network_templates/'.format( - os.environ.get("WORKSPACE", "./"))) - template = os.path.join(templates_path, '{}.yaml'.format(template_name)) - if os.path.exists(template): - with open(template) as template_file: - return yaml.load(template_file) - - -@logwrap -def get_net_settings(remote, skip_interfaces=None): - if skip_interfaces is None: - skip_interfaces = set() - net_settings = dict() - interface_cmd = ('awk \'$1~/:/{split($1,iface,":"); print iface[1]}\'' - ' /proc/net/dev') - vlan_cmd = 'awk \'$1~/\./{print $1}\' /proc/net/vlan/config' - bond_cmd = ('awk \'{gsub(" ","\\n"); print}\' ' - '/sys/class/net/bonding_masters') - bridge_cmd = 'ls -d1 /sys/class/net/*/bridge/ | cut -d/ -f5' - ip_cmd = 'ip -o -4 addr show dev {0} | awk \'{{print $4}}\'' - bond_mode_cmd = 'awk \'{{print $1}}\' /sys/class/net/{0}/bonding/mode' - bond_slaves_cmd = ('awk \'{{gsub(" ","\\n"); print}}\' ' - '/sys/class/net/{0}/bonding/slaves') - bridge_slaves_cmd = 'ls -1 /sys/class/net/{0}/brif/' - - node_interfaces = [ - l.strip() for l in remote.check_call(interface_cmd).stdout - if not any(re.search(regex, l.strip()) - for regex in skip_interfaces)] - node_vlans = [l.strip() for l in remote.check_call(vlan_cmd).stdout] - node_bonds = [l.strip() for l in remote.check_call(bond_cmd).stdout] - node_bridges = [l.strip() for l in remote.check_call(bridge_cmd).stdout] - - for interface in node_interfaces: - bond_mode = None - bond_slaves = None - bridge_slaves = None - if interface in node_vlans: - if_type = 'vlan' - elif interface in node_bonds: - if_type = 'bond' - bond_mode = ''.join( - [l.strip() for l in - remote.check_call(bond_mode_cmd.format(interface)).stdout]) - bond_slaves = set( - [l.strip() for l in - remote.check_call(bond_slaves_cmd.format(interface)).stdout] - ) - elif interface in node_bridges: - if_type = 'bridge' - bridge_slaves = set( - [l.strip() for l in - remote.check_call(bridge_slaves_cmd.format(interface)).stdout - if not any(re.search(regex, l.strip()) - for regex in skip_interfaces)] - ) - else: - if_type = 'common' - if_ips = set( - [l.strip() - for l in remote.check_call(ip_cmd.format(interface)).stdout] - ) - - net_settings[interface] = { - 'type': if_type, - 'ip_addresses': if_ips, - 'bond_mode': bond_mode, - 'bond_slaves': bond_slaves, - 'bridge_slaves': bridge_slaves - } - return net_settings - - -@logwrap -def get_ip_listen_stats(remote, proto='tcp'): - # If bindv6only is disabled, 
then IPv6 sockets listen on IPv4 too
-    check_v6_bind_cmd = 'cat /proc/sys/net/ipv6/bindv6only'
-    bindv6only = ''.join(
-        [l.strip() for l in remote.check_call(check_v6_bind_cmd).stdout])
-    check_v6 = bindv6only == '0'
-    if check_v6:
-        cmd = ("awk '$4 == \"0A\" {{gsub(\"00000000000000000000000000000000\","
-               "\"00000000\", $2); print $2}}' "
-               "/proc/net/{0} /proc/net/{0}6").format(proto)
-    else:
-        cmd = "awk '$4 == \"0A\" {{print $2}}' /proc/net/{0}".format(proto)
-    return [l.strip() for l in remote.check_call(cmd).stdout]
-
-
-@logwrap
-def node_freemem(remote, unit='MB'):
-    """Return free memory and swap
-
-    :param unit: str, one of 'KB', 'MB' or 'GB'. Default is 'MB'
-    """
-    denominators = {
-        'KB': 1,
-        'MB': 1024,
-        'GB': 1024 ** 2
-    }
-    denominator = denominators.get(unit, denominators['MB'])
-    cmd_mem_free = 'free -k | grep Mem:'
-    cmd_swap_free = 'free -k | grep Swap:'
-    mem_free = remote.check_call(cmd_mem_free).stdout[0]
-    swap_free = remote.check_call(cmd_swap_free).stdout[0]
-    ret = {
-        "mem": {
-            "total": int(mem_free.split()[1]) // denominator,
-            "used": int(mem_free.split()[2]) // denominator,
-            "free": int(mem_free.split()[3]) // denominator,
-            "shared": int(mem_free.split()[4]) // denominator,
-            "buffers": int(mem_free.split()[5]) // denominator,
-            "cached": int(mem_free.split()[6]) // denominator
-        },
-        "swap": {
-            "total": int(swap_free.split()[1]) // denominator,
-            "used": int(swap_free.split()[2]) // denominator,
-            "free": int(swap_free.split()[3]) // denominator,
-        }
-    }
-    return ret
-
-
-def hiera_json_out(node_ip, parameter):
-    hiera_cmd = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \
-                "Hiera.logger = 'noop'; " \
-                "puts JSON.dump(h.lookup(\'{0}\', " \
-                "[], {{}}, nil, nil))\"".format(parameter)
-    ssh_manager = SSHManager()
-    config = ssh_manager.execute_on_remote(
-        ip=node_ip,
-        cmd=hiera_cmd,
-        jsonify=True,
-        err_msg='Cannot get hiera parameter {0}'.format(parameter)
-    )['stdout_json']
-    return config
-
-
-def generate_floating_ranges(start_ip, end_ip, step):
-    """Generate floating IP ranges between the first and the last
-    address with the given step.
-
-    :param start_ip: first ip address in floating range
-    :param end_ip: last ip address in floating range
-    :param step: number of ip addresses in each floating range
-    :return: list of [start, end] address pairs
-    """
-    ranges = []
-    ip_start = netaddr.IPAddress(str(start_ip))
-    ip_end = netaddr.IPAddress(str(end_ip))
-    while ip_end - step > ip_start:
-        ranges.append([str(ip_start), str(ip_start + step)])
-        ip_start += (step + 1)
-    return ranges
-
-
-def get_node_hiera_roles(remote, fqdn=None):
-    """Get hiera roles assigned to a host
-
-    :param remote: SSHClient to node
-    :param fqdn: fqdn of the host
-    :return: list of hiera roles assigned to the host
-    """
-    cmd = 'hiera roles'
-    if fqdn:
-        cmd += ' fqdn={}'.format(fqdn)
-    roles = remote.check_call(cmd).stdout_str
-    # Convert a string with roles like '["ceph-osd", "controller"]' to a list
-    return [role.strip('" ') for role in roles.strip("[]").split(',')]
-
-
-class RunLimit(object):
-    def __init__(self, seconds=60, error_message='Timeout'):
-        self.seconds = seconds
-        self.error_message = error_message
-
-    def handle_timeout(self, signum, frame):
-        raise TimeoutException(self.error_message)
-
-    def __enter__(self):
-        signal.signal(signal.SIGALRM, self.handle_timeout)
-        signal.alarm(self.seconds)
-
-    def __exit__(self, exc_type, value, traceback):
-        signal.alarm(0)
-
-
-class TimeoutException(Exception):
-    pass
-
-
-def pretty_log(src, indent=0, invert=False):
-    """ Make log output more readable; mainly intended
-    to be used instead of json.dumps().
- - :param src: dictionary with data, list of dicts - can be also used for strings or lists of strings, - but it makes no sense. - Note: Indent for list by default is +3. If you want to call - pretty_log for list , call it with indent=-3 for 0, - indent=-3+1 for 1 and etc. - :param indent: int - :param invert: Swaps first and second columns. Can be used ONLY - with one levels dictionary - :return: formatted string with result, can be used in log - - """ - - result = '' - templates = ["\n{indent}{item:{len}}{value}" if not invert else - "\n{indent}{value:{len}}{item}", - "\n{indent}{item}:", - '\n{indent}{value}'] - - if src and isinstance(src, dict): - max_len = len(max(src.values() if invert else src.keys(), - key=lambda x: len(str(x)))) - for key, value in src.items(): - if (isinstance(value, dict) and value) or \ - isinstance(value, list): - result += templates[1].format(indent=' ' * indent, item=key) - result += pretty_log(value, indent + 3) - else: - result += templates[0].format(indent=' ' * indent, - item=key, - value=str(value), - len=max_len + 5) - - elif src and isinstance(src, list): - for el in src: - if (isinstance(el, dict) and el) or isinstance(el, list): - res = pretty_log(el, indent + 3) - else: - res = templates[2].format(indent=' ' * (indent + 3), - value=str(el)) - result += res[:indent + 2] + '-' + res[indent + 3:] - return result - - -@logwrap -def get_config_template(template_name): - """Get content of yaml file as dictionary. - - :param template_name: a string of name yaml file - :return: a dictionary with configuration data - """ - import fuelweb_test - template = os.path.join(os.path.dirname(fuelweb_test.__file__), - 'config_templates/{0}.yaml'.format(template_name)) - if os.path.exists(template): - with open(template) as template_file: - return yaml.load(template_file) - - -@logwrap -def get_ini_config(data): - """Get a data of configuration file. - - :param data: a file object - :return: a ConfigParser object - """ - config = configparser.ConfigParser() - config.readfp(data) - return config - - -@logwrap -def check_config(conf, conf_name, section, option, value): - """Check existence of parameter with a proper value - or its absence in configuration file. 
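    A hypothetical call, checking that [DEFAULT] debug is set to True
    in nova.conf:

        check_config(conf, '/etc/nova/nova.conf', 'DEFAULT', 'debug', 'True')
        # passing value=None instead asserts the option is absent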
- - :param conf: a file object - :param conf_name: a string of full file path - :param section: a string of section name in configuration file - :param option: a string of option name in configuration file - :param value: None or a string of value in configuration file - """ - if value is None: - if conf.has_section(section) and conf.has_option(section, option): - current_value = conf.get(section, option) - raise Exception('The option "{0}" of section "{1}" should be ' - 'absent but actually has value "{2}" ' - 'in config file "{3}": FAIL'.format(option, - section, - current_value, - conf_name)) - logger.debug('Expected that the option "{0}" of section "{1}" is ' - 'absent in config file "{2}": SUCCESS'.format(option, - section, - conf_name)) - else: - if not conf.has_section(section) and section != 'DEFAULT': - raise Exception('The section "{0}" is absent in ' - 'config file "{1}": FAIL'.format(section, - conf_name)) - if not conf.has_option(section, option): - raise Exception('The option "{0}" of section "{1}" is absent ' - 'in config file "{2}": FAIL'.format(option, - section, - conf_name)) - current_value = conf.get(section, option) - asserts.assert_equal(current_value, - value, - 'Expected that the option "{0}" has value ' - '"{1}" in config file {2} but actually has ' - 'value "{3}": FAIL'.format(option, - value, - conf_name, - current_value)) - logger.debug('The config file "{0}" contains ' - 'the correct value "{1}" of option "{2}" ' - 'in section "{3}": SUCCESS'.format(conf_name, - value, - option, - section)) - - -@logwrap -def get_process_uptime(remote, process_name): - """Get process uptime. - - :param remote: SSHClient to node - :param process_name: a string of process name - :return: a int value of process uptime in seconds - """ - cmd = "ps hf -opid -C {0} | awk '{{print $1; exit}}'".format(process_name) - parent_pid = remote.execute(cmd)['stdout'] - asserts.assert_not_equal(parent_pid, - [], - "No such process " - "with name {0}".format(process_name)) - parent_pid = parent_pid[0].replace('\n', '') - cmd = "ps -p {0} -o etime= | awk '{{print $1}}'".format(parent_pid) - ps_output = remote.execute(cmd)['stdout'][0].replace('\n', '') - ps_output = ps_output.split(':') - uptime = 0 - time_factor = 1 - for i in xrange(1, len(ps_output) + 1): - uptime += int(ps_output[-i]) * time_factor - time_factor *= 60 - return uptime - - -def get_package_version(remote_admin, package, income=None): - if income: - cmd_version = ('rpm ' - '-qp {0} --queryformat ' - '"%{{VERSION}} %{{RELEASE}}"'.format(package)) - else: - cmd_version = ('rpm ' - '-q {0} --queryformat ' - '"%{{VERSION}} %{{RELEASE}}"'.format(package)) - result = remote_admin.execute(cmd_version) - logger.debug('Command {0} execution result {1}'.format( - cmd_version, result)) - if result['exit_code'] != 0: - asserts.assert_true('not installed' in ''.join(result['stdout']), - 'Command {0} fails by unexpected ' - 'reason {1}'.format(cmd_version, result)) - return None - return ''.join(result['stdout']).strip() - - -def compare_packages_version(remote, package_name, income_package_name): - income_release, income_version = get_package_version( - remote, income_package_name, income=True).split(' ') - if not get_package_version(remote, package_name): - return True - installed_release, installed_version = get_package_version( - remote, package_name).split(' ') - if not version.LooseVersion(income_release) == version.LooseVersion( - installed_release): - raise exceptions.PackageVersionError( - package=income_package_name, 
version=income_release) - if version.LooseVersion(installed_version) >= version.LooseVersion( - income_version): - raise exceptions.PackageVersionError( - package=income_package_name, version=income_version) - else: - return True - - -def erase_data_from_hdd(remote, - device=None, - mount_point=None, - source="/dev/zero", - block_size=512, - blocks_from_start=2 * 1024 * 8, - blocks_from_end=2 * 1024 * 8): - """Erases data on "device" using "dd" utility. - - :param remote: devops.SSHClient, remote to node - :param device: str, block device which should be corrupted. If none - - drive mounted at "mount_point" will be used for erasing - :param mount_point: str, mount point for auto-detecting drive for erasing - :param source: str, block device or file that will be used as source for - "dd", default - /dev/zero - :param block_size: int, block size which will be pass to "dd" - :param blocks_from_start: int, count of blocks which will be erased from - the beginning of the hard drive. Default - 16,384 (with bs=512 - 8MB) - :param blocks_from_end: int, count of blocks which will be erased from - the end of the hard drive. Default - 16,384 (with bs=512 - 8MB) - :raises Exception: if return code of any of commands is not 0 - """ - if not device: - asserts.assert_is_not_none( - mount_point, - "Mount point is not defined, will do nothing") - device = remote.execute( - "awk '$2 == \"{mount_point}\" {{print $1}}' /proc/mounts".format( - mount_point=mount_point) - )['stdout'][0] - # get block device for partition - try: - device = re.findall(r"(/dev/[a-z]+)", device)[0] - except IndexError: - logger.error("Can not find any block device in output! " - "Output is:'{}'".format(device)) - commands = [] - logger.debug("Boot sector of device '{}' will be erased".format(device)) - if blocks_from_start > 0: - commands.append( - "dd bs={block_size} if={source} of={device} " - "count={blocks_from_start}".format( - block_size=block_size, - source=source, - device=device, - blocks_from_start=blocks_from_start) - ) - if blocks_from_end > 0: - commands.append( - "dd bs={block_size} if={source} of={device} " - "count={blocks_from_end} " - "seek=$((`blockdev --getsz {device}` - {seek}))".format( - block_size=block_size, - source=source, - device=device, - blocks_from_end=blocks_from_end, - seek=block_size * blocks_from_end) - ) - commands.append("sync") - - for cmd in commands: - remote.check_call(cmd) - - -@logwrap -def fill_space(ip, file_dir, size): - """Allocates space to some file in the specified directory - on the specified node - - :param ip: the ip of the node - :param file_dir: the specified directory - :param size: the amount of space in Mb - """ - file_name = "test_data" - file_path = os.path.join(file_dir, file_name) - SSHManager().execute_on_remote( - ip=ip, - cmd='fallocate -l {0}M {1}'.format(size, file_path), - err_msg="The file {0} was not allocated".format(file_name)) - - -@logwrap -def get_ceph_partitions(ip, device, fs_type="xfs"): - # Moved from checkers.py for improvement of code - ret = SSHManager().check_call( - ip=ip, - command="parted {device} print | grep {type}".format(device=device, - type=fs_type) - ).stdout - if not ret: - logger.error( - "Partition not present! 
{partitions}: ".format( - partitions=SSHManager().check_call( - ip=ip, command="parted {device} print").stdout_str)) - raise Exception() - logger.debug("Partitions: {part}".format(part=ret)) - return ret - - -@logwrap -def get_mongo_partitions(ip, device): - # Moved from checkers.py for improvement of code - ret = SSHManager().check_call( - ip=ip, - command="lsblk | grep {device} | awk {size}".format( - device=device, - size=re.escape('{print $4}')) - )['stdout'] - if not ret: - logger.error( - "Partition not present! {partitions}: ".format( - partitions=SSHManager().check_call( - ip=ip, command="parted {device} print").stdout_str)) - raise Exception() - logger.debug("Partitions: {part}".format(part=ret)) - return ret - - -@logwrap -def upload_tarball(ip, tar_path, tar_target): - # Moved from checkers.py for improvement of code - assert_true(tar_path, "Source path for uploading 'tar_path' is empty, " - "please check test settings!") - if os.path.splitext(tar_path)[1] not in [".tar", ".lrz", ".fp", ".rpm"]: - raise Exception("Wrong archive type!") - try: - logger.info("Start to upload tar file") - SSHManager().upload_to_remote( - ip=ip, - source=tar_path, - target=tar_target - ) - logger.info('File {} was uploaded on master'.format(tar_path)) - except Exception: - logger.error('Failed to upload file') - logger.error(traceback.format_exc()) - - -@logwrap -def install_plugin_check_code(ip, plugin, exit_code=0): - # Moved from checkers.py for improvement of code - cmd = "cd /var && fuel plugins --install {0} ".format(plugin) - chan, _, stderr, _ = SSHManager().execute_async_on_remote( - ip=ip, - cmd=cmd - ) - logger.debug('Try to read status code from chain...') - assert_equal( - chan.recv_exit_status(), exit_code, - 'Install script fails with next message {0}'.format(''.join(stderr))) - - -@logwrap -def get_package_versions_from_node(ip, name, os_type): - # Moved from checkers.py for improvement of code - if os_type and 'Ubuntu' in os_type: - cmd = "dpkg-query -W -f='${Version}' %s" % name - else: - cmd = "rpm -q {0}".format(name) - try: - result = ''.join(SSHManager().execute(ip, cmd)['stdout']) - return result.strip() - except Exception: - logger.error(traceback.format_exc()) - raise - - -@logwrap -def get_file_size(ip, file_name, file_path): - # Moved from checkers.py for improvement of code - file_size = SSHManager().check_call( - ip, 'stat -c "%s" {0}/{1}'.format(file_path, file_name), - error_info="Failed to get '{0}/{1}' file stats on" - " remote node".format(file_path, file_name) - ) - return int(file_size['stdout'][0].rstrip()) - - -@logwrap -def get_quantity_of_numa(ip): - """Get number of NUMA nodes that are contained on remote node - - :param ip: node IP - :return: int, count of available NUMA nodes on the node - """ - - numa = int(SSHManager().check_call( - ip=ip, - command="lstopo | grep -c NUMANode" - ).stdout[0]) - - if not numa: - logger.debug("There are no NUMA nodes on {0}".format(ip)) - else: - logger.debug("There is {0} NUMA node(s) on {1}".format(numa, ip)) - return numa - - -@logwrap -def dict_merge(a, b): - """ Recursively merges dict's. - - Not just simple a['key'] = b['key'], if both a and b have a key - who's value is a dict then dict_merge is called on both values - and the result stored in the returned dictionary. 
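    A quick illustration of the merge rule, with made-up data:

        dict_merge({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}})
        # -> {'a': {'x': 1, 'y': 3}, 'b': 2}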
- """ - if not isinstance(b, dict): - return copy.deepcopy(b) - result = copy.deepcopy(a) - for k, v in b.items(): - if k in result and isinstance(result[k], dict): - result[k] = dict_merge(result[k], v) - else: - result[k] = copy.deepcopy(v) - return result - - -@logwrap -def get_access_config_file(): - """Get path to file on master node, which contains access parameters. - That can be changed in fuel library/fuel main. - - :return: string with path to file - """ - ssh_manager = SSHManager() - return ssh_manager.check_call( - ssh_manager.admin_ip, - 'ls -1 $HOME/.config/fuel/fuel_client.yaml')['stdout_str'] - - -@logwrap -def install_configdb(): - """ Install ConfigDB extension on master node - - :return: None - """ - # TODO(akostrikov) There is a space for improvement. - if not settings.PERESTROIKA_REPO: - raise exceptions.FuelQAVariableNotSet( - 'PERESTROIKA_REPO', - 'http://perestroika-repo-tst.infra.site.net/mos-repos/centos/') - ssh_manager = SSHManager() - admin_ip = ssh_manager.admin_ip - fuel_config_file = get_access_config_file() - openrc_content = FuelAccessParams.from_yaml_params( - YamlEditor(fuel_config_file, - ip=admin_ip).get_content() - ).to_openrc_content() - - openrc_path = '/root/.openrc' - with ssh_manager.open_on_remote(admin_ip, openrc_path, 'w') as openrc_file: - openrc_file.write(openrc_content) - - commands = [ - 'yum-config-manager --add-repo ' - '{}'.format(settings.PERESTROIKA_REPO), - 'yum-config-manager --add-repo {}'.format(settings.PACKAGES_CENTOS), - 'rpm --import {}'.format(settings.MASTER_CENTOS_GPG), - # TODO(akostrikov) Temporary hack to be on the edge. - 'yum install -y tuning-box git', - 'yum remove -y tuning-box', - 'git clone http://github.com/openstack/tuning-box', - 'cd tuning-box/ && python setup.py install', - # TODO(akostrikov) Hack end - 'nailgun_syncdb', - "sudo -u postgres psql -c '\dt' nailgun | grep tuning_box", - 'service nailgun restart', - '. .openrc; openstack service create --name tuning-box config', - ('. 
.openrc; openstack endpoint create' - ' --publicurl $SERVICE_URL/api/config' - ' --region RegionOne tuning-box;') - ] - for install_command in commands: - ssh_manager.check_call(admin_ip, install_command) - - -# pylint: disable=eval-used -class SettingsChanger(object): - """ - Class for changing cluster settings - """ - - SKIPPED_FIELDS_LIST = [ - 'additional_components.mongo', 'storage.osd_pool_size', - 'syslog.syslog_port', 'murano_settings.murano_repo_url', - 'external_mongo.hosts_ip', 'kernel_params.kernel', 'corosync.port', - 'repo_setup.uca_openstack_release', 'repo_setup.uca_repo_url', - 'public_ssl.cert_source', 'public_ssl.hostname', - 'operator_user.homedir', 'access.email', 'common.libvirt_type', - 'additional_components.ironic', 'additional_components.ceilometer', - 'workloads_collector.tenant', 'access.user', - 'workloads_collector.user', 'operator_user.name'] - - def __init__(self, attrs=None): - self._attrs = attrs['editable'] if attrs else None - self._types = ['checkbox', 'radio', 'text', 'textarea'] - self.options = None - - @staticmethod - def _gen_random(length=10): - return ''.join(random.choice(string.lowercase) for _ in range(length)) - - @staticmethod - def _get_condition(restriction): - rst = re.findall( - 'settings:(.+?).value ([=!]+) (true|false|\'.+\')', - restriction) - cmd = [] - keys = [] - for r in rst: - cmd.append("self.options['{opt}'][2] {oper} {sign}".format( - opt=r[0], - oper=r[1], - sign=True - if r[2] == 'true' - else False if r[2] == 'false' else r[2])) - keys.append(r[0]) - if ' and ' in restriction: - expression = ' and '.join(cmd) - elif ' or ' in restriction: - expression = ' or '.join(cmd) - else: - expression = ' '.join(cmd) - return expression, keys - - def __change_check_box(self, key, opt, keys=None): - opt['value'] = not self.options[key][2] - self.options[key][2] = opt['value'] - if keys: - for k in keys: - self.options[k][5] = False - logger.info('`{0}` was changed to `{1}`'.format(key, opt['value'])) - - def __change_radio(self, key, opt, keys=None): - values = self.options[key][3] - current_val = self.options[key][2] - for val in values: - if val['data'] != current_val: - opt['value'] = val['data'] - self.options[key][2] = opt['value'] - if keys: - for k in keys: - self.options[k][5] = False - logger.info( - '`{0}` was changed to `{1}`'.format( - key, val['data'])) - break - else: - logger.debug( - 'Failed to set radio {}'.format(self.options[key][0])) - - def __change_check_box_restr(self, condition, keys, key, opt): - try: - if not eval(condition) and self.options[key][5]: - self.__change_check_box(key, opt, keys) - else: - logger.info( - "Value `{}` couldn't be changed to `{}` due " - "to restrictions".format(key, not self.options[key][2])) - except KeyError as e: - logger.debug('Value was not found {}'.format(e)) - - def __change_radio_restr(self, condition, keys, key, opt): - try: - if not eval(condition) and self.options[key][5]: - self.__change_radio(key, opt, keys) - except KeyError as e: - logger.debug('Value was not found {}'.format(e)) - - def _make_change_wo_restr(self, key, opt): - tp = opt.get('type') - if tp == 'checkbox': - self.__change_check_box(key, opt) - elif tp == 'radio': - self.__change_radio(key, opt) - elif tp == 'text': - opt['value'] = self._gen_random() - self.options[key][2] = opt['value'] - logger.info('`{0}` was changed to `{1}`'.format(key, opt['value'])) - - def _make_change_with_restr(self, key, opt): - restrictions = self.options[key][4] - tp = opt.get('type') - if not restrictions: - return - for 
rest in restrictions: - condition, keys = self._get_condition(rest) - if tp == 'checkbox': - self.__change_check_box_restr(condition, keys, key, opt) - elif tp == 'radio': - self.__change_radio_restr(condition, keys, key, opt) - elif tp == 'text': - logger.debug('Did you forget me `{}`?'.format(key)) - - def _change_options(self, options, restr=False, ensure=False): - if not (restr or ensure): - change_func = self._make_change_wo_restr - elif restr and not ensure: - change_func = self._make_change_with_restr - else: - change_func = self._ensure_options_correct - - for attr in self._attrs: - for option in self._attrs[attr]: - key = '.'.join([attr, option]) - if key not in options: - continue - # skip some values - if key in self.SKIPPED_FIELDS_LIST: - logger.debug("Skipped option `{}`".format(key)) - continue - opt = self._attrs[attr][option] - change_func(key, opt) - - def _ensure_options_correct(self, key, opt): - restrictions = self.options[key][4] - tp = opt.get('type') - if not restrictions: - return - for rest in restrictions: - condition, _ = self._get_condition(rest) - if tp == 'checkbox': - try: - if eval(condition) and self.options[key][2]: - self.__change_check_box(key, opt) - except KeyError as e: - logger.debug('Value was not found {}'.format(e)) - elif tp == 'radio': - logger.info('Radio `{0}` has value `{1}` when restriction ' - 'is: {2}'.format(key, opt['value'], condition)) - elif tp == 'text': - logger.info('Do I rely on anything `{}`?'.format(key)) - - @staticmethod - def _calculate_options(options, randomize=None): - if randomize: - count = randomize if randomize < len(options) else len(options) - 1 - random_keys = random.sample(options.keys(), count) - options_wo_restr = \ - {opt: options[opt] - for opt in options - if opt in random_keys and options[opt][4] is None} - options_with_restr = \ - {opt: options[opt] - for opt in options - if opt in random_keys and options[opt][4] is not None} - else: - options_wo_restr = \ - {opt: options[opt] - for opt in options if options[opt][4] is None} - options_with_restr = \ - {opt: options[opt] - for opt in options if options[opt][4] is not None} - - return options_wo_restr, options_with_restr - - def make_changes(self, attrs=None, options=None, randomize=None): - """ - Function makes changes in cluster settings in paramsters - which are presented in options list - :param attrs: cluster attributes - :param options: dict with options provided by parser - :param randomize: specify if random changing is needed - :return: changed cluster attributes - """ - if attrs: - self.attrs = attrs - # Create two dicts with options without restrictions and with them - self.options = options if options else self.parse_settings() - opt_wo_restr, opt_with_restr = \ - self._calculate_options(self.options, randomize) - # First of all lets modify values without restrictions - logger.info("Changing options without restrictions") - self._change_options(opt_wo_restr, False) - self.options.update(opt_wo_restr) - # iterate through options with restrictions - logger.info("Changing options with restrictions") - self._change_options(opt_with_restr, True) - logger.info("Check options for invalid due to restrictions " - "and modify it if it's necessary") - self.options.update(opt_with_restr) - self._change_options(self.options, True, True) - return self.attrs - - def load_settings_from_file(self, file_name, file_type='json'): - """ - Function loads settings from file - :param file_name: file to load from - :param file_type: file format `json` or `yaml` - :return: 
nothing - """ - try: - with open(file_name, 'r') as f: - if file_type == 'json': - self.attrs = json.load(f) - else: - self.attrs = yaml.load(f) - except ValueError: - logger.error("Check settings file for consistency") - raise - except IOError: - logger.error("Check settings file existence") - raise - else: - logger.info('Settings were loaded from file {}'.format(file_name)) - - def save_settings_to_file(self, file_name, file_type='json'): - """ - Function saves settings to file - :param file_name: file to save to - :param file_type: file format `json` or `yaml` - :return: nothing - """ - with open(file_name, 'w') as f: - if file_type == 'json': - json.dump(self.attrs, f) - else: - yaml.dump(self.attrs, f) - logger.info('Settings were saved to file {}'.format(file_name)) - - # pylint: disable=too-many-nested-blocks - def parse_settings(self, attrs=None): - """ - Function parses attributes by its type - :param attrs: a cluster attributes - :return: a dict with options - """ - attrs = attrs['editable'] if attrs else self._attrs - self.options = {} - for attr in attrs: - for option in attrs[attr]: - key = '.'.join([attr, option]) - opt = attrs[attr][option] - tp = opt.get('type') - label = opt.get('label') - value = opt.get('value') - values = opt.get('values') - restrictions = opt.get('restrictions', None) - if tp not in self._types: - continue - if key in self.options: - logger.debug('`{0}` has duplicates'.format(key)) - continue - restr = None - if restrictions: - restr = [] - for rest in restrictions: - if isinstance(rest, dict): - if rest.get('condition') \ - and 'value' in rest['condition']: - restr.append(rest['condition']) - elif 'value' in rest.keys()[0]: - restr.append(rest.keys()[0]) - elif 'value' in rest: - restr.append(rest) - else: - restr.append(rest) - self.options[key] = \ - [label, tp, value, values, - restr if restr else None, True] - logger.debug( - 'Option {0} has been added with {1}'.format( - key, self.options[key])) - return self.options - # pylint: enable=too-many-nested-blocks - - @property - def attrs(self): - dct = dict() - dct['editable'] = self._attrs - return dct - - @attrs.setter - def attrs(self, attrs): - self._attrs = attrs['editable'] -# pylint: enable=eval-used - - -@logwrap -def install_lynis_master(master_node_ip): - """ Install Lynis package on master node - - :return: None - """ - ssh_manager = SSHManager() - asserts.assert_is_not_none( - settings.PERESTROIKA_REPO, - message='PERESTROIKA_REPO is empty, please set it to correct path' - ) - cmds = ['yum-config-manager --add-repo ' - '{}'.format(settings.PERESTROIKA_REPO), - - 'rpm --import {}'.format(settings.MASTER_CENTOS_GPG), - - 'yum install -y lynis' - ] - for cmd in cmds: - ssh_manager.execute_on_remote(ip=master_node_ip, cmd=cmd) - - -class YamlEditor(object): - """Manipulations with local or remote .yaml files. 
-    Usage:
-
-    with YamlEditor("tasks.yaml") as editor:
-        editor.content[key] = "value"
-
-    with YamlEditor("astute.yaml", ip=self.admin_ip) as editor:
-        editor.content[key] = "value"
-    """
-
-    def __init__(self, file_path, ip=None, port=None):
-        """YAML files editor
-
-        :type file_path: str
-        :type ip: str
-        :type port: int
-        """
-        self.__file_path = file_path
-        self.ip = ip
-        self.port = port or 22
-        self.__content = None
-        self.__original_content = None
-
-    @property
-    def file_path(self):
-        """Open file path
-
-        :rtype: str
-        """
-        return self.__file_path
-
-    @property
-    def content(self):
-        if self.__content is None:
-            self.__content = self.get_content()
-        return self.__content
-
-    @content.setter
-    def content(self, new_content):
-        self.__content = new_content
-
-    def __get_file(self, mode="r"):
-        if self.ip:
-            return SSHManager().open_on_remote(
-                self.ip, self.__file_path,
-                mode=mode, port=self.port)
-        else:
-            return open(self.__file_path, mode)
-
-    def get_content(self):
-        with self.__get_file() as file_obj:
-            return yaml.safe_load(file_obj)
-
-    def write_content(self, content=None):
-        if content:
-            self.content = content
-        with self.__get_file("w") as file_obj:
-            yaml.safe_dump(self.content, file_obj,
-                           default_flow_style=False,
-                           default_style='"')
-
-    def __enter__(self):
-        self.__content = self.get_content()
-        self.__original_content = copy.deepcopy(self.content)
-        return self
-
-    def __exit__(self, x, y, z):
-        if self.content == self.__original_content:
-            return
-        self.write_content()
-
-
-def generate_yum_repos_config(repositories):
-    """
-    Parse the given YAML file describing RPM repositories
-    and build a yum repo configuration from it
-    :param repositories: path to a yaml file with repo definitions
-    :return: a string with the yum repo config
-    """
-    repos = YamlEditor(repositories).get_content()
-    logger.debug('custom RPM repos from yaml: {0}'.
format(repos)) - config = "" - for repo in repos: - config += "[{name}]\n" \ - "name={name}\n" \ - "baseurl={uri}\n" \ - "enabled=1\n" \ - "gpgcheck=0\n" \ - "priority={priority}\n" \ - "skip_if_unavailable=1\n".format(**repo) - return config - - -def preserve_partition(admin_remote, node_id, partition): - """ - Marks the given partition to be preserved during slave node reinstallation - - :param admin_remote: SSHClient to master node - :param node_id: ID of a slave node to update settings for - :param partition: name of the partition to be preserved - :return: None - """ - # Retrieve disks config for the given node - res = admin_remote.execute( - "fuel node --node-id {0} " - "--disk --download".format(str(node_id))) - disk_config_path = res['stdout'][-1].rstrip() - - # Enable partition preservation in the disks config - with YamlEditor(disk_config_path, admin_remote.host) as editor: - for disk in editor.content: - for volume in disk['volumes']: - if volume['name'] == partition: - volume['keep_data'] = True - - # Upload the updated disks config to the corresponding node - admin_remote.execute("fuel node --node-id {0} " - "--disk --upload".format(str(node_id))) - - -def get_instance_ipv6(instance, network): - """Get instance ip of version 6 - - :param instance: obj, object of instance - :param network: obj, object of assigned network - :return: - """ - instance_ip = [addr['addr'] - for addr in instance.addresses[network['name']] - if addr['OS-EXT-IPS:type'] == 'fixed' and - addr['version'] == 6] - logger.debug( - '\ninstance: {instance_id!s}\n' - '\tIPv6 address: {ipv6!s}'.format( - instance_id=instance.id, - ipv6=instance_ip)) - assert_true(instance_ip, - 'Not found ip v6 for instance. Details:\n {}' - .format(instance.addresses[network['name']])) - return instance_ip[0] diff --git a/fuelweb_test/models/__init__.py b/fuelweb_test/models/__init__.py deleted file mode 100644 index 3ca5bd0ba..000000000 --- a/fuelweb_test/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'nprikazchikov' diff --git a/fuelweb_test/models/collector_client.py b/fuelweb_test/models/collector_client.py deleted file mode 100644 index 53ddcfb57..000000000 --- a/fuelweb_test/models/collector_client.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import absolute_import - -from traceback import print_stack -from warnings import warn - -from fuelweb_test import logger - -from core.models.collector_client import CollectorClient - -msg = ( - 'fuelweb_test.models.collector_client is deprecated and will be dropped ' - 'on 14.09.2016. Please use core.models.collector_client instead' -) -warn(msg) -print_stack() -logger.critical(msg) - -__all__ = ['CollectorClient'] diff --git a/fuelweb_test/models/environment.py b/fuelweb_test/models/environment.py deleted file mode 100644 index afd1aaecc..000000000 --- a/fuelweb_test/models/environment.py +++ /dev/null @@ -1,814 +0,0 @@ -# Copyright 2013 Mirantis, Inc. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import re
-import time
-
-from devops.client.client import DevopsClient
-from devops.helpers.helpers import tcp_ping_
-from devops.helpers.helpers import wait_pass
-from devops.helpers.helpers import wait
-from devops.helpers.metaclasses import SingletonMeta
-from keystoneauth1 import exceptions
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_true
-import six
-
-from core.models.collector_client import CollectorClient
-from core.helpers.log_helpers import logwrap
-from core.helpers.log_helpers import QuietLogger
-
-from fuelweb_test.helpers import checkers
-from fuelweb_test.helpers.decorators import revert_info
-from fuelweb_test.helpers.decorators import update_rpm_packages
-from fuelweb_test.helpers.decorators import upload_manifests
-from fuelweb_test.helpers.eb_tables import Ebtables
-from fuelweb_test.helpers.fuel_actions import AdminActions
-from fuelweb_test.helpers.fuel_actions import BaseActions
-from fuelweb_test.helpers.fuel_actions import CobblerActions
-from fuelweb_test.helpers.fuel_actions import NailgunActions
-from fuelweb_test.helpers.fuel_actions import PostgresActions
-from fuelweb_test.helpers.fuel_actions import FuelBootstrapCliActions
-from fuelweb_test.helpers.ssh_manager import SSHManager
-from fuelweb_test.helpers.utils import TimeStat
-from fuelweb_test.helpers.utils import YamlEditor
-from fuelweb_test.helpers import multiple_networks_hacks
-from fuelweb_test.models.fuel_web_client import FuelWebClient
-from fuelweb_test import settings
-from fuelweb_test.settings import CUSTOM_FUEL_SETTING_YAML
-from fuelweb_test.settings import iface_alias
-from fuelweb_test import logger
-
-
-class EnvironmentModel(six.with_metaclass(SingletonMeta, object)):
-    """EnvironmentModel."""  # TODO documentation
-
-    def __init__(self, config=None):
-        if not hasattr(self, "_virt_env"):
-            self._virt_env = None
-        if not hasattr(self, "_fuel_web"):
-            self._fuel_web = None
-        self._config = config
-        self.ssh_manager = SSHManager()
-        self.ssh_manager.initialize(
-            self.get_admin_node_ip(),
-            admin_login=settings.SSH_FUEL_CREDENTIALS['login'],
-            admin_password=settings.SSH_FUEL_CREDENTIALS['password'],
-            slave_login=settings.SSH_SLAVE_CREDENTIALS['login'],
-            slave_password=settings.SSH_SLAVE_CREDENTIALS['password']
-        )
-        self.admin_actions = AdminActions()
-        self.base_actions = BaseActions()
-        self.cobbler_actions = CobblerActions()
-        self.nailgun_actions = NailgunActions()
-        self.postgres_actions = PostgresActions()
-        self.fuel_bootstrap_actions = FuelBootstrapCliActions()
-
-    @property
-    def fuel_web(self):
-        if self._fuel_web is None:
-            self._fuel_web = FuelWebClient(self)
-        return self._fuel_web
-
-    def __repr__(self):
-        klass, obj_id = type(self), hex(id(self))
-        if getattr(self, '_fuel_web'):
-            ip = self.fuel_web.admin_node_ip
-        else:
-            ip = None
-        return "[{klass}({obj_id}), ip:{ip}]".format(klass=klass,
-                                                     obj_id=obj_id,
-                                                     ip=ip)
-
-    @property
-    def admin_node_ip(self):
-        return self.fuel_web.admin_node_ip
-
-    @property
-    def collector(self):
-        return CollectorClient(settings.ANALYTICS_IP, 'api/v1/json')
-
-    @logwrap
-    def add_syslog_server(self, cluster_id, port=5514):
-        self.fuel_web.add_syslog_server(
-            cluster_id, self.d_env.get_default_gw(), port)
-
-    def bootstrap_nodes(self, devops_nodes, timeout=settings.BOOTSTRAP_TIMEOUT,
-                        skip_timesync=False):
-        """Start VMs and wait until they are registered on nailgun.
-
-        :rtype: list of registered nailgun nodes
-        """
-        # self.dhcrelay_check()
-
-        for node in devops_nodes:
-            logger.info("Bootstrapping node: {}".format(node.name))
-            node.start()
-            # TODO(aglarendil): LP#1317213 temporary sleep
-            # remove after better fix is applied
-            time.sleep(5)
-
-        with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
-            wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout,
-                 timeout_msg='Bootstrap timeout for nodes: {}'
-                             ''.format([node.name for node in devops_nodes]))
-
-        wait_pass(
-            lambda: checkers.validate_minimal_amount_nodes(
-                nodes=self.nailgun_nodes(devops_nodes),
-                expected_amount=len(devops_nodes)
-            ),
-            timeout=30)
-
-        if not skip_timesync:
-            self.sync_time()
-
-        return self.nailgun_nodes(devops_nodes)
-
-    def sync_time(self, nodes_names=None, skip_sync=False):
-        if nodes_names is None:
-            roles = ['fuel_master', 'fuel_slave']
-            nodes_names = [node.name for node in self.d_env.get_nodes()
-                           if node.role in roles and
-                           node.driver.node_active(node)]
-        logger.info("Waiting for time synchronization on nodes: {0}"
-                    .format(', '.join(sorted(nodes_names))))
-        new_time = self.d_env.sync_time(node_names=nodes_names,
-                                        skip_sync=skip_sync)
-        for name in sorted(new_time):
-            logger.info("New time on '{0}' = {1}".format(name, new_time[name]))
-
-    @logwrap
-    def get_admin_node_ip(self):
-        return str(
-            self.d_env.nodes().admin.get_ip_address_by_network_name('admin'))
-
-    @logwrap
-    def get_ebtables(self, cluster_id, devops_nodes):
-        return Ebtables(self.get_target_devs(devops_nodes),
-                        self.fuel_web.client.get_cluster_vlans(cluster_id))
-
-    def get_keys(self, node, custom=None, build_images=None,
-                 iso_connect_as='cdrom'):
-        params = {
-            'device_label': settings.ISO_LABEL,
-            'iface': iface_alias('eth0'),
-            'ip': node.get_ip_address_by_network_name('admin'),
-            'mask': self.d_env.get_network(name='admin').ip.netmask,
-            'gw': self.d_env.get_default_gw(),
-            'hostname': ''.join((settings.FUEL_MASTER_HOSTNAME,
-                                 settings.DNS_SUFFIX)),
-            'nat_interface': '',
-            'nameserver': settings.DNS,
-            'showmenu': 'yes' if settings.SHOW_FUELMENU else 'no',
-            'wait_for_external_config': 'yes',
-            'build_images': '1' if build_images else '0',
-            'MASTER_NODE_EXTRA_PACKAGES': settings.MASTER_NODE_EXTRA_PACKAGES
-        }
-        # TODO(akostrikov) add tests for menu items/kernel parameters
-        # TODO(akostrikov) refactor it.
- if iso_connect_as == 'usb': - keys = ( - "\n" # USB boot uses boot_menu=yes for master node - "\n" - "2\n" - ) - else: # cdrom is default - keys = ( - "\n" - "\n" - "\n" - ) - - keys += ( - "\n" - "\n" - "vmlinuz initrd=initrd.img" - " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg" - " inst.repo=cdrom:LABEL=%(device_label)s:/" - " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s" - ":%(iface)s:off::: nameserver=%(nameserver)s" - " showmenu=%(showmenu)s\n" - " wait_for_external_config=%(wait_for_external_config)s" - " build_images=%(build_images)s\n" - " MASTER_NODE_EXTRA_PACKAGES='%(MASTER_NODE_EXTRA_PACKAGES)s'\n" - " \n" - ) % params - return keys - - @staticmethod - def get_target_devs(devops_nodes): - return [devops_node.get_interface_target_dev(interface.mac_address) - for devops_node in devops_nodes - for interface in devops_node.interfaces] - - @property - def d_env(self): - if self._virt_env is not None: - return self._virt_env - - env_name = settings.ENV_NAME if not self._config else \ - self._config['template']['devops_settings']['env_name'] - - try: - from devops.error import DevopsObjNotFound - EnvDoesNotExist = DevopsObjNotFound - except ImportError: - from devops.models import Environment - # pylint: disable=no-member - EnvDoesNotExist = Environment.DoesNotExist - # pylint: enable=no-member - - try: - logger.info("Try to find environment '{0}'".format(env_name)) - self._virt_env = DevopsClient().get_env(env_name) - except EnvDoesNotExist: - logger.info("Try to create environment '{0}'".format(env_name)) - if self._config: - self._virt_env = DevopsClient().create_env_from_config( - config=self._config) - else: - self._virt_env = DevopsClient().create_env( - boot_from=settings.ADMIN_BOOT_DEVICE) - self._virt_env.define() - logger.info("New environment '{0}' was defined".format(env_name)) - return self._virt_env - - def resume_environment(self): - self.d_env.resume() - admin = self.d_env.nodes().admin - - self.ssh_manager.clean_all_connections() - - try: - admin.await('admin', timeout=30, by_port=8000) - except Exception as e: - logger.warning("From first time admin isn't reverted: " - "{0}".format(e)) - admin.destroy() - logger.info('Admin node was destroyed. 
Wait 10 sec.') - time.sleep(10) - - admin.start() - logger.info('Admin node started second time.') - self.d_env.nodes().admin.await('admin') - self.set_admin_ssh_password() - self.admin_actions.wait_for_fuel_ready(timeout=600) - - # set collector address in case of admin node destroy - if settings.FUEL_STATS_ENABLED: - self.nailgun_actions.set_collector_address( - settings.FUEL_STATS_HOST, - settings.FUEL_STATS_PORT, - settings.FUEL_STATS_SSL) - # Restart statsenderd in order to apply new collector address - self.nailgun_actions.force_fuel_stats_sending() - self.fuel_web.client.send_fuel_stats(enabled=True) - logger.info('Enabled sending of statistics to {0}:{1}'.format( - settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT - )) - self.set_admin_ssh_password() - self.admin_actions.wait_for_fuel_ready() - - def make_snapshot(self, snapshot_name, description="", is_make=False): - if settings.MAKE_SNAPSHOT or is_make: - self.d_env.suspend() - time.sleep(10) - - self.d_env.snapshot(snapshot_name, force=True, - description=description) - revert_info(snapshot_name, self.get_admin_node_ip(), description) - - if settings.FUEL_STATS_CHECK: - self.resume_environment() - - def nailgun_nodes(self, devops_nodes): - return [self.fuel_web.get_nailgun_node_by_devops_node(node) - for node in devops_nodes] - - def check_slaves_are_ready(self): - devops_nodes = [node for node in self.d_env.nodes().slaves - if node.driver.node_active(node)] - # Bug: 1455753 - time.sleep(30) - - self.fuel_web.wait_nodes_get_online_state(devops_nodes, timeout=60 * 6) - return True - - def revert_snapshot(self, name, skip_timesync=False, - skip_slaves_check=False): - if not self.d_env.has_snapshot(name): - return False - - logger.info('We have snapshot with such name: {:s}'.format(name)) - - logger.info("Reverting the snapshot '{0}' ....".format(name)) - self.d_env.revert(name) - - logger.info("Resuming the snapshot '{0}' ....".format(name)) - self.resume_environment() - - if not skip_timesync: - self.sync_time() - else: - self.sync_time(['admin']) - try: - with QuietLogger(upper_log_level=logging.CRITICAL): - # TODO(astudenov): add timeout_msg - wait_pass( - self.fuel_web.client.get_releases, - expected=( - exceptions.RetriableConnectionFailure, - exceptions.UnknownConnectionError), - timeout=300) - except exceptions.Unauthorized: - self.set_admin_keystone_password() - self.fuel_web.get_nailgun_version() - - if not skip_slaves_check: - # TODO(astudenov): add timeout_msg - wait_pass(lambda: self.check_slaves_are_ready(), timeout=60 * 6) - return True - - def set_admin_ssh_password(self): - new_login = settings.SSH_FUEL_CREDENTIALS['login'] - new_password = settings.SSH_FUEL_CREDENTIALS['password'] - try: - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command='date' - ) - logger.debug('Accessing admin node using SSH: SUCCESS') - except Exception: - logger.debug('Accessing admin node using SSH credentials:' - ' FAIL, trying to change password from default') - self.ssh_manager.initialize( - admin_ip=self.ssh_manager.admin_ip, - admin_login='root', - admin_password='r00tme', - slave_login=settings.SSH_SLAVE_CREDENTIALS['login'], - slave_password=settings.SSH_SLAVE_CREDENTIALS['password'] - ) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command='echo -e "{1}\\n{1}" | passwd {0}'.format(new_login, - new_password) - ) - self.ssh_manager.initialize( - admin_ip=self.ssh_manager.admin_ip, - admin_login=new_login, - admin_password=new_password, - slave_login=settings.SSH_SLAVE_CREDENTIALS['login'], - 
slave_password=settings.SSH_SLAVE_CREDENTIALS['password'] - ) - self.ssh_manager.update_connection( - ip=self.ssh_manager.admin_ip, - port=22, - login=new_login, - password=new_password - ) - logger.debug("Admin node password has changed.") - logger.info("Admin node login name: '{0}' , password: '{1}'". - format(new_login, new_password)) - - def set_admin_keystone_password(self): - try: - self.fuel_web.client.get_releases() - # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized: - except: - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command='fuel user --newpass {0} --change-password'.format( - settings.KEYSTONE_CREDS['password']) - ) - config_file_path = 'ls -1 $HOME/.config/fuel/fuel_client.yaml' - config_file = self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command=config_file_path)['stdout_str'] - - with YamlEditor(config_file, ip=self.admin_node_ip) as editor: - editor.content["OS_USERNAME"] = \ - settings.KEYSTONE_CREDS['username'] - editor.content["OS_PASSWORD"] = \ - settings.KEYSTONE_CREDS['password'] - - with YamlEditor(settings.FUEL_SETTINGS_YAML, - ip=self.admin_node_ip) as editor: - editor.content["FUEL_ACCESS"]['user'] = \ - settings.KEYSTONE_CREDS['username'] - editor.content["FUEL_ACCESS"]['password'] = \ - settings.KEYSTONE_CREDS['password'] - - logger.info( - 'New Fuel UI (keystone) username: "{0}", password: "{1}"' - .format(settings.KEYSTONE_CREDS['username'], - settings.KEYSTONE_CREDS['password'])) - - def setup_environment(self, custom=settings.CUSTOM_ENV, - build_images=settings.BUILD_IMAGES, - iso_connect_as=settings.ADMIN_BOOT_DEVICE, - security=settings.SECURITY_TEST): - # Create environment and start the Fuel master node - admin = self.d_env.nodes().admin - self.d_env.start([admin]) - - def provision_admin(admin_node): - logger.info("Waiting for admin node to start up") - wait(lambda: admin.driver.node_active(admin_node), 60, - timeout_msg='Admin node startup timeout') - logger.info("Proceed with installation") - # update network parameters at boot screen - admin_node.send_keys(self.get_keys( - admin_node, - custom=custom, - build_images=build_images, - iso_connect_as=iso_connect_as)) - if settings.SHOW_FUELMENU: - self.wait_for_fuelmenu() - else: - self.wait_for_provisioning() - - try: - provision_admin(admin) - except Exception as e: - logger.info('Master node restart: LP1587411') - logger.info('Exception is: {e}'.format(e=e)) - admin.reset() - provision_admin(admin) - - self.set_admin_ssh_password() - - self.wait_for_external_config() - if custom: - self.setup_customisation() - if security: - nessus_node = self.d_env.get_node(name='slave-nessus') - nessus_node.start() - # wait while installation complete - - self.admin_actions.modify_configs(self.d_env.get_default_gw()) - if CUSTOM_FUEL_SETTING_YAML: - self.admin_actions.update_fuel_setting_yaml( - CUSTOM_FUEL_SETTING_YAML) - self.kill_wait_for_external_config() - self.wait_bootstrap() - self.admin_actions.wait_for_fuel_ready() - - @logwrap - def enable_force_https(self, admin_node_ip): - cmd = """ - echo -e '"SSL":\n "force_https": "true"' >> /etc/fuel/astute.yaml - """ - self.ssh_manager.check_call(admin_node_ip, cmd) - cmd = "find / -name \"nginx_services.pp\"" - puppet_manifest = \ - self.ssh_manager.check_call( - admin_node_ip, cmd)['stdout'][0].strip() - cmd = 'puppet apply {0}'.format(puppet_manifest) - self.ssh_manager.check_call(admin_node_ip, cmd) - cmd = """ - systemctl status nginx.service | - awk 'match($0, /\s+Active:.*\((\w+)\)/, a) {print a[1]}' - """ - 
wait(lambda: ( - self.ssh_manager.check_call( - admin_node_ip, cmd)['stdout'][0] != 'dead'), interval=10, - timeout=30, - timeout_msg='Nginx service is dead after trying to enable ' - 'it with the command: {}'.format(cmd)) - - # pylint: disable=no-self-use - @update_rpm_packages - @upload_manifests - def setup_customisation(self): - logger.info('Installing custom packages/manifests ' - 'before master node bootstrap...') - # pylint: enable=no-self-use - - @logwrap - def wait_for_provisioning(self, - timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT): - # TODO(astudenov): add timeout_msg - wait_pass(lambda: tcp_ping_( - self.d_env.nodes( - ).admin.get_ip_address_by_network_name - ('admin'), 22), timeout=timeout) - - @logwrap - def wait_for_fuelmenu(self, - timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT): - - def check_ssh_connection(): - """Try to close fuelmenu and check ssh connection""" - try: - tcp_ping_( - self.d_env.nodes().admin.get_ip_address_by_network_name( - 'admin'), - 22) - except Exception: - # send F8 trying to exit fuelmenu - self.d_env.nodes().admin.send_keys("\n") - return False - return True - - wait(check_ssh_connection, interval=30, timeout=timeout, - timeout_msg="Fuelmenu hasn't appeared during allocated timeout") - - @logwrap - def wait_for_external_config(self, timeout=120): - - wait(lambda: self.ssh_manager.exists_on_remote( - self.ssh_manager.admin_ip, - '/var/lock/wait_for_external_config'), - timeout=600, - timeout_msg='wait_for_external_config lock file timeout ' - 'while bootstrapping the Fuel master node') - - check_cmd = 'pkill -0 -f wait_for_external_config' - - wait( - lambda: self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=check_cmd)['exit_code'] == 0, - timeout=timeout, - timeout_msg='wait_for_external_config process timeout ' - 'while bootstrapping the Fuel master node') - - @logwrap - def kill_wait_for_external_config(self): - kill_cmd = 'pkill -f "^wait_for_external_config"' - check_cmd = 'pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]' - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command=kill_cmd - ) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command=check_cmd - ) - - def wait_bootstrap(self): - logger.info("Waiting while bootstrapping is in progress") - log_path = "/var/log/puppet/bootstrap_admin_node.log" - logger.info("Running bootstrap (timeout: {0})".format( - float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT))) - with TimeStat("admin_node_bootsrap_time", is_uniq=True): - wait( - lambda: self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd="grep 'Fuel node deployment' '{:s}'".format(log_path) - )['exit_code'] == 0, - timeout=(float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT)), - timeout_msg='Fuel master node bootstrap timeout, ' - 'please check the log {}'.format(log_path) - ) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd="grep 'Fuel node deployment " - "complete' '{:s}'".format(log_path))['exit_code'] - if result != 0: - raise Exception('Fuel node deployment failed.') - self.bootstrap_image_check() - - def dhcrelay_check(self): - # CentOS 7 is pretty stable with admin iface. - # TODO(akostrikov) refactor it. 
-        iface = iface_alias('eth0')
-        command = "dhcpcheck discover " \
-                  "--ifaces {iface} " \
-                  "--repeat 3 " \
-                  "--timeout 10".format(iface=iface)
-
-        out = self.ssh_manager.execute(
-            ip=self.ssh_manager.admin_ip,
-            cmd=command
-        )['stdout']
-
-        assert_true(self.get_admin_node_ip() in "".join(out),
-                    "dhcpcheck doesn't discover master ip")
-
-    def bootstrap_image_check(self):
-        fuel_settings = self.admin_actions.get_fuel_settings()
-        if fuel_settings['BOOTSTRAP']['flavor'].lower() != 'ubuntu':
-            logger.warning('Default image for bootstrap '
-                           'is not based on Ubuntu!')
-            return
-
-        bootstrap_images = self.ssh_manager.check_call(
-            ip=self.ssh_manager.admin_ip,
-            command='fuel-bootstrap --quiet list'
-        )['stdout']
-        assert_true(any('active' in line for line in bootstrap_images),
-                    'Ubuntu bootstrap image wasn\'t built and activated! '
-                    'See logs in /var/log/fuel-bootstrap-image-build.log '
-                    'for details.')
-
-    def admin_install_pkg(self, pkg_name):
-        """Install a package on the admin node"""
-        remote_status = self.ssh_manager.execute(
-            ip=self.ssh_manager.admin_ip,
-            cmd="rpm -q {0}".format(pkg_name)
-        )
-        if remote_status['exit_code'] == 0:
-            logger.info("Package '{0}' already installed.".format(pkg_name))
-        else:
-            logger.info("Installing package '{0}' ...".format(pkg_name))
-            remote_status = self.ssh_manager.execute(
-                ip=self.ssh_manager.admin_ip,
-                cmd="yum -y install {0}".format(pkg_name)
-            )
-            logger.info("Installation of the package '{0}' has been"
-                        " completed with exit code {1}"
-                        .format(pkg_name, remote_status['exit_code']))
-        return remote_status['exit_code']
-
-    def admin_run_service(self, service_name):
-        """Start a service on the admin node"""
-
-        self.ssh_manager.execute(
-            ip=self.ssh_manager.admin_ip,
-            cmd="service {0} start".format(service_name)
-        )
-        remote_status = self.ssh_manager.execute(
-            ip=self.ssh_manager.admin_ip,
-            cmd="service {0} status".format(service_name)
-        )
-        if any('running...'
-               in status for status in remote_status['stdout']):
-            logger.info("Service '{0}' is running".format(service_name))
-        else:
-            logger.info("Service '{0}' failed to start"
-                        " with exit code {1} :\n{2}"
-                        .format(service_name,
-                                remote_status['exit_code'],
-                                remote_status['stdout']))
-
-    def admin_install_updates(self):
-        """Update packages using yum and install updates via
-        update-master-node.sh tool"""
-        logger.info('Searching for updates...')
-        update_command = 'yum clean expire-cache && ' \
-                         'yum update -y 2>>/var/log/yum-update-error.log'
-
-        logger.info('Performing yum clean and update commands')
-        update_result = self.ssh_manager.check_call(
-            ip=self.ssh_manager.admin_ip,
-            command=update_command,
-            error_info='Packages update failed, inspect logs for details')
-
-        logger.info('Packages were updated successfully')
-
-        # Check whether any packages were updated and the update succeeded
-        match_updated_count = re.search(r'Upgrade\s+(\d+)\s+Package',
-                                        update_result['stdout_str'])
-        # In case of package replacement, the new one is marked as
-        # installed and the old one as removed
-        match_installed_count = re.search(r'Install\s+(\d+)\s+Package',
-                                          update_result['stdout_str'])
-        match_complete_message = re.search(r'Complete!',
-                                           update_result['stdout_str'])
-
-        match_no_updates = re.search("No Packages marked for Update",
                                     update_result['stdout_str'])
-
-        if match_no_updates or not match_complete_message \
-                or not (match_updated_count or match_installed_count):
-            logger.warning('No updates were found or update was incomplete.')
-            return
-
-        updates_count = 0
-
-        if match_updated_count:
-            updates_count += int(match_updated_count.group(1))
-
-        if match_installed_count:
-            updates_count += int(match_installed_count.group(1))
-
-        logger.info('{0} package(s) were updated'.format(updates_count))
-
-        logger.info('Applying updates via update-master-node.sh')
-        # LP #1664635 - we need to redirect stdout to /dev/null to avoid
-        # ssh connection hanging on massive output from puppet run.
-        cmd = '/usr/share/fuel-utils/update-master-node.sh > /dev/null 2>&1'
-
-        self.ssh_manager.check_call(
-            ip=self.ssh_manager.admin_ip,
-            command=cmd,
-            error_info='Update failed, inspect logs for details',
-        )
-        logger.info('Update successful')
-
-    # Modifies a resolv.conf on the Fuel master node and returns
-    # its original content.
-    # * adds 'nameservers' at start of resolv.conf if merge=True
-    # * replaces resolv.conf with 'nameservers' if merge=False
-    def modify_resolv_conf(self, nameservers=None, merge=True):
-        if nameservers is None:
-            nameservers = []
-
-        with self.ssh_manager.open_on_remote(
-                ip=self.ssh_manager.admin_ip,
-                path='/etc/resolv.conf',
-        ) as f:
-            resolv_conf = f.readlines()
-
-        if merge:
-            nameservers.extend(resolv_conf)
-        resolv_keys = ['search', 'domain', 'nameserver']
-        resolv_new = "".join(
-            '{0}\n'.format(ns) for ns in nameservers
-            if any(x in ns for x in resolv_keys))
-        with self.ssh_manager.open_on_remote(
-                ip=self.ssh_manager.admin_ip,
-                path='/etc/resolv.conf',
-                mode='w'
-        ) as f:
-            f.write(resolv_new)
-
-        return resolv_conf
-
-    @logwrap
-    def describe_other_admin_interfaces(self, admin):
-        admin_networks = [iface.network.name for iface in admin.interfaces]
-        iface_name = None
-        for i, network_name in enumerate(admin_networks):
-            if 'admin' in network_name and 'admin' != network_name:
-                # This will be replaced with actual interface labels
-                # from fuel-devops
-                iface_name = 'enp0s' + str(i + 3)
-                logger.info("Describe Fuel admin node interface {0} for "
-                            "network {1}".format(iface_name, network_name))
-                self.describe_admin_interface(iface_name, network_name)
-
-        if iface_name:
-            return self.ssh_manager.execute(
-                ip=self.ssh_manager.admin_ip,
-                cmd="cobbler sync")
-
-    @logwrap
-    def describe_admin_interface(self, admin_if, network_name):
-        admin_net_object = self.d_env.get_network(name=network_name)
-        admin_network = admin_net_object.ip.network
-        admin_netmask = admin_net_object.ip.netmask
-        admin_ip = str(self.d_env.nodes(
-        ).admin.get_ip_address_by_network_name(network_name))
-        logger.info(('Parameters for admin interface configuration: '
-                     'Network - {0}, Netmask - {1}, Interface - {2}, '
-                     'IP Address - {3}').format(admin_network,
-                                                admin_netmask,
-                                                admin_if,
-                                                admin_ip))
-        add_admin_ip = ('DEVICE={0}\\n'
-                        'ONBOOT=yes\\n'
-                        'NM_CONTROLLED=no\\n'
-                        'USERCTL=no\\n'
-                        'PEERDNS=no\\n'
-                        'BOOTPROTO=static\\n'
-                        'IPADDR={1}\\n'
-                        'NETMASK={2}\\n').format(admin_if,
-                                                 admin_ip,
-                                                 admin_netmask)
-        cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
-               'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
-            add_admin_ip, admin_if, admin_ip)
-        logger.debug('Trying to assign {0} IP to the {1} on master node...'.
-                     format(admin_ip, admin_if))
-
-        result = self.ssh_manager.execute(
-            ip=self.ssh_manager.admin_ip,
-            cmd=cmd
-        )
-        assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
-                     'IP address on master node: {0}').format(result))
-        logger.debug('Done: {0}'.format(result['stdout']))
-
-        # TODO for ssh manager
-        multiple_networks_hacks.configure_second_admin_dhcp(
-            self.ssh_manager.admin_ip,
-            admin_if
-        )
-        multiple_networks_hacks.configure_second_admin_firewall(
-            self.ssh_manager.admin_ip,
-            admin_network,
-            admin_netmask,
-            admin_if,
-            self.get_admin_node_ip()
-        )
-
-    @logwrap
-    def get_masternode_uuid(self):
-        return self.postgres_actions.run_query(
-            db='nailgun',
-            query="select master_node_uid from master_node_settings limit 1;")
diff --git a/fuelweb_test/models/fuel_web_client.py b/fuelweb_test/models/fuel_web_client.py
deleted file mode 100644
index 6e82fe927..000000000
--- a/fuelweb_test/models/fuel_web_client.py
+++ /dev/null
@@ -1,3544 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -import logging -import re -import time -import traceback - -import distutils -import devops -from devops.error import DevopsCalledProcessError -from devops.error import TimeoutError -from devops.helpers.helpers import wait_pass -from devops.helpers.helpers import wait -from devops.models.node import Node -try: - from devops.error import DevopsObjNotFound -except ImportError: - # pylint: disable=no-member - DevopsObjNotFound = Node.DoesNotExist - # pylint: enable=no-member -from keystoneauth1 import exceptions -from keystoneauth1.identity import V2Password -from keystoneauth1.session import Session as KeystoneSession -import netaddr -import six -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_false -from proboscis.asserts import assert_is_not_none -from proboscis.asserts import assert_not_equal -from proboscis.asserts import assert_raises -from proboscis.asserts import assert_true -import yaml - -from core.helpers.log_helpers import logwrap -from core.helpers.log_helpers import QuietLogger -from core.models.fuel_client import Client as FuelClient - -from fuelweb_test import logger -from fuelweb_test import ostf_test_mapping -from fuelweb_test.helpers import ceph -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import replace_repos -from fuelweb_test.helpers.decorators import check_repos_management -from fuelweb_test.helpers.decorators import custom_repo -from fuelweb_test.helpers.decorators import download_astute_yaml -from fuelweb_test.helpers.decorators import download_packages_json -from fuelweb_test.helpers.decorators import duration -from fuelweb_test.helpers.decorators import retry -from fuelweb_test.helpers.decorators import update_fuel -from fuelweb_test.helpers.decorators import upload_manifests -from fuelweb_test.helpers.security import SecurityChecks -from fuelweb_test.helpers.ssl_helpers import change_cluster_ssl_config -from fuelweb_test.helpers.ssl_helpers import copy_cert_from_master -from fuelweb_test.helpers.uca import change_cluster_uca_config -from fuelweb_test.helpers.utils import get_node_hiera_roles -from fuelweb_test.helpers.utils import node_freemem -from fuelweb_test.helpers.utils import pretty_log -from fuelweb_test.models.nailgun_client import NailgunClient -import fuelweb_test.settings as help_data -from fuelweb_test.settings import ATTEMPTS -from fuelweb_test.settings import BONDING -from fuelweb_test.settings import DEPLOYMENT_MODE_HA -from fuelweb_test.settings import DISABLE_SSL -from fuelweb_test.settings import DNS_SUFFIX -from fuelweb_test.settings import iface_alias -from fuelweb_test.settings import KEYSTONE_CREDS -from fuelweb_test.settings import KVM_USE -from fuelweb_test.settings import MULTIPLE_NETWORKS -from fuelweb_test.settings import NOVA_QUOTAS_ENABLED -from fuelweb_test.settings import AUTH_S3_KEYSTONE_CEPH_ENABLED -from fuelweb_test.settings import NETWORK_PROVIDERS -from fuelweb_test.settings import NEUTRON -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE -from fuelweb_test.settings 
import NODEGROUPS -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU -from fuelweb_test.settings import OSTF_TEST_NAME -from fuelweb_test.settings import OSTF_TEST_RETRIES_COUNT -from fuelweb_test.settings import REPLACE_DEFAULT_REPOS -from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE -from fuelweb_test.settings import SSL_CN -from fuelweb_test.settings import TIMEOUT -from fuelweb_test.settings import UCA_ENABLED -from fuelweb_test.settings import USER_OWNED_CERT -from fuelweb_test.settings import UBUNTU_SERVICE_PROVIDER - - -class FuelWebClient29(object): - """FuelWebClient.""" # TODO documentation - - def __init__(self, environment): - self._environment = environment - self.ssh_manager = environment.ssh_manager - self.admin_node_ip = self.ssh_manager.admin_ip - - keystone_url = "http://{0}:5000/v2.0".format(self.admin_node_ip) - - auth = V2Password( - auth_url=keystone_url, - username=KEYSTONE_CREDS['username'], - password=KEYSTONE_CREDS['password'], - tenant_name=KEYSTONE_CREDS['tenant_name']) - # TODO: in v3 project_name - - self._session = KeystoneSession(auth=auth, verify=False) - - self.client = NailgunClient(session=self._session) - self.fuel_client = FuelClient(session=self._session) - - self.security = SecurityChecks(self.client, self._environment) - - super(FuelWebClient29, self).__init__() - - @property - def environment(self): - """Environment Model - :rtype: EnvironmentModel - """ - return self._environment - - @staticmethod - @logwrap - def get_cluster_status(os_conn, smiles_count, networks_count=2): - checkers.verify_service_list_api(os_conn, service_count=smiles_count) - checkers.verify_glance_image_api(os_conn) - checkers.verify_network_list_api(os_conn, networks_count) - - @logwrap - def _ostf_test_wait(self, cluster_id, timeout): - logger.info('Wait OSTF tests at cluster #%s for %s seconds', - cluster_id, timeout) - wait( - lambda: all( - [ - run['status'] == 'finished' for run - in self.fuel_client.ostf.get_test_runs( - cluster_id=cluster_id)]), - timeout=timeout, - timeout_msg='OSTF tests run timeout ' - '(cluster_id={})'.format(cluster_id)) - return self.fuel_client.ostf.get_test_runs(cluster_id=cluster_id) - - @logwrap - def _tasks_wait(self, tasks, timeout): - return [self.task_wait(task, timeout) for task in tasks] - - @logwrap - def add_syslog_server(self, cluster_id, host, port): - logger.info('Add syslog server %s:%s to cluster #%s', - host, port, cluster_id) - self.client.add_syslog_server(cluster_id, host, port) - - @logwrap - def assert_cluster_floating_list(self, os_conn, cluster_id, expected_ips): - logger.info('Assert floating IPs on cluster #{0}. Expected {1}'.format( - cluster_id, expected_ips)) - current_ips = self.get_cluster_floating_list(os_conn, cluster_id) - assert_equal(set(expected_ips), set(current_ips), - 'Current floating IPs {0}'.format(current_ips)) - - @logwrap - def assert_cluster_ready(self, os_conn, smiles_count, - networks_count=2, timeout=300): - logger.info('Assert cluster services are UP') - # TODO(astudenov): add timeout_msg - wait_pass( - lambda: self.get_cluster_status( - os_conn, - smiles_count=smiles_count, - networks_count=networks_count), - timeout=timeout) - - @logwrap - def assert_ha_services_ready(self, cluster_id, timeout=20 * 60, - should_fail=0): - """Wait until HA services are UP. - Should be used before run any other check for services.""" - if self.get_cluster_mode(cluster_id) == DEPLOYMENT_MODE_HA: - logger.info('Waiting {0} sec. 
for passed OSTF HA tests.' - .format(timeout)) - with QuietLogger(logging.ERROR): - # TODO(astudenov): add timeout_msg - wait_pass(lambda: self.run_ostf(cluster_id, - test_sets=['ha'], - should_fail=should_fail), - interval=20, timeout=timeout) - logger.info('OSTF HA tests passed successfully.') - else: - logger.debug('Cluster {0} is not in HA mode, OSTF HA tests ' - 'skipped.'.format(cluster_id)) - - @logwrap - def assert_os_services_ready(self, cluster_id, timeout=5 * 60, - should_fail=0): - """Wait until OpenStack services are UP. - Should be used before run any other check for services.""" - logger.info('Waiting {0} sec. for passed OSTF Sanity checks.' - .format(timeout)) - with QuietLogger(): - # TODO(astudenov): add timeout_msg - wait_pass(lambda: self.run_ostf(cluster_id, - test_sets=['sanity'], - should_fail=should_fail), - interval=10, timeout=timeout) - logger.info('OSTF Sanity checks passed successfully.') - - @logwrap - def assert_ostf_run_certain(self, cluster_id, tests_must_be_passed, - timeout=10 * 60): - """Wait for OSTF tests to finish, check that the tests specified - in [tests_must_be_passed] are passed""" - - logger.info('Assert OSTF tests are passed at cluster #{0}: {1}'.format( - cluster_id, pretty_log(tests_must_be_passed, indent=1))) - - set_result_list = self._ostf_test_wait(cluster_id, timeout) - tests_pass_count = 0 - tests_count = len(tests_must_be_passed) - fail_details = [] - - for set_result in set_result_list: - for test in set_result['tests']: - if test['id'] in tests_must_be_passed: - if test['status'] == 'success': - tests_pass_count += 1 - logger.info('Passed OSTF test %s found', test['id']) - else: - details = ('%s (%s). Test status: %s, message: %s' - % (test['name'], test['id'], test['status'], - test['message'])) - fail_details.append(details) - - assert_true(tests_pass_count == tests_count, - 'The following tests have not succeeded, while they ' - 'must have passed: {}'.format(pretty_log(fail_details, - indent=1))) - - @logwrap - def assert_ostf_run(self, cluster_id, should_fail=0, failed_test_name=None, - timeout=15 * 60, test_sets=None): - """Wait for OSTF tests to finish, check that there is no failed tests. - If [failed_test_name] tests are expected, ensure that these tests - are not passed""" - - logger.info('Assert OSTF run at cluster #{0}. ' - 'Should fail {1} tests named {2}'.format(cluster_id, - should_fail, - failed_test_name)) - set_result_list = self._ostf_test_wait(cluster_id, timeout) - failed_tests_res = [] - failed = 0 - actual_failed_names = [] - test_result = {} - for set_result in set_result_list: - if set_result['testset'] not in test_sets: - continue - failed += len([test for test in set_result['tests'] - if test['status'] in {'failure', 'error'}]) - - for test in set_result['tests']: - test_result.update({test['name']: test['status']}) - if test['status'] not in ['success', 'disabled', 'skipped']: - actual_failed_names.append(test['name']) - key = ('{name:s} ({status:s})' - ''.format(name=test['name'], status=test['status'])) - failed_tests_res.append( - {key: test['message']}) - - logger.info('OSTF test statuses are :\n{}\n'.format( - pretty_log(test_result, indent=1))) - - if failed_test_name: - for test_name in actual_failed_names: - assert_true(test_name in failed_test_name, - 'WARNING! Unexpected fail: ' - 'expected {0}, actual {1}'.format( - failed_test_name, actual_failed_names) - ) - - assert_true( - failed <= should_fail, 'Failed {0} OSTF tests; should fail' - ' {1} tests. 
Names of failed tests: {2}' - .format(failed, - should_fail, - pretty_log(failed_tests_res, - indent=1))) - - def assert_release_state(self, release_name, state='available'): - logger.info('Assert release %s has state %s', release_name, state) - for release in self.client.get_releases(): - if release["name"].lower().find(release_name) != -1: - assert_equal(release['state'], state, - 'Release state {0}'.format(release['state'])) - return release["id"] - - def assert_release_role_present(self, release_name, role_name): - logger.info('Assert role %s is available in release %s', - role_name, release_name) - release_id = self.assert_release_state(release_name) - release_data = self.client.get_release(release_id=release_id) - assert_equal( - True, role_name in release_data['roles'], - message='There is no {0} role in release id {1}'.format( - role_name, release_name)) - - @logwrap - def assert_fuel_version(self, fuel_version): - logger.info('Assert fuel version is {0}'.format(fuel_version)) - version = self.client.get_api_version() - logger.debug('version get from api is {0}'.format(version['release'])) - assert_equal(version['release'], fuel_version, - 'Release state is not {0}'.format(fuel_version)) - - @logwrap - def assert_nailgun_upgrade_migration(self, - key='can_update_from_versions'): - for release in self.client.get_releases(): - assert_true(key in release) - - @logwrap - def assert_task_success( - self, task, timeout=130 * 60, interval=5, progress=None): - def _message(_task): - if 'message' in _task: - return _task['message'] - else: - return '' - - logger.info('Assert task %s is success', task) - if not progress: - task = self.task_wait(task, timeout, interval) - assert_equal( - task['status'], 'ready', - "Task '{0}' has incorrect status. {1} != {2}, '{3}'".format( - task["name"], task['status'], 'ready', _message(task) - ) - ) - else: - logger.info('Start to polling task progress') - task = self.task_wait_progress( - task, timeout=timeout, interval=interval, progress=progress) - assert_not_equal( - task['status'], 'error', - "Task '{0}' has error status. '{1}'" - .format(task['status'], _message(task))) - assert_true( - task['progress'] >= progress, - 'Task has other progress{0}'.format(task['progress'])) - - @logwrap - def assert_task_failed(self, task, timeout=70 * 60, interval=5): - logger.info('Assert task %s is failed', task) - task = self.task_wait(task, timeout, interval) - assert_equal( - 'error', task['status'], - "Task '{name}' has incorrect status. 
{status} != {exp}".format( - status=task['status'], exp='error', name=task["name"] - ) - ) - - @logwrap - def assert_all_tasks_completed(self, cluster_id=None): - cluster_info_template = "\n\tCluster ID: {cluster}{info}\n" - all_tasks = sorted( - self.client.get_all_tasks_list(), - key=lambda _tsk: _tsk['id'], - reverse=True - ) - - not_ready_tasks, deploy_tasks = checkers.incomplete_tasks( - all_tasks, cluster_id) - - not_ready_transactions = checkers.incomplete_deploy( - { - cluster: self.client.get_deployment_task_hist(task_id) - for cluster, task_id in deploy_tasks.items()}) - - if len(not_ready_tasks) > 0: - task_details_template = ( - "\n" - "\t\tTask name: {name}\n" - "\t\t\tStatus: {status}\n" - "\t\t\tProgress: {progress}\n" - "\t\t\tResult: {result}\n" - "\t\t\tMessage: {message}\n" - "\t\t\tTask ID: {id}" - ) - - task_text = 'Not all tasks completed: {}'.format( - ''.join( - cluster_info_template.format( - cluster=cluster, - info="".join( - task_details_template.format(**task) - for task in tasks)) - for cluster, tasks in sorted(not_ready_tasks.items()) - )) - logger.error(task_text) - if len(not_ready_transactions) == 0: - # Else: we will raise assert with detailed info - # about deployment - assert_true(len(not_ready_tasks) == 0, task_text) - - checkers.fail_deploy(not_ready_transactions) - - def wait_node_is_online(self, node, timeout=60 * 5): - # transform devops node to nailgun node - if isinstance(node, Node): - node = self.get_nailgun_node_by_devops_node(node) - logger.info( - 'Wait for node {!r} online status'.format(node['name'])) - wait(lambda: self.get_nailgun_node_online_status(node), - timeout=timeout, - timeout_msg='Node {!r} failed to become online' - ''.format(node['name'])) - - def wait_node_is_offline(self, devops_node, timeout=60 * 5): - logger.info( - 'Wait for node {!r} offline status'.format(devops_node.name)) - wait(lambda: not self.get_nailgun_node_by_devops_node( - devops_node)['online'], - timeout=timeout, - timeout_msg='Node {!r} failed to become offline' - ''.format(devops_node.name)) - - @logwrap - def fqdn(self, devops_node): - logger.info('Get FQDN of a devops node %s', devops_node.name) - nailgun_node = self.get_nailgun_node_by_devops_node(devops_node) - if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE: - return nailgun_node['meta']['system']['fqdn'] - return nailgun_node['fqdn'] - - @logwrap - def get_pcm_nodes(self, ctrl_node, pure=False): - nodes = {} - with self.get_ssh_for_node(ctrl_node) as remote: - pcm_nodes = remote.execute('pcs status nodes').stdout_yaml - - for status in ('Online', 'Offline', 'Standby'): - list_nodes = (pcm_nodes['Pacemaker Nodes'][status] or '').split() - if not pure: - nodes[status] = [self.get_fqdn_by_hostname(x) - for x in list_nodes] - else: - nodes[status] = list_nodes - return nodes - - @logwrap - def get_rabbit_running_nodes(self, ctrl_node): - """ - - :param ctrl_node: str - :return: list - """ - ip = self.get_node_ip_by_devops_name(ctrl_node) - cmd = 'rabbitmqctl cluster_status' - # If any rabbitmq nodes failed, we have return(70) from rabbitmqctl - # Acceptable list: - # 0 | EX_OK | Self-explanatory - # 69 | EX_UNAVAILABLE | Failed to connect to node - # 70 | EX_SOFTWARE | Any other error discovered when running command - # | | against live node - # 75 | EX_TEMPFAIL | Temporary failure (e.g. 
something timed out) - rabbit_status = self.ssh_manager.execute_on_remote( - ip, cmd, raise_on_assert=False, assert_ec_equal=[0, 69, 70, 75] - )['stdout_str'] - rabbit_status = re.sub(r',\n\s*', ',', rabbit_status) - found_nodes = re.search( - "\{running_nodes,\[([^\]]*)\]\}", - rabbit_status) - if not found_nodes: - logger.info( - 'No running rabbitmq nodes found on {0}. Status:\n {1}'.format( - ctrl_node, rabbit_status)) - return [] - rabbit_nodes = found_nodes.group(1).replace("'", "").split(',') - logger.debug('rabbit nodes are {}'.format(rabbit_nodes)) - nodes = [node.replace('rabbit@', "") for node in rabbit_nodes] - hostname_prefix = self.ssh_manager.execute_on_remote( - ip, 'hiera node_name_prefix_for_messaging', raise_on_assert=False - )['stdout_str'] - if hostname_prefix not in ('', 'nil'): - nodes = [n.replace(hostname_prefix, "") for n in nodes] - return nodes - - @logwrap - def assert_pacemaker(self, ctrl_node, online_nodes, offline_nodes): - logger.info('Assert pacemaker status at devops node %s', ctrl_node) - - online = sorted([self.fqdn(n) for n in online_nodes]) - offline = sorted([self.fqdn(n) for n in offline_nodes]) - try: - wait(lambda: self.get_pcm_nodes(ctrl_node)['Online'] == online and - self.get_pcm_nodes(ctrl_node)['Offline'] == offline, - timeout=60) - except TimeoutError: - nodes = self.get_pcm_nodes(ctrl_node) - assert_true(nodes['Online'] == online, - 'Online nodes: {0} ; should be online: {1}' - .format(nodes['Online'], online)) - assert_true(nodes['Offline'] == offline, - 'Offline nodes: {0} ; should be offline: {1}' - .format(nodes['Offline'], offline)) - - @logwrap - @upload_manifests - @update_fuel - def create_cluster(self, - name, - settings=None, - release_name=OPENSTACK_RELEASE, - mode=DEPLOYMENT_MODE_HA, - port=514, - release_id=None, - configure_ssl=True): - """Creates a cluster - :param name: - :param release_name: - :param mode: - :param settings: - :param port: - :param configure_ssl: - :param release_id: - :return: cluster_id - """ - logger.info('Create cluster with name %s', name) - if not release_id: - release_id = self.client.get_release_id(release_name=release_name) - logger.info('Release_id of %s is %s', - release_name, str(release_id)) - - if settings is None: - settings = {} - - if REPLACE_DEFAULT_REPOS and not REPLACE_DEFAULT_REPOS_ONLY_ONCE: - self.replace_default_repos(release_name=release_name) - - cluster_id = self.client.get_cluster_id(name) - if not cluster_id: - data = { - "name": name, - "release": release_id, - "mode": mode - } - - if "net_provider" in settings: - data.update({'net_provider': settings["net_provider"]}) - - if "net_segment_type" in settings: - data.update({'net_segment_type': settings["net_segment_type"]}) - - # NEUTRON_SEGMENT_TYPE should not override any option - # configured from test, in case if test is going to set only - # 'net_provider' for a cluster. 
- if (NEUTRON_SEGMENT_TYPE and - "net_provider" not in settings and - "net_segment_type" not in settings): - data.update( - { - 'net_provider': NEUTRON, - 'net_segment_type': NEUTRON_SEGMENT[ - NEUTRON_SEGMENT_TYPE] - } - ) - - self.client.create_cluster(data=data) - cluster_id = self.client.get_cluster_id(name) - logger.info('The cluster id is %s', cluster_id) - - logger.info('Set cluster settings to {}'.format( - pretty_log(settings, indent=1))) - attributes = self.client.get_cluster_attributes(cluster_id) - - for option in settings: - section = '' - if option in ('sahara', 'murano', 'ceilometer', 'mongo', - 'ironic'): - section = 'additional_components' - elif option in {'mongo_db_name', 'mongo_replset', 'mongo_user', - 'hosts_ip', 'mongo_password'}: - section = 'external_mongo' - elif option in {'volumes_ceph', 'images_ceph', - 'ephemeral_ceph', 'objects_ceph', - 'osd_pool_size', 'volumes_lvm', - 'volumes_block_device'}: - section = 'storage' - elif option in {'tenant', 'password', 'user'}: - section = 'access' - elif option == 'assign_to_all_nodes': - section = 'public_network_assignment' - elif option in {'neutron_l3_ha', 'neutron_dvr', - 'neutron_l2_pop', 'neutron_qos'}: - section = 'neutron_advanced_configuration' - elif option in {'dns_list'}: - section = 'external_dns' - elif option in {'ntp_list'}: - section = 'external_ntp' - elif option in {'propagate_task_deploy'}: - section = 'common' - if section: - try: - attributes['editable'][section][option]['value'] =\ - settings[option] - except KeyError: - if section not in attributes['editable']: - raise KeyError( - "Section '{0}' not in " - "attributes['editable']: {1}".format( - section, attributes['editable'].keys())) - raise KeyError( - "Option {0} not in attributes['editable'][{1}]: " - "{2}".format( - option, section, - attributes['editable'][section].keys())) - - # we should check DVR limitations - section = 'neutron_advanced_configuration' - if attributes['editable'][section]['neutron_dvr']['value']: - if attributes['editable'][section]['neutron_l3_ha']['value']: - raise Exception("Neutron DVR and Neutron L3 HA can't be" - " used simultaneously.") - - if 'net_segment_type' in settings: - net_segment_type = settings['net_segment_type'] - elif NEUTRON_SEGMENT_TYPE: - net_segment_type = NEUTRON_SEGMENT[NEUTRON_SEGMENT_TYPE] - else: - net_segment_type = None - - if not attributes['editable'][section]['neutron_l2_pop'][ - 'value'] and net_segment_type == 'tun': - raise Exception("neutron_l2_pop is not enabled but " - "it is required for VxLAN DVR " - "network configuration.") - - public_gw = self.get_public_gw() - - if help_data.FUEL_USE_LOCAL_NTPD\ - and ('ntp_list' not in settings)\ - and checkers.is_ntpd_active( - self.admin_node_ip, public_gw): - attributes['editable']['external_ntp']['ntp_list']['value'] =\ - [public_gw] - logger.info("Configuring cluster #{0}" - "to use NTP server {1}" - .format(cluster_id, public_gw)) - - if help_data.FUEL_USE_LOCAL_DNS and ('dns_list' not in settings): - attributes['editable']['external_dns']['dns_list']['value'] =\ - [public_gw] - logger.info("Configuring cluster #{0} to use DNS server {1}" - .format(cluster_id, public_gw)) - - logger.info('Set DEBUG MODE to %s', help_data.DEBUG_MODE) - attributes['editable']['common']['debug']['value'] = \ - help_data.DEBUG_MODE - - if KVM_USE: - logger.info('Set Hypervisor type to KVM') - hpv_data = attributes['editable']['common']['libvirt_type'] - hpv_data['value'] = "kvm" - - if NOVA_QUOTAS_ENABLED: - logger.info('Enable Nova quotas') - nova_quotas 
= attributes['editable']['common']['nova_quota'] - nova_quotas['value'] = True - - if AUTH_S3_KEYSTONE_CEPH_ENABLED: - logger.info( - 'Enable S3 API Authentication \ - via Keystone in Ceph RadosGW') - auth_s3_keystone_ceph = \ - attributes['editable']['common']['auth_s3_keystone_ceph'] - auth_s3_keystone_ceph['value'] = True - - if not help_data.TASK_BASED_ENGINE: - logger.info('Switch to Granular deploy') - attributes['editable']['common']['task_deploy']['value'] =\ - False - - # Updating attributes is needed before updating - # networking configuration because additional networks - # may be created by new components like ironic - self.client.update_cluster_attributes(cluster_id, attributes) - - self.nodegroups_configure(cluster_id) - - logger.debug("Try to update cluster " - "with next attributes {0}".format(attributes)) - self.client.update_cluster_attributes(cluster_id, attributes) - - if configure_ssl: - self.ssl_configure(cluster_id) - - if UCA_ENABLED or settings.get('uca_enabled', False): - self.enable_uca(cluster_id) - - if not cluster_id: - raise Exception("Could not get cluster '{:s}'".format(name)) - # TODO: rw105719 - # self.client.add_syslog_server( - # cluster_id, self.environment.get_host_node_ip(), port) - - return cluster_id - - @logwrap - def get_public_gw(self): - return self.environment.d_env.router(router_name="public") - - @logwrap - def nodegroups_configure(self, cluster_id): - """Update nodegroups configuration - """ - if not MULTIPLE_NETWORKS: - return - - ng = {rack['name']: [] for rack in NODEGROUPS} - self.update_nodegroups(cluster_id=cluster_id, node_groups=ng) - self.update_nodegroups_network_configuration(cluster_id, NODEGROUPS) - - @logwrap - def ssl_configure(self, cluster_id): - attributes = self.client.get_cluster_attributes(cluster_id) - change_cluster_ssl_config(attributes, SSL_CN) - logger.debug("Try to update cluster " - "with next attributes {0}".format(attributes)) - self.client.update_cluster_attributes(cluster_id, attributes) - - @logwrap - def set_ovs_firewall_driver(self, cluster_id): - """Set OVS firewall driver for neutron security groups - - :param cluster_id: int, cluster id - """ - cluster_attrs = self.client.get_cluster_attributes(cluster_id) - logger.debug('Trying to set OVS firewall driver') - cluster_attrs['editable']['common']['security_groups']['value'] = \ - 'openvswitch' - self.client.update_cluster_attributes(cluster_id, cluster_attrs) - - @logwrap - def enable_uca(self, cluster_id): - attributes = self.client.get_cluster_attributes(cluster_id) - change_cluster_uca_config(attributes) - logger.debug("Try to update cluster " - "with next attributes {0}".format(attributes)) - self.client.update_cluster_attributes(cluster_id, attributes) - - def add_local_ubuntu_mirror(self, cluster_id, name='Auxiliary', - path=help_data.LOCAL_MIRROR_UBUNTU, - suite='auxiliary', section='main', - priority=help_data.EXTRA_DEB_REPOS_PRIORITY): - # Append new mirror to attributes of currently creating Ubuntu cluster - mirror_url = path.replace('/var/www/nailgun', - 'http://{0}:8080'.format(self.admin_node_ip)) - mirror = '{0},deb {1} {2} {3}'.format(name, mirror_url, suite, section) - - attributes = self.client.get_cluster_attributes(cluster_id) - repos_attr = attributes['editable']['repo_setup']['repos'] - - repos_attr['value'] = replace_repos.add_ubuntu_extra_mirrors( - repos=repos_attr['value'], - prefix=suite, - mirrors=mirror, - priority=priority) - - replace_repos.report_ubuntu_repos(repos_attr['value']) - 
self.client.update_cluster_attributes(cluster_id, attributes) - - def add_local_centos_mirror(self, cluster_id, repo_name='auxiliary', - path=help_data.LOCAL_MIRROR_CENTOS, - priority=help_data.EXTRA_RPM_REPOS_PRIORITY): - # Append new mirror to attributes of currently creating CentOS cluster - mirror_url = path.replace('/var/www/nailgun', - 'http://{0}:8080'.format(self.admin_node_ip)) - mirror = '{0},{1}'.format(repo_name, mirror_url) - - attributes = self.client.get_cluster_attributes(cluster_id) - repos_attr = attributes['editable']['repo_setup']['repos'] - - repos_attr['value'] = replace_repos.add_centos_extra_mirrors( - repos=repos_attr['value'], - mirrors=mirror, - priority=priority) - - replace_repos.report_centos_repos(repos_attr['value']) - self.client.update_cluster_attributes(cluster_id, attributes) - - def replace_default_repos(self, release_name=None): - if release_name is None: - for release_name in [help_data.OPENSTACK_RELEASE_UBUNTU, - help_data.OPENSTACK_RELEASE_UBUNTU_UCA]: - self.replace_release_repos(release_name=release_name) - else: - self.replace_release_repos(release_name=release_name) - - def replace_release_repos(self, release_name): - release_id = self.client.get_release_id(release_name=release_name) - release_data = self.client.get_release(release_id) - if release_data["state"] == "available": - logger.info("Replace default repository list for {0}: '{1}'" - " release".format(release_id, release_name)) - release_meta = release_data["attributes_metadata"] - release_repos = release_meta["editable"]["repo_setup"]["repos"] - if release_data["operating_system"] == "Ubuntu": - release_repos["value"] = replace_repos.replace_ubuntu_repos( - release_repos, upstream_host='archive.ubuntu.com') - self.client.put_release(release_id, release_data) - replace_repos.report_ubuntu_repos(release_repos["value"]) - elif release_data["operating_system"] == "CentOS": - release_repos["value"] = replace_repos.replace_centos_repos( - release_repos, upstream_host=self.admin_node_ip) - self.client.put_release(release_id, release_data) - replace_repos.report_centos_repos(release_repos["value"]) - else: - logger.info("Unknown Operating System for release {0}: '{1}'." - " Repository list not updated".format( - release_id, release_name)) - else: - logger.info("Release {0}: '{1}' is unavailable. 
Repository list" - " not updated".format(release_id, release_name)) - - def get_cluster_repos(self, cluster_id): - attributes = self.client.get_cluster_attributes(cluster_id) - return attributes['editable']['repo_setup']['repos'] - - def check_deploy_state(self, cluster_id, check_services=True, - check_tasks=True, allow_partially_deploy=False): - self.check_cluster_status(cluster_id, allow_partially_deploy) - - if check_tasks: - self.assert_all_tasks_completed(cluster_id=cluster_id) - if check_services: - self.assert_ha_services_ready(cluster_id) - self.assert_os_services_ready(cluster_id) - if not DISABLE_SSL and not USER_OWNED_CERT: - with self.environment.d_env.get_admin_remote() as admin_remote: - copy_cert_from_master(admin_remote, cluster_id) - n_nodes = self.client.list_cluster_nodes(cluster_id) - for n in filter(lambda n: 'ready' in n['status'], n_nodes): - node = self.get_devops_node_by_nailgun_node(n) - if node: - node_name = node.name - with self.get_ssh_for_node(node_name) as remote: - free = node_freemem(remote) - hiera_roles = get_node_hiera_roles(remote, n['fqdn']) - node_status = { - node_name: - { - 'Host': n['hostname'], - 'Roles': - { - 'Nailgun': n['roles'], - 'Hiera': hiera_roles, - }, - 'Memory': - { - 'RAM': free['mem'], - 'SWAP': free['swap'], - }, - }, - } - - logger.info('Node status: {}'.format(pretty_log(node_status, - indent=1))) - - @download_packages_json - @download_astute_yaml - @duration - @check_repos_management - @custom_repo - def deploy_cluster_wait(self, cluster_id, is_feature=False, - timeout=help_data.DEPLOYMENT_TIMEOUT, interval=30, - check_services=True, check_tasks=True, - allow_partially_deploy=False): - cluster_attributes = self.client.get_cluster_attributes(cluster_id) - self.client.assign_ip_address_before_deploy_start(cluster_id) - network_settings = self.client.get_networks(cluster_id) - if not is_feature and help_data.DEPLOYMENT_RETRIES == 1: - logger.info('Deploy cluster %s', cluster_id) - task = self.deploy_cluster(cluster_id) - self.assert_task_success(task, interval=interval, timeout=timeout) - self.check_deploy_state(cluster_id, check_services, check_tasks, - allow_partially_deploy) - return - - logger.info('Provision nodes of a cluster %s', cluster_id) - task = self.client.provision_nodes(cluster_id) - self.assert_task_success(task, timeout=timeout, interval=interval) - - for retry_number in range(help_data.DEPLOYMENT_RETRIES): - logger.info('Deploy nodes of a cluster %s, run: %s', - cluster_id, str(retry_number + 1)) - task = self.client.deploy_nodes(cluster_id) - self.assert_task_success(task, timeout=timeout, interval=interval) - self.check_deploy_state(cluster_id, check_services, check_tasks, - allow_partially_deploy) - self.check_cluster_settings(cluster_id, cluster_attributes) - self.check_network_settings(cluster_id, network_settings) - self.check_deployment_info_save_for_task(cluster_id) - - def check_cluster_status(self, cluster_id, allow_partially_deploy): - cluster_info = self.client.get_cluster(cluster_id) - cluster_status = cluster_info['status'] - error_msg = \ - "Cluster is not deployed: some nodes are in the Error state" - check = 'operational' in cluster_status - if not check and allow_partially_deploy: - logger.warning(error_msg) - elif not check: - assert_true(check, error_msg) - else: - logger.info("Cluster with id {} is in Operational state".format( - cluster_id)) - - @logwrap - def check_cluster_settings(self, cluster_id, cluster_attributes): - task_id = self.get_last_task_id(cluster_id, 'deployment') - 
-        cluster_settings = \
-            self.client.get_cluster_settings_for_deployment_task(task_id)
-        logger.debug('Cluster settings before deploy {}'.format(
-            cluster_attributes))
-        logger.debug('Cluster settings after deploy {}'.format(
-            cluster_settings))
-        assert_equal(cluster_attributes, cluster_settings,
-                     message='Cluster attributes before deploy are not equal'
-                             ' to cluster settings after deploy')
-
-    @logwrap
-    def check_network_settings(self, cluster_id, network_settings):
-        task_id = self.get_last_task_id(cluster_id, 'deployment')
-        network_configuration = \
-            self.client.get_network_configuration_for_deployment_task(task_id)
-        logger.debug('Network settings before deploy {}'.format(
-            network_settings))
-        logger.debug('Network settings after deploy {}'.format(
-            network_configuration))
-        assert_equal(network_settings, network_configuration,
-                     message='Network settings from cluster configuration '
-                             'and deployment task are not equal')
-
-    @logwrap
-    def check_deployment_info_save_for_task(self, cluster_id):
-        # Initialize task_id so the handler below cannot hit a NameError
-        # when get_last_task_id() itself raises.
-        task_id = None
-        try:
-            task_id = self.get_last_task_id(cluster_id, 'deployment')
-            self.client.get_deployment_info_for_task(task_id)
-        except Exception:
-            logger.error(
-                "Cannot get information about deployment for task {}".format(
-                    task_id))
-
-    @logwrap
-    def get_last_task_id(self, cluster_id, task_name):
-        filtered_tasks = self.filter_nailgun_entities(self.client.get_tasks(),
-                                                      cluster=cluster_id,
-                                                      name=task_name)
-        return max([task['id'] for task in filtered_tasks])
-
-    @staticmethod
-    @logwrap
-    def filter_nailgun_entities(entities, **filters):
-        res = []
-        for entity in entities:
-            for f_key, f_value in six.iteritems(filters):
-                if entity.get(f_key) != f_value:
-                    break
-            else:
-                res.append(entity)
-        return res
-
-    @logwrap
-    def wait_for_tasks_presence(self, get_tasks, **filters):
-        wait(lambda: self.filter_nailgun_entities(get_tasks(), **filters),
-             timeout=300,
-             timeout_msg="Timeout exceeded while waiting for tasks.")
-
-    def deploy_cluster_wait_progress(self, cluster_id, progress,
-                                     return_task=None):
-        task = self.deploy_cluster(cluster_id)
-        self.assert_task_success(task, interval=30, progress=progress)
-        if return_task:
-            return task
-
-    def redeploy_cluster_changes_wait_progress(self, cluster_id, progress,
-                                               data=None, return_task=None):
-        logger.info('Re-deploy cluster {}'
-                    ' to apply the changed settings'.format(cluster_id))
-        if data is None:
-            data = {}
-        task = self.client.redeploy_cluster_changes(cluster_id, data)
-        self.assert_task_success(task, interval=30, progress=progress)
-        if return_task:
-            return task
-
-    @logwrap
-    def deploy_cluster(self, cluster_id):
-        """Return hash with task description."""
-        logger.info('Launch deployment of a cluster #%s', cluster_id)
-        return self.client.deploy_cluster_changes(cluster_id)
-
-    @logwrap
-    def get_cluster_predefined_networks_name(self, cluster_id):
-        net_params = self.client.get_networks(
-            cluster_id)['networking_parameters']
-        return {'private_net': net_params.get('internal_name', 'net04'),
-                'external_net': net_params.get('floating_name', 'net04_ext')}
-
-    @logwrap
-    def get_cluster_floating_list(self, os_conn, cluster_id):
-        logger.info('Get floating IPs list at cluster #{0}'.format(cluster_id))
-
-        subnet = os_conn.get_subnet('{0}__subnet'.format(
-            self.get_cluster_predefined_networks_name(
-                cluster_id)['external_net']))
-        ret = []
-        for pool in subnet['allocation_pools']:
-            ret.extend([str(ip) for ip in
-                        netaddr.iter_iprange(pool['start'], pool['end'])])
-        return ret
-
-    @logwrap
-    def get_cluster_block_devices(self, 
node_name):
-        logger.info('Get %s node block devices (lsblk)', node_name)
-        with self.get_ssh_for_node(node_name) as remote:
-            # Note: 'lsblk' command returns some undecodable symbols
-            return ''.join(remote.check_call('/bin/lsblk')['stdout'])
-
-    @logwrap
-    def get_pacemaker_status(self, controller_node_name):
-        logger.info('Get pacemaker status at %s node', controller_node_name)
-        with self.get_ssh_for_node(controller_node_name) as remote:
-            return ''.join(remote.check_call('crm_mon -1')['stdout'])
-
-    @logwrap
-    def get_pacemaker_config(self, controller_node_name):
-        logger.info('Get pacemaker config at %s node', controller_node_name)
-        with self.get_ssh_for_node(controller_node_name) as remote:
-            return ''.join(remote.check_call('crm_resource --list')['stdout'])
-
-    @logwrap
-    def get_pacemaker_resource_location(self, controller_node_name,
-                                        resource_name):
-        """Get devops nodes where the resource is running."""
-        logger.info('Get pacemaker resource %s location from %s node',
-                    resource_name, controller_node_name)
-        hosts = []
-        with self.get_ssh_for_node(controller_node_name) as remote:
-            for line in remote.check_call(
-                    'crm_resource --resource {0} '
-                    '--locate --quiet'.format(resource_name))['stdout']:
-                hosts.append(
-                    self.get_devops_node_by_nailgun_fqdn(line.strip()))
-
-        return hosts
-
-    @logwrap
-    def get_last_created_cluster(self):
-        # Return the id of the most recently created cluster
-        logger.info('Get ID of the last created cluster')
-        clusters = self.client.list_clusters()
-        if len(clusters) > 0:
-            return sorted(
-                clusters, key=lambda cluster: cluster['id']
-            ).pop()['id']
-        return None
-
-    @logwrap
-    def get_nailgun_node_roles(self, nodes_dict):
-        nailgun_node_roles = []
-        for node_name in nodes_dict:
-            slave = self.environment.d_env.get_node(name=node_name)
-            node = self.get_nailgun_node_by_devops_node(slave)
-            nailgun_node_roles.append((node, nodes_dict[node_name]))
-        return nailgun_node_roles
-
-    @logwrap
-    def get_nailgun_node_by_name(self, node_name):
-        logger.info('Get nailgun node by %s devops node', node_name)
-        return self.get_nailgun_node_by_devops_node(
-            self.environment.d_env.get_node(name=node_name))
-
-    @logwrap
-    def get_nailgun_node_by_base_name(self, base_node_name):
-        logger.debug('Get nailgun node by "{0}" base '
-                     'node name.'.format(base_node_name))
-        nodes = self.client.list_nodes()
-        for node in nodes:
-            if base_node_name in node['name']:
-                return node
-
-    @logwrap
-    def get_nailgun_node_by_devops_node(self, devops_node):
-        """Return slave node description.
-        Returns dict with nailgun slave node description if node is
-        registered. Otherwise return None. 
- """ - d_macs = {netaddr.EUI(i.mac_address) for i in devops_node.interfaces} - logger.debug('Verify that nailgun api is running') - attempts = ATTEMPTS - nodes = [] - while attempts > 0: - logger.debug( - 'current timeouts is {0} count of ' - 'attempts is {1}'.format(TIMEOUT, attempts)) - try: - nodes = self.client.list_nodes() - logger.debug('Got nodes %s', nodes) - attempts = 0 - except Exception: - logger.debug(traceback.format_exc()) - attempts -= 1 - time.sleep(TIMEOUT) - logger.debug('Look for nailgun node by macs %s', d_macs) - for nailgun_node in nodes: - node_nics = self.client.get_node_interfaces(nailgun_node['id']) - macs = {netaddr.EUI(nic['mac']) - for nic in node_nics if nic['type'] == 'ether'} - logger.debug('Look for macs returned by nailgun {0}'.format(macs)) - # Because our HAproxy may create some interfaces - if d_macs.issubset(macs): - nailgun_node['devops_name'] = devops_node.name - return nailgun_node - # On deployed environment MAC addresses of bonded network interfaces - # are changes and don't match addresses associated with devops node - if BONDING: - return self.get_nailgun_node_by_base_name(devops_node.name) - - @logwrap - def get_nailgun_node_by_fqdn(self, fqdn): - """Return nailgun node with fqdn - - :type fqdn: String - :rtype: Dict - """ - for nailgun_node in self.client.list_nodes(): - if nailgun_node['meta']['system']['fqdn'] == fqdn: - return nailgun_node - - @logwrap - def get_nailgun_node_by_status(self, status): - """Return nailgun nodes with status - - :type status: String - :rtype: List - """ - returned_nodes = [] - for nailgun_node in self.client.list_nodes(): - if nailgun_node['status'] == status: - returned_nodes.append(nailgun_node) - return returned_nodes - - @logwrap - def find_devops_node_by_nailgun_fqdn(self, fqdn, devops_nodes): - """Return devops node by nailgun fqdn - - :type fqdn: String - :type devops_nodes: List - :rtype: Devops Node or None - """ - nailgun_node = self.get_nailgun_node_by_fqdn(fqdn) - macs = {netaddr.EUI(i['mac']) for i in - nailgun_node['meta']['interfaces']} - for devops_node in devops_nodes: - devops_macs = {netaddr.EUI(i.mac_address) for i in - devops_node.interfaces} - if devops_macs == macs: - return devops_node - - @logwrap - def get_devops_node_by_mac(self, mac_address): - """Return devops node by nailgun node - - :type mac_address: String - :rtype: Node or None - """ - for node in self.environment.d_env.nodes(): - for iface in node.interfaces: - if netaddr.EUI(iface.mac_address) == netaddr.EUI(mac_address): - return node - - @logwrap - def get_devops_nodes_by_nailgun_nodes(self, nailgun_nodes): - """Return devops node by nailgun node - - :type nailgun_nodes: List - :rtype: list of Nodes or None - """ - d_nodes = [self.get_devops_node_by_nailgun_node(n) for n - in nailgun_nodes] - d_nodes = [n for n in d_nodes if n is not None] - return d_nodes if len(d_nodes) == len(nailgun_nodes) else None - - @logwrap - def get_devops_node_by_nailgun_node(self, nailgun_node): - """Return devops node by nailgun node - - :type nailgun_node: Dict - :rtype: Node or None - """ - if nailgun_node: - return self.get_devops_node_by_mac(nailgun_node['mac']) - - @logwrap - def get_devops_node_by_nailgun_node_id(self, nailgun_node_id): - """Return devops node by nailgun node id - - :type nailgun_node_id: int - :rtype: Node or None - """ - nailgun_node = [node for node in self.client.list_nodes() if - node['id'] == nailgun_node_id].pop() - return self.get_devops_node_by_mac(nailgun_node['mac']) - - @logwrap - def 
get_devops_node_by_nailgun_fqdn(self, fqdn): - """Return devops node with nailgun fqdn - - :type fqdn: String - :rtype: Devops Node or None - """ - return self.get_devops_node_by_nailgun_node( - self.get_nailgun_node_by_fqdn(fqdn)) - - @logwrap - def get_nailgun_cluster_nodes_by_roles(self, cluster_id, roles, - role_status='roles'): - """Return list of nailgun nodes from cluster with cluster_id which have - a roles - - :type cluster_id: Int - :type roles: list - :rtype: list - """ - nodes = self.client.list_cluster_nodes(cluster_id=cluster_id) - return [n for n in nodes if set(roles) <= set(n[role_status])] - - @logwrap - def get_node_ip_by_devops_name(self, node_name): - """Get node ip by it's devops name (like "slave-01" and etc) - - :param node_name: str - :return: str - """ - # TODO: This method should be part of fuel-devops - try: - node = self.get_nailgun_node_by_devops_node( - self.environment.d_env.get_node(name=node_name)) - except DevopsObjNotFound: - node = self.get_nailgun_node_by_fqdn(node_name) - assert_true(node is not None, - 'Node with name "{0}" not found!'.format(node_name)) - return node['ip'] - - @logwrap - def get_ssh_for_node(self, node_name): - return self.environment.d_env.get_node_remote(node_name) - - @logwrap - def get_ssh_for_role(self, nodes_dict, role): - node_name = sorted(filter(lambda name: role in nodes_dict[name], - nodes_dict.keys()))[0] - return self.get_ssh_for_node(node_name) - - @logwrap - def get_ssh_for_ip(self, ip): - return self.ssh_manager.get_remote(ip) - - @logwrap - def get_ssh_for_nailgun_node(self, nailgun_node): - return self.get_ssh_for_ip(nailgun_node['ip']) - - @logwrap - def is_node_discovered(self, nailgun_node): - return any( - map(lambda node: - node['mac'] == nailgun_node['mac'] and - node['status'] == 'discover', self.client.list_nodes())) - - def wait_node_is_discovered(self, nailgun_node, timeout=6 * 60): - logger.info('Wait for node {!r} to become discovered' - ''.format(nailgun_node['name'])) - wait(lambda: self.is_node_discovered(nailgun_node), - timeout=timeout, - timeout_msg='Node {!r} failed to become discovered' - ''.format(nailgun_node['name'])) - - @logwrap - def run_network_verify(self, cluster_id): - logger.info('Run network verification on the cluster %s', cluster_id) - return self.client.verify_networks(cluster_id) - - @logwrap - def run_ostf(self, cluster_id, test_sets=None, - should_fail=0, tests_must_be_passed=None, - timeout=None, failed_test_name=None): - """Run specified OSTF test set(s), check that all of them - or just [tests_must_be_passed] are passed""" - - test_sets = test_sets or ['smoke', 'sanity'] - timeout = timeout or 30 * 60 - self.fuel_client.ostf.run_tests(cluster_id, test_sets) - if tests_must_be_passed: - self.assert_ostf_run_certain( - cluster_id, - tests_must_be_passed, - timeout) - else: - logger.info('Try to run assert ostf with ' - 'expected fail name {0}'.format(failed_test_name)) - self.assert_ostf_run( - cluster_id, - should_fail=should_fail, timeout=timeout, - failed_test_name=failed_test_name, test_sets=test_sets) - - @logwrap - def return_ostf_results(self, cluster_id, timeout, test_sets): - """Filter and return OSTF results for further analysis""" - - set_result_list = self._ostf_test_wait(cluster_id, timeout) - tests_res = [] - for set_result in set_result_list: - for test in set_result['tests']: - if (test['testset'] in test_sets and - test['status'] != 'disabled'): - tests_res.append({test['name']: test['status']}) - - logger.info('OSTF test statuses are : {0}' - 
.format(pretty_log(tests_res, indent=1)))
-        return tests_res
-
-    @logwrap
-    def run_single_ostf_test(self, cluster_id,
-                             test_sets=None, test_name=None,
-                             retries=None, timeout=15 * 60):
-        """Run a single OSTF test"""
-
-        self.fuel_client.ostf.run_tests(cluster_id, test_sets, test_name)
-        if retries:
-            return self.return_ostf_results(cluster_id, timeout=timeout,
-                                            test_sets=test_sets)
-        else:
-            self.assert_ostf_run_certain(cluster_id,
-                                         tests_must_be_passed=[test_name],
-                                         timeout=timeout)
-
-    @logwrap
-    def task_wait(self, task, timeout, interval=5, states=None):
-        # wait until the task is finished ('ready' or 'error') by default
-        states = states or ('ready', 'error')
-        logger.info('Wait up to {0} seconds for task: {1}'.format(
-            timeout, pretty_log(task, indent=1)))
-        start = time.time()
-
-        wait(
-            lambda: (self.client.get_task(task['id'])['status'] in states),
-            interval=interval,
-            timeout=timeout,
-            timeout_msg='Timeout of {1} sec was exceeded while waiting '
-                        'for task {0!r}'.format(task['name'], timeout))
-
-        took = time.time() - start
-        task = self.client.get_task(task['id'])
-        logger.info('Task changed its state to one of {}. Took {} seconds.'
-                    ' {}'.format(states, took, pretty_log(task, indent=1)))
-        return task
-
-    @logwrap
-    def task_wait_progress(self, task, timeout, interval=5, progress=None):
-        logger.info('Start waiting with timeout {0} '
-                    'and interval {1}'.format(timeout, interval))
-        wait(
-            lambda: self.client.get_task(
-                task['id'])['progress'] >= progress,
-            interval=interval,
-            timeout=timeout,
-            timeout_msg='Timeout of {1} sec was exceeded while waiting '
-                        'for task {0!r}'.format(task["name"], timeout))
-        return self.client.get_task(task['id'])
-
-    # TODO(ddmitriev): this method will be replaced
-    # after switching to fuel-devops3.0
-    # pylint: disable=no-self-use
-    def get_node_group_and_role(self, node_name, nodes_dict):
-        if MULTIPLE_NETWORKS:
-            node_roles = nodes_dict[node_name][0]
-            node_group = nodes_dict[node_name][1]
-        else:
-            node_roles = nodes_dict[node_name]
-            node_group = 'default'
-        return node_group, node_roles
-    # pylint: enable=no-self-use
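task_wait and task_wait_progress above are thin wrappers around the same poll-until-predicate loop. A dependency-free equivalent, where get_status is a hypothetical stand-in for self.client.get_task(task['id'])['status']:

    import time

    def poll_task(get_status, timeout, interval=5, states=('ready', 'error')):
        # Poll until the task reaches a terminal state or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = get_status()
            if status in states:
                return status
            time.sleep(interval)
        raise RuntimeError('Task did not reach {0} within {1} s'
                           .format(states, timeout))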
-
-    @logwrap
-    def update_nodes(self, cluster_id, nodes_dict,
-                     pending_addition=True, pending_deletion=False,
-                     update_nodegroups=False, custom_names=None,
-                     update_interfaces=True):
-
-        failed_nodes = {}
-        for node_name, node_roles in nodes_dict.items():
-            try:
-                self.environment.d_env.get_node(name=node_name)
-            except DevopsObjNotFound:
-                failed_nodes[node_name] = node_roles
-        if failed_nodes:
-            text = 'Some nodes are inaccessible:\n'
-            for node_name, node_roles in sorted(failed_nodes.items()):
-                text += '\t{name} for roles: {roles!s}\n'.format(
-                    name=node_name,
-                    roles=['{}'.format(node) for node in sorted(node_roles)])
-            text += 'Impossible to continue!'
-            logger.error(text)
-            raise KeyError(sorted(list(failed_nodes.keys())))
-
-        # update nodes in cluster
-        nodes_data = []
-        nodes_groups = {}
-        updated_nodes = []
-        for node_name in nodes_dict:
-            devops_node = self.environment.d_env.get_node(name=node_name)
-
-            node_group, node_roles = self.get_node_group_and_role(node_name,
-                                                                  nodes_dict)
-            self.wait_node_is_online(devops_node, timeout=60 * 2)
-            node = self.get_nailgun_node_by_devops_node(devops_node)
-
-            if custom_names:
-                name = custom_names.get(node_name,
-                                        '{}_{}'.format(
-                                            node_name,
-                                            "_".join(node_roles)))
-            else:
-                name = '{0}_{1}'.format(node_name, "_".join(node_roles))
-
-            node_data = {
-                'cluster_id': cluster_id,
-                'id': node['id'],
-                'pending_addition': pending_addition,
-                'pending_deletion': pending_deletion,
-                'pending_roles': node_roles,
-                'name': name
-            }
-            nodes_data.append(node_data)
-            if node_group not in nodes_groups.keys():
-                nodes_groups[node_group] = []
-            nodes_groups[node_group].append(node)
-            updated_nodes.append(node)
-
-        # assume nodes are going to be updated for one cluster only
-        cluster_id = nodes_data[-1]['cluster_id']
-        node_ids = [str(node_info['id']) for node_info in nodes_data]
-        self.client.update_nodes(nodes_data)
-
-        nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
-        cluster_node_ids = [str(_node['id']) for _node in nailgun_nodes]
-        assert_true(
-            all([node_id in cluster_node_ids for node_id in node_ids]))
-
-        if update_interfaces and not pending_deletion:
-            self.update_nodes_interfaces(cluster_id, updated_nodes)
-        if update_nodegroups:
-            self.update_nodegroups(cluster_id=cluster_id,
-                                   node_groups=nodes_groups)
-
-        return nailgun_nodes
-
-    @logwrap
-    def delete_node(self, node_id, interval=30, timeout=600):
-        task = self.client.delete_node(node_id)
-        logger.debug("task info is {}".format(task))
-        self.assert_task_success(task, interval=interval, timeout=timeout)
-
-    @logwrap
-    def update_node_networks(self, node_id, interfaces_dict,
-                             raw_data=None,
-                             override_ifaces_params=None):
-        interfaces = self.client.get_node_interfaces(node_id)
-
-        if raw_data is not None:
-            interfaces.extend(raw_data)
-
-        def get_bond_ifaces():
-            # Collect the names of all interfaces that are bond slaves
-            ifaces = []
-            for bond in [i for i in interfaces if i['type'] == 'bond']:
-                ifaces.extend(s['name'] for s in bond['slaves'])
-            return ifaces
-
-        # fuelweb_admin is always on the 1st iface unless that iface is bonded
-        iface = iface_alias('eth0')
-        if iface not in get_bond_ifaces():
-            interfaces_dict[iface] = interfaces_dict.get(iface,
-                                                         [])
-            if 'fuelweb_admin' not in interfaces_dict[iface]:
-                interfaces_dict[iface].append('fuelweb_admin')
-
-        def get_iface_by_name(ifaces, name):
-            iface = [_iface for _iface in ifaces if _iface['name'] == name]
-            assert_true(len(iface) > 0,
-                        "Interface with name {} is not present on "
-                        "node. 
Please check override params.".format(name)) - return iface[0] - - if override_ifaces_params is not None: - for interface in override_ifaces_params: - get_iface_by_name(interfaces, interface['name']).\ - update(interface) - - all_networks = dict() - for interface in interfaces: - all_networks.update( - {net['name']: net for net in interface['assigned_networks']}) - - for interface in interfaces: - name = interface["name"] - interface['assigned_networks'] = \ - [all_networks[i] for i in interfaces_dict.get(name, []) if - i in all_networks.keys()] - - self.client.put_node_interfaces( - [{'id': node_id, 'interfaces': interfaces}]) - - @logwrap - def update_node_disk(self, node_id, disks_dict): - disks = self.client.get_node_disks(node_id) - for disk in disks: - dname = disk['name'] - if dname not in disks_dict: - continue - for volume in disk['volumes']: - vname = volume['name'] - if vname in disks_dict[dname]: - volume['size'] = disks_dict[dname][vname] - - self.client.put_node_disks(node_id, disks) - - @logwrap - def get_node_disk_size(self, node_id, disk_name): - disks = self.client.get_node_disks(node_id) - size = 0 - for disk in disks: - if disk['name'] == disk_name: - for volume in disk['volumes']: - size += volume['size'] - return size - - def get_node_partition_size(self, node_id, partition_name): - disks = self.client.get_node_disks(node_id) - size = 0 - logger.debug('Disks of node-{}: \n{}'.format(node_id, - pretty_log(disks))) - for disk in disks: - for volume in disk['volumes']: - if volume['name'] == partition_name: - size += volume['size'] - return size - - @logwrap - def update_node_partitioning(self, node, disk='vdc', - node_role='cinder', unallocated_size=11116): - node_size = self.get_node_disk_size(node['id'], disk) - disk_part = { - disk: { - node_role: node_size - unallocated_size - } - } - self.update_node_disk(node['id'], disk_part) - return node_size - unallocated_size - - @logwrap - def update_vlan_network_fixed( - self, cluster_id, amount=1, network_size=256): - self.client.update_network( - cluster_id, - networking_parameters={ - "net_manager": help_data.NETWORK_MANAGERS['vlan'], - "fixed_network_size": network_size, - "fixed_networks_amount": amount - } - ) - - @retry(count=2, delay=20) - @logwrap - def verify_network(self, cluster_id, timeout=60 * 5, success=True): - def _report_verify_network_result(task): - # Report verify_network results using style like on UI - if task['status'] == 'error' and 'result' in task: - msg = "Network verification failed:\n" - if task['result']: - msg += ("{0:30} | {1:20} | {2:15} | {3}\n" - .format("Node Name", "Node MAC address", - "Node Interface", - "Expected VLAN (not received)")) - for res in task['result']: - name = None - mac = None - interface = None - absent_vlans = [] - if 'name' in res: - name = res['name'] - if 'mac' in res: - mac = res['mac'] - if 'interface' in res: - interface = res['interface'] - if 'absent_vlans' in res: - absent_vlans = res['absent_vlans'] - msg += ("{0:30} | {1:20} | {2:15} | {3}\n".format( - name or '-', mac or '-', interface or '-', - [x or 'untagged' for x in absent_vlans])) - logger.error(''.join([msg, task['message']])) - - # TODO(apanchenko): remove this hack when network verification begins - # TODO(apanchenko): to work for environments with multiple net groups - if len(self.client.get_nodegroups()) > 1: - logger.warning('Network verification is temporary disabled when ' - '"multiple cluster networks" feature is used') - return - try: - task = self.run_network_verify(cluster_id) - with 
QuietLogger(): - if success: - self.assert_task_success(task, timeout, interval=10) - else: - self.assert_task_failed(task, timeout, interval=10) - logger.info("Network verification of cluster {0} finished" - .format(cluster_id)) - except AssertionError: - # Report the result of network verify. - task = self.client.get_task(task['id']) - _report_verify_network_result(task) - raise - - @logwrap - def update_nodes_interfaces(self, cluster_id, nailgun_nodes=None): - if nailgun_nodes is None: - nailgun_nodes = [] - net_provider = self.client.get_cluster(cluster_id)['net_provider'] - if NEUTRON == net_provider: - assigned_networks = { - iface_alias('eth0'): ['fuelweb_admin'], - iface_alias('eth1'): ['public'], - iface_alias('eth2'): ['management'], - iface_alias('eth3'): ['private'], - iface_alias('eth4'): ['storage'], - } - else: - assigned_networks = { - iface_alias('eth1'): ['public'], - iface_alias('eth2'): ['management'], - iface_alias('eth3'): ['fixed'], - iface_alias('eth4'): ['storage'], - } - - baremetal_iface = iface_alias('eth5') - if self.get_cluster_additional_components(cluster_id).get( - 'ironic', False): - assigned_networks[baremetal_iface] = ['baremetal'] - - logger.info('Assigned networks are: {}'.format(str(assigned_networks))) - - if not nailgun_nodes: - nailgun_nodes = self.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.update_node_networks(node['id'], assigned_networks) - - @logwrap - def get_offloading_modes(self, node_id, interfaces): - """Get offloading modes for predifened ifaces - - :param node_id: int, nailgun node id - :param interfaces: list, list of iface names - :return: list, list of available offloading modes - """ - target_ifaces = [iface - for iface in self.client.get_node_interfaces(node_id) - if iface['name'] in interfaces] - - if 'interface_properties' in target_ifaces[0]: - logger.debug("Using old interface serialization scheme") - offloading_types = set([ - offloading_type['name'] - for iface in target_ifaces - for offloading_type in iface['offloading_modes']]) - else: - logger.debug("Using new interface serialization scheme") - offloading_types = set([ - offloading_type['name'] - for iface in target_ifaces - for offloading_type in iface['meta']['offloading_modes']]) - return list(offloading_types) - - @logwrap - def update_offloads(self, node_id, update_values, interface_to_update): - """Update offloading modes for the corresponding interface - - :param node_id: int, nailgun node id - :param update_values: dict, pair of mode name and value - :param interface_to_update: str, target iface name - """ - ifaces = self.client.get_node_interfaces(node_id) - # get target iface - for i in ifaces: - if i['name'] == interface_to_update: - iface = i - break - - def prepare_offloading_modes(types): - return [{'name': name, 'state': types[name], 'sub': []} - for name in types] - - if 'interface_properties' in iface: - logger.debug("Using old interface serialization scheme") - offloading_modes = prepare_offloading_modes(update_values) - for new_mode in offloading_modes: - for mode in iface['offloading_modes']: - if mode['name'] == new_mode['name']: - mode.update(new_mode) - break - else: - raise Exception("Offload type '{0}' is not applicable" - " for interface {1}".format( - new_mode['name'], - interface_to_update)) - else: - logger.debug("Using new interface serialization scheme") - for mode in update_values: - iface['attributes']['offloading']['modes']['value'][mode] = \ - update_values[mode] - - self.client.put_node_interfaces( - [{'id': 
node_id, 'interfaces': ifaces}])
-
-    def set_mtu(self, nailgun_node_id, iface, mtu=1500):
-        """Set MTU for the corresponding interface
-
-        :param nailgun_node_id: int, nailgun node id
-        :param iface: str, interface name
-        :param mtu: int, value of MTU
-        """
-        ifaces = self.client.get_node_interfaces(nailgun_node_id)
-        # get target iface
-        for i in ifaces:
-            if i['name'] == iface:
-                target_iface = i
-                break
-
-        if 'interface_properties' in target_iface:
-            logger.debug("Using old interface serialization scheme")
-            target_iface['interface_properties']['mtu'] = mtu
-        else:
-            logger.debug("Using new interface serialization scheme")
-            target_iface['attributes']['mtu']['value']['value'] = mtu
-        self.client.put_node_interfaces([{'id': nailgun_node_id,
-                                          'interfaces': ifaces}])
-
-    def disable_offloading(self, nailgun_node_id, iface,
-                           offloading=False):
-        """Set the 'disable offloading' flag for the corresponding interface
-
-        :param nailgun_node_id: int, nailgun node id
-        :param iface: str, interface name
-        :param offloading: bool, value written to the 'disable offloading'
-                           flag (True turns offloading off)
-        """
-        ifaces = self.client.get_node_interfaces(nailgun_node_id)
-        # get target iface
-        for i in ifaces:
-            if i['name'] == iface:
-                target_iface = i
-                break
-
-        if 'interface_properties' in target_iface:
-            logger.debug("Using old interface serialization scheme")
-            target_iface['interface_properties']['disable_offloading'] = \
-                offloading
-        else:
-            logger.debug("Using new interface serialization scheme")
-            target_iface['attributes']['offloading']['disable']['value'] = \
-                offloading
-        self.client.put_node_interfaces([{'id': nailgun_node_id,
-                                          'interfaces': ifaces}])
-
-    def change_default_network_settings(self):
-        def fetch_networks(networks):
-            """Parse response from api/releases/1/networks and return dict
-            with networks' settings - needed to avoid hardcoded values"""
-            result = {}
-            for net in networks:
-                if (net['name'] == 'private' and
-                        net.get('seg_type', '') == 'tun'):
-                    result['private_tun'] = net
-                elif (net['name'] == 'private' and
-                        net.get('seg_type', '') == 'gre'):
-                    result['private_gre'] = net
-                elif net['name'] == 'public':
-                    result['public'] = net
-                elif net['name'] == 'management':
-                    result['management'] = net
-                elif net['name'] == 'storage':
-                    result['storage'] = net
-                elif net['name'] == 'baremetal':
-                    result['baremetal'] = net
-            return result
-
-        default_networks = {}
-
-        for n in ('public', 'management', 'storage', 'private'):
-            if self.environment.d_env.get_networks(name=n):
-                default_networks[n] = self.environment.d_env.get_network(
-                    name=n).ip
-
-        logger.info("Applying default network settings")
-        for _release in self.client.get_releases():
-            if (_release['is_deployable'] is False and
-                    _release['state'] != 'available'):
-                logger.info("Release {!r} (version {!r}) is not available for "
-                            "deployment; skipping default network "
-                            "replacement".format(_release['name'],
-                                                 _release['version']))
-                continue
-
-            logger.info(
-                'Applying changes for release: {}'.format(
-                    _release['name']))
-            net_settings = \
-                self.client.get_release_default_net_settings(
-                    _release['id'])
-            for net_provider in NETWORK_PROVIDERS:
-                if net_provider not in net_settings:
-                    continue
-
-                networks = fetch_networks(
-                    net_settings[net_provider]['networks'])
-
-                networks['public']['cidr'] = str(default_networks['public'])
-                networks['public']['gateway'] = str(
-                    default_networks['public'].network + 1)
-                networks['public']['notation'] = 'ip_ranges'
-
-                # use the first half of public network as static public range
-                networks['public']['ip_range'] = self.get_range(
-
default_networks['public'], ip_range=-1)[0] - - # use the second half of public network as floating range - net_settings[net_provider]['config']['floating_ranges'] = \ - self.get_range(default_networks['public'], ip_range=1) - - devops_env = self.environment.d_env - - # NOTE(akostrikov) possible break. - if 'baremetal' in networks and \ - devops_env.get_networks(name='ironic'): - ironic_net = self.environment.d_env.get_network( - name='ironic').ip - prefix = netaddr.IPNetwork( - str(ironic_net.cidr) - ).prefixlen - subnet1, subnet2 = tuple(ironic_net.subnet(prefix + 1)) - networks['baremetal']['cidr'] = str(ironic_net) - net_settings[net_provider]['config'][ - 'baremetal_gateway'] = str(ironic_net[-2]) - networks['baremetal']['ip_range'] = [ - str(subnet1[2]), str(subnet2[0])] - net_settings[net_provider]['config']['baremetal_range'] =\ - [str(subnet2[1]), str(subnet2[-3])] - networks['baremetal']['vlan_start'] = None - - if BONDING: - # leave defaults for mgmt, storage and private if - # BONDING is enabled - continue - for net, cidr in default_networks.items(): - if net in ('public', 'private'): - continue - networks[net]['cidr'] = str(cidr) - networks[net]['ip_range'] = self.get_range(cidr)[0] - networks[net]['notation'] = 'ip_ranges' - networks[net]['vlan_start'] = None - - if net_provider == 'neutron': - networks['private_tun']['cidr'] = str( - default_networks['private']) - networks['private_gre']['cidr'] = str( - default_networks['private']) - - net_settings[net_provider]['config']['internal_cidr'] = \ - str(default_networks['private']) - net_settings[net_provider]['config']['internal_gateway'] =\ - str(default_networks['private'][1]) - - elif net_provider == 'nova_network': - net_settings[net_provider]['config'][ - 'fixed_networks_cidr'] = str( - default_networks['private']) - - self.client.put_release_default_net_settings( - _release['id'], net_settings) - - @logwrap - def update_nodegroups_network_configuration(self, cluster_id, - nodegroups=None): - net_config = self.client.get_networks(cluster_id) - new_settings = net_config - - for nodegroup in nodegroups: - logger.info('Update network settings of cluster %s, ' - 'nodegroup %s', cluster_id, nodegroup['name']) - new_settings = self.update_nodegroup_net_settings(new_settings, - nodegroup, - cluster_id) - self.client.update_network( - cluster_id=cluster_id, - networking_parameters=new_settings["networking_parameters"], - networks=new_settings["networks"] - ) - - @staticmethod - def _get_true_net_name(name, net_pools): - """Find a devops network name in net_pools""" - for net in net_pools: - if name in net: - return {name: net_pools[net]} - - def update_nodegroup_net_settings(self, network_configuration, nodegroup, - cluster_id=None): - seg_type = network_configuration.get('networking_parameters', {}) \ - .get('segmentation_type') - nodegroup_id = self.get_nodegroup(cluster_id, nodegroup['name'])['id'] - for net in network_configuration.get('networks'): - if net['group_id'] == nodegroup_id: - # Do not overwrite default PXE admin network configuration - if nodegroup['name'] == 'default' and \ - net['name'] == 'fuelweb_admin': - continue - self.set_network(net_config=net, - net_name=net['name'], - net_devices=nodegroup['networks'], - seg_type=seg_type) - # For all admin/pxe networks except default use master - # node as router - # TODO(mstrukov): find way to get admin node networks only - if net['name'] != 'fuelweb_admin': - continue - for devops_network in self.environment.d_env.get_networks(): - if str(devops_network.ip_network) == 
net['cidr']: - net['gateway'] = \ - self.environment.d_env.nodes().\ - admin.get_ip_address_by_network_name( - devops_network.name) - logger.info('Set master node ({0}) as ' - 'router for admin network ' - 'in nodegroup {1}.'.format( - net['gateway'], nodegroup_id)) - return network_configuration - - def set_network(self, net_config, net_name, net_devices=None, - seg_type=None): - nets_wo_floating = ['public', 'management', 'storage', 'baremetal'] - if (seg_type == NEUTRON_SEGMENT['tun'] or - seg_type == NEUTRON_SEGMENT['gre']): - nets_wo_floating.append('private') - - if not net_devices: - if not BONDING: - if 'floating' == net_name: - self.net_settings(net_config, 'public', floating=True) - elif net_name in nets_wo_floating: - self.net_settings(net_config, net_name) - else: - ip_obj = self.environment.d_env.get_network(name="public").ip - pub_subnets = list(ip_obj.subnet(new_prefix=27)) - if "floating" == net_name: - self.net_settings(net_config, pub_subnets[0], - floating=True, jbond=True) - elif net_name in nets_wo_floating: - i = nets_wo_floating.index(net_name) - self.net_settings(net_config, pub_subnets[i], jbond=True) - else: - if not BONDING: - if 'floating' == net_name: - self.net_settings(net_config, net_devices['public'], - floating=True) - self.net_settings(net_config, net_devices[net_name]) - else: - ip_obj = self.environment.d_env.get_network( - name=net_devices['public']).ip - pub_subnets = list(ip_obj.subnet(new_prefix=27)) - - if "floating" == net_name: - self.net_settings(net_config, pub_subnets[0], - floating=True, jbond=True) - elif net_name in nets_wo_floating: - i = nets_wo_floating.index(net_name) - self.net_settings(net_config, pub_subnets[i], jbond=True) - elif net_name in 'fuelweb_admin': - self.net_settings(net_config, net_devices['fuelweb_admin']) - if 'ip_ranges' in net_config: - if net_config['ip_ranges']: - net_config['meta']['notation'] = 'ip_ranges' - - def net_settings(self, net_config, net_name, floating=False, jbond=False): - if jbond: - if net_config['name'] == 'public': - net_config['gateway'] = self.environment.d_env.router('public') - ip_network = net_name - elif net_config['name'] == 'baremetal': - baremetal_net = self.environment.d_env.get_network( - name='ironic').ip_network - net_config['gateway'] = str( - list(netaddr.IPNetwork(str(baremetal_net)))[-2]) - ip_network = baremetal_net - else: - ip_network = net_name - else: - net_config['vlan_start'] = None - if net_config['name'] == 'baremetal': - baremetal_net = self.environment.d_env.get_network( - name='ironic').ip_network - net_config['gateway'] = str( - list(netaddr.IPNetwork(str(baremetal_net)))[-2]) - ip_network = baremetal_net - else: - net_config['gateway'] = self.environment.d_env.router(net_name) - ip_network = self.environment.d_env.get_network( - name=net_name).ip_network - - net_config['cidr'] = str(ip_network) - - if 'admin' in net_config['name']: - net_config['ip_ranges'] = self.get_range(ip_network, 2) - elif floating: - net_config['ip_ranges'] = self.get_range(ip_network, 1) - else: - net_config['ip_ranges'] = self.get_range(ip_network, -1) - - @staticmethod - def get_range(ip_network, ip_range=0): - net = list(netaddr.IPNetwork(str(ip_network))) - half = len(net) // 2 - if ip_range == 0: - return [[str(net[2]), str(net[-2])]] - elif ip_range == 1: - return [[str(net[half]), str(net[-2])]] - elif ip_range == -1: - return [[str(net[2]), str(net[half - 1])]] - elif ip_range == 2: - return [[str(net[3]), str(net[half - 1])]] - elif ip_range == 3: - return [[str(net[half]), 
str(net[-3])]]
-
-    def get_floating_ranges(self, network_set=''):
-        net_name = 'public{0}'.format(network_set)
-        net = list(self.environment.d_env.get_network(name=net_name).ip)
-        ip_ranges, expected_ips = [], []
-
-        for i in [0, -20, -40]:
-            ips = []
-            for k in range(11):
-                ips.append(str(net[-12 + i + k]))
-            expected_ips.append(ips)
-            start_ip, end_ip = str(net[-12 + i]), str(net[-2 + i])
-            ip_ranges.append([start_ip, end_ip])
-
-        return ip_ranges, expected_ips
-
-    @logwrap
-    def get_nailgun_node_online_status(self, node):
-        return self.client.get_node_by_id(node['id'])['online']
-
-    def get_devops_node_online_status(self, devops_node):
-        return self.get_nailgun_node_online_status(
-            self.get_nailgun_node_by_devops_node(devops_node))
-
-    def warm_shutdown_nodes(self, devops_nodes, timeout=10 * 60):
-        logger.info('Shutting down (warm) nodes %s',
-                    [n.name for n in devops_nodes])
-        for node in devops_nodes:
-            logger.debug('Shutdown node %s', node.name)
-            nailgun_node = self.get_nailgun_node_by_devops_node(node)
-            # TODO: LP1620680
-            self.ssh_manager.check_call(ip=nailgun_node['ip'], sudo=True,
-                                        command='sudo shutdown +1')
-        for node in devops_nodes:
-            self.wait_node_is_offline(node, timeout=timeout)
-            node.destroy()
-
-    def warm_start_nodes(self, devops_nodes, timeout=4 * 60):
-        logger.info('Starting nodes %s', [n.name for n in devops_nodes])
-        for node in devops_nodes:
-            node.start()
-        self.wait_nodes_get_online_state(devops_nodes, timeout=timeout)
-
-    def warm_restart_nodes(self, devops_nodes, timeout=10 * 60):
-        logger.info('Reboot (warm restart) nodes %s',
-                    [n.name for n in devops_nodes])
-        self.warm_shutdown_nodes(devops_nodes, timeout=timeout)
-        self.warm_start_nodes(devops_nodes, timeout=timeout)
-
-    def cold_restart_nodes(self, devops_nodes,
-                           wait_offline=True, wait_online=True,
-                           wait_after_destroy=None, timeout=4 * 60):
-        logger.info('Cold restart nodes %s',
-                    [n.name for n in devops_nodes])
-        for node in devops_nodes:
-            logger.info('Destroy node %s', node.name)
-            node.destroy()
-        for node in devops_nodes:
-            if wait_offline:
-                self.wait_node_is_offline(node, timeout=timeout)
-
-        if wait_after_destroy:
-            time.sleep(wait_after_destroy)
-
-        for node in devops_nodes:
-            logger.info('Start %s node', node.name)
-            node.start()
-        if wait_online:
-            for node in devops_nodes:
-                self.wait_node_is_online(node, timeout=timeout)
-        self.environment.sync_time()
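cold_restart_nodes above is a phase-ordered power cycle: destroy the whole set, confirm every node went offline, start the set, confirm every node is back. A condensed sketch with a hypothetical node object exposing destroy(), start() and is_online():

    import time

    def wait_until(predicate, timeout=240, interval=5):
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return
            time.sleep(interval)
        raise RuntimeError('condition not met within {0} s'.format(timeout))

    def cold_restart(nodes, pause=None):
        for node in nodes:     # phase 1: hard power-off
            node.destroy()
        for node in nodes:     # phase 2: confirm all dropped offline
            wait_until(lambda n=node: not n.is_online())
        if pause:
            time.sleep(pause)  # optional settle time
        for node in nodes:     # phase 3: power on
            node.start()
        for node in nodes:     # phase 4: confirm all back online
            wait_until(lambda n=node: n.is_online())

Running each phase over the whole set, rather than node by node, keeps the total wait bounded by the slowest node instead of the sum of all nodes.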
-
-    @logwrap
-    def ip_address_show(self, node_name, interface, namespace=None):
-        """Return the IPv4 address of an interface on the given node,
-        optionally inside a network namespace
-
-        :type node_name: String
-        :type namespace: String
-        :type interface: String
-        :rtype: String or None
-        """
-        try:
-            if namespace:
-                cmd = 'ip netns exec {0} ip -4 ' \
-                      '-o address show {1}'.format(namespace, interface)
-            else:
-                cmd = 'ip -4 -o address show {0}'.format(interface)
-
-            with self.get_ssh_for_node(node_name) as remote:
-                ret = remote.check_call(cmd)
-
-            ip_search = re.search(
-                'inet (?P<ip>\d+\.\d+\.\d+\.\d+/\d+).*scope .* '
-                '{0}'.format(interface), ' '.join(ret['stdout']))
-            if ip_search is None:
-                logger.debug("'ip address show' output does not match the "
-                             "regex; returning None. On node {0} in netns "
-                             "{1} for interface {2}".format(node_name,
-                                                            namespace,
-                                                            interface))
-                return None
-            return ip_search.group('ip')
-        except DevopsCalledProcessError as err:
-            logger.error(err)
-            return None
-
-    @logwrap
-    def ip_address_del(self, node_name, namespace, interface, ip):
-        logger.info('Delete %s ip address of %s interface at %s node',
-                    ip, interface, node_name)
-        with self.get_ssh_for_node(node_name) as remote:
-            remote.check_call(
-                'ip netns exec {0} ip addr'
-                ' del {1} dev {2}'.format(namespace, ip, interface))
-
-    @logwrap
-    def provisioning_cluster_wait(self, cluster_id, progress=None):
-        logger.info('Start cluster #%s provisioning', cluster_id)
-        task = self.client.provision_nodes(cluster_id)
-        self.assert_task_success(task, progress=progress)
-
-    @logwrap
-    def deploy_custom_graph_wait(self,
-                                 cluster_id,
-                                 graph_type,
-                                 node_ids=None,
-                                 tasks=None,
-                                 progress=None):
-        """Deploy custom graph of a given type.
-
-        :param cluster_id: Id of a cluster to deploy
-        :param graph_type: Custom graph type to deploy
-        :param node_ids: Ids of nodes to deploy. None means all
-        :param tasks: list of tasks. None means all
-        :param progress: Progress at which to count deployment as a success.
-        """
-        logger.info('Start deployment of custom graph "{type}" on cluster '
-                    '#{cid}, nodes: {nodes}, tasks: {tasks} '
-                    '(None means all).'.format(
-                        cid=cluster_id,
-                        type=graph_type,
-                        tasks=tasks,
-                        nodes=node_ids
-                    ))
-        task = self.client.deploy_custom_graph(cluster_id,
-                                               graph_type,
-                                               node_ids, tasks)
-        self.assert_task_success(task, progress=progress)
-
-    @logwrap
-    def deploy_task_wait(self, cluster_id, progress=None):
-        logger.info('Start cluster #%s deployment', cluster_id)
-        task = self.client.deploy_nodes(cluster_id)
-        self.assert_task_success(
-            task, progress=progress)
-
-    @logwrap
-    def stop_deployment_wait(self, cluster_id):
-        logger.info('Stop cluster #%s deployment', cluster_id)
-        task = self.client.stop_deployment(cluster_id)
-        self.assert_task_success(task, timeout=50 * 60, interval=30)
-
-    @logwrap
-    def stop_reset_env_wait(self, cluster_id):
-        logger.info('Reset cluster #%s', cluster_id)
-        task = self.client.reset_environment(cluster_id)
-        self.assert_task_success(task, timeout=50 * 60, interval=30)
-
-    @logwrap
-    def delete_env_wait(self, cluster_id, timeout=10 * 60):
-        logger.info('Removing cluster with id={0}'.format(cluster_id))
-        self.client.delete_cluster(cluster_id)
-        tasks = self.client.get_tasks()
-        delete_tasks = [t for t in tasks if t['status']
-                        in ('pending', 'running') and
-                        t['name'] == 'cluster_deletion' and
-                        t['cluster'] == cluster_id]
-        if delete_tasks:
-            for task in delete_tasks:
-                logger.info('Task found: {}'.format(task))
-            task = delete_tasks[0]
-            logger.info('Selected task: {}'.format(task))
-
-            # Task will be removed with the cluster, so we will get 404 error
-            assert_raises(
-                exceptions.NotFound,
-                self.assert_task_success, task, timeout)
-        else:
-            # A bare `assert '<string>'` always passes; fail explicitly here.
-            raise AssertionError('No cluster_deletion task found!')
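delete_env_wait above treats an HTTP 404 for the cluster_deletion task as the success signal, since the task record disappears together with the cluster. The same idea in isolation, with fetch_task as a hypothetical stand-in for the REST call:

    import time

    class NotFound(Exception):
        """Stand-in for the client's HTTP 404 exception."""

    def wait_cluster_deleted(fetch_task, task_id, timeout=600, interval=10):
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                task = fetch_task(task_id)
            except NotFound:
                return  # task vanished together with the cluster: success
            if task['status'] == 'error':
                raise AssertionError(
                    'cluster_deletion failed: {0}'.format(task))
            time.sleep(interval)
        raise AssertionError(
            'cluster still present after {0} s'.format(timeout))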
-
-    @logwrap
-    def wait_nodes_get_online_state(self, nodes, timeout=4 * 60):
-        for node in nodes:
-            self.wait_node_is_online(node, timeout=timeout)
-
-    @logwrap
-    def wait_nodes_get_offline_state(self, nodes, timeout=4 * 60):
-        for node in nodes:
-            self.wait_node_is_offline(node, timeout=timeout)
-
-    @logwrap
-    def wait_cluster_nodes_get_online_state(self, cluster_id,
-                                            timeout=4 * 60):
-        self.wait_nodes_get_online_state(
-            self.client.list_cluster_nodes(cluster_id),
-            timeout=timeout)
-
-    @logwrap
-    def wait_mysql_galera_is_up(self, node_names, timeout=60 * 4):
-        def _get_galera_status(_remote):
-            get_request = (
-                "mysql --connect_timeout=5 -sse "
-                "\"SELECT VARIABLE_VALUE "
-                "FROM information_schema.GLOBAL_STATUS WHERE VARIABLE_NAME"
-                " = '{}';\"").format
-            result = _remote.execute(get_request('wsrep_ready'))
-            if result.exit_code != 0 or u'ON' not in result.stdout_str:
-                return False
-            result = _remote.execute(get_request('wsrep_connected'))
-            if result.exit_code != 0 or u'ON' not in result.stdout_str:
-                return False
-            result = _remote.execute(get_request('wsrep_cluster_size'))
-            if result.exit_code != 0 or\
-                    int(result.stdout_str) < len(node_names):
-                return False  # Not all nodes have joined the cluster
-            result = _remote.execute(get_request('wsrep_cluster_status'))
-            return result.exit_code == 0 and\
-                u'Primary'.upper() in result.stdout_str.upper()
-            # PRIMARY (primary group configuration, quorum present)
-
-        for node_name in node_names:
-            with self.get_ssh_for_node(node_name) as remote:
-                wait(lambda: _get_galera_status(remote),
-                     timeout=timeout,
-                     timeout_msg="MySQL Galera isn't ready on "
-                                 "{0}".format(node_name))
-                logger.info("MySQL Galera is up on {host} node.".format(
-                    host=node_name))
-        return True
-
-    @logwrap
-    def mcollective_nodes_online(self, cluster_id):
-        nodes_uids = set([str(n['id']) for n in
-                          self.client.list_cluster_nodes(cluster_id)])
-        # 'mco find' returns '1' exit code if rabbitmq is not ready
-        out = self.ssh_manager.execute_on_remote(
-            ip=self.admin_node_ip,
-            cmd='mco find', assert_ec_equal=[0, 1])['stdout_str']
-        ready_nodes_uids = set(out.split('\n'))
-        unavailable_nodes = nodes_uids - ready_nodes_uids
-        logger.debug('Nodes {0} are not reachable via'
-                     ' mcollective'.format(unavailable_nodes))
-        return not unavailable_nodes
-
-    @logwrap
-    def wait_cinder_is_up(self, node_names):
-        logger.info("Waiting for all Cinder services to come up.")
-        for node_name in node_names:
-            node = self.get_nailgun_node_by_name(node_name)
-            wait(lambda: checkers.check_cinder_status(node['ip']),
-                 timeout=300,
-                 timeout_msg='Cinder services not ready')
-        logger.info("All Cinder services are up.")
-        return True
-
-    def run_ostf_repeatably(self, cluster_id, test_name=None,
-                            test_retries=None, checks=None):
-        res = []
-        passed_count = []
-        failed_count = []
-        test_name_to_run = test_name or OSTF_TEST_NAME
-        retries = test_retries or OSTF_TEST_RETRIES_COUNT
-        test_path = ostf_test_mapping.OSTF_TEST_MAPPING.get(test_name_to_run)
-        logger.info('Test path is {0}'.format(test_path))
-
-        for _ in range(retries):
-            result = self.run_single_ostf_test(
-                cluster_id=cluster_id, test_sets=['smoke', 'sanity'],
-                test_name=test_path,
-                retries=True)
-            res.append(result)
-            logger.info('Interim results: {0}'.format(res))
-
-        logger.info('Full results: {0}'.format(res))
-        for element in res:
-            for test in element:
-                if test.get(test_name) == 'success':
-                    passed_count.append(test)
-                elif test.get(test_name) in {'failure', 'error'}:
-                    failed_count.append(test)
-
-        if not checks:
-            assert_true(
-                len(passed_count) == 
test_retries, - 'not all retries were successful,' - ' fail {0} retries'.format(len(failed_count))) - else: - return failed_count - - def get_nailgun_version(self): - logger.info("ISO version: {}".format(pretty_log( - self.client.get_api_version(), indent=1))) - - @logwrap - def run_ceph_task(self, cluster_id, offline_nodes): - ceph_id = [n['id'] for n in self.client.list_cluster_nodes(cluster_id) - if 'ceph-osd' in n['roles'] and - n['id'] not in offline_nodes] - res = self.client.put_deployment_tasks_for_cluster( - cluster_id, data=['top-role-ceph-osd'], - node_id=str(ceph_id).strip('[]')) - logger.debug('res info is {0}'.format(res)) - - self.assert_task_success(task=res) - - @retry(count=3) - def check_ceph_time_skew(self, cluster_id, offline_nodes): - ceph_nodes = self.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['ceph-osd']) - online_ceph_nodes = [ - n for n in ceph_nodes if n['id'] not in offline_nodes] - - # Let's find nodes where are a time skew. It can be checked on - # an arbitrary one. - logger.debug("Looking up nodes with a time skew and try to fix them") - with self.get_ssh_for_nailgun_node(online_ceph_nodes[0]) as remote: - if ceph.is_clock_skew(remote): - skewed = ceph.get_node_fqdns_w_clock_skew(remote) - logger.warning("Time on nodes {0} are to be " - "re-synchronized".format(skewed)) - nodes_to_sync = [ - n for n in online_ceph_nodes - if n['fqdn'].split('.')[0] in skewed] - self.environment.sync_time(nodes_to_sync) - - try: - wait(lambda: not ceph.is_clock_skew(remote), - timeout=120) - except TimeoutError: - skewed = ceph.get_node_fqdns_w_clock_skew(remote) - logger.error("Time on Ceph nodes {0} is still skewed. " - "Restarting Ceph monitor on these " - "nodes".format(', '.join(skewed))) - - for node in skewed: - fqdn = self.get_fqdn_by_hostname(node) - d_node = self.get_devops_node_by_nailgun_fqdn(fqdn) - logger.debug("Establish SSH connection to first Ceph " - "monitor node %s", fqdn) - - with self.get_ssh_for_node(d_node.name) as remote_to_mon: - logger.debug("Restart Ceph monitor service " - "on node %s", fqdn) - ceph.restart_monitor(remote_to_mon) - - wait(lambda: not ceph.is_clock_skew(remote), timeout=120, - timeout_msg='check ceph time skew timeout') - - @logwrap - def check_ceph_status(self, cluster_id, offline_nodes=(), - recovery_timeout=360): - ceph_nodes = self.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['ceph-osd']) - online_ceph_nodes = [ - n for n in ceph_nodes if n['id'] not in offline_nodes] - - logger.info('Waiting until Ceph service become up...') - for node in online_ceph_nodes: - with self.get_ssh_for_nailgun_node(node) as remote: - wait(lambda: ceph.check_service_ready(remote) is True, - interval=20, timeout=600, - timeout_msg='Ceph service is not properly started' - ' on {0}'.format(node['name'])) - - logger.info('Ceph service is ready. Checking Ceph Health...') - self.check_ceph_time_skew(cluster_id, offline_nodes) - - node = online_ceph_nodes[0] - with self.get_ssh_for_nailgun_node(node) as remote: - if not ceph.is_health_ok(remote): - if ceph.is_pgs_recovering(remote) and len(offline_nodes) > 0: - logger.info('Ceph is being recovered after osd node(s)' - ' shutdown.') - try: - wait(lambda: ceph.is_health_ok(remote), - interval=30, timeout=recovery_timeout) - except TimeoutError: - result = ceph.health_detail(remote) - msg = 'Ceph HEALTH is not OK on {0}. Details: {1}'\ - .format(node['name'], result) - logger.error(msg) - raise TimeoutError(msg) - else: - result = ceph.health_detail(remote) - msg = 'Ceph HEALTH is not OK on {0}. 
Details: {1}'.format( - node['name'], result) - assert_true(ceph.is_health_ok(remote), msg) - - logger.info('Checking Ceph OSD Tree...') - ceph.check_disks(remote, [n['id'] for n in online_ceph_nodes]) - - logger.info('Ceph cluster status is OK') - - @logwrap - def get_releases_list_for_os(self, release_name, release_version=None): - full_list = self.client.get_releases() - release_ids = [] - for release in full_list: - if release_version: - if release_name in release['name'].lower() \ - and release_version == release['version']: - logger.debug('release data is {0}'.format(release)) - release_ids.append(release['id']) - else: - if release_name in release['name'].lower(): - release_ids.append(release['id']) - return release_ids - - @logwrap - def get_next_deployable_release_id(self, release_id): - releases = self.client.get_releases() - release_details = self.client.get_release(release_id) - - for release in releases: - if (release["id"] > release_id and - release["operating_system"] == - release_details["operating_system"] and - release["is_deployable"] and - OPENSTACK_RELEASE in release["name"].lower()): - return release["id"] - - return None - - @logwrap - def update_cluster(self, cluster_id, data): - logger.debug( - "Try to update cluster with data {0}".format(data)) - self.client.update_cluster(cluster_id, data) - - @logwrap - def run_update(self, cluster_id, timeout, interval): - logger.info("Run update..") - task = self.client.run_update(cluster_id) - logger.debug("Invocation of update runs with result {0}".format(task)) - self.assert_task_success(task, timeout=timeout, interval=interval) - - @logwrap - def get_cluster_release_id(self, cluster_id): - data = self.client.get_cluster(cluster_id) - return data['release_id'] - - def assert_nodes_in_ready_state(self, cluster_id): - for nailgun_node in self.client.list_cluster_nodes(cluster_id): - assert_equal(nailgun_node['status'], 'ready', - 'Nailgun node status is not ready but {0}'.format( - nailgun_node['status'])) - - @staticmethod - @logwrap - def modify_python_file(remote, modification, filename): - remote.execute('sed -i "{0}" {1}'.format(modification, filename)) - - @staticmethod - def backup_master(remote): - # FIXME(kozhukalov): This approach is outdated - # due to getting rid of docker containers. - logger.info("Backup of the master node is started.") - remote.check_call( - "echo CALC_MY_MD5SUM > /etc/fuel/data", - error_info='command calc_my_mdsum failed') - remote.check_call( - "iptables-save > /etc/fuel/iptables-backup", - error_info='can not save iptables in iptables-backup') - remote.check_call( - "md5sum /etc/fuel/data | cut -d' ' -f1 > /etc/fuel/sum", - error_info='failed to create sum file') - remote.check_call('dockerctl backup') - remote.check_call( - 'rm -f /etc/fuel/data', - error_info='Can not remove /etc/fuel/data') - logger.info("Backup of the master node is complete.") - - @logwrap - def restore_master(self, ip): - # FIXME(kozhukalov): This approach is outdated - # due to getting rid of docker containers. 
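backup_master above plants a marker file, records its md5 before the backup, and leaves the checksum on disk so a later restore can be verified. The same trick as a standalone sketch; the paths here are illustrative, not the ones the helper uses:

    import hashlib

    def md5_of(path):
        with open(path, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()

    def plant_marker(path='/tmp/backup-marker'):
        # Write a known payload and remember its checksum before the backup.
        with open(path, 'w') as f:
            f.write('CALC_MY_MD5SUM\n')
        return md5_of(path)

    def verify_marker(expected, path='/tmp/backup-marker'):
        # After the restore, the marker must hash to the recorded value.
        actual = md5_of(path)
        assert actual == expected, '{0} != {1}'.format(actual, expected)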
- logger.info("Restore of the master node is started.") - path = checkers.find_backup(ip) - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='dockerctl restore {0}'.format(path)) - logger.info("Restore of the master node is complete.") - - @logwrap - def restore_check_nailgun_api(self): - logger.info("Restore check nailgun api") - info = self.client.get_api_version() - os_version = info["openstack_version"] - assert_true(os_version, 'api version returned empty data') - - @logwrap - def get_nailgun_cidr_nova(self, cluster_id): - return self.client.get_networks(cluster_id).\ - get("networking_parameters").get("fixed_networks_cidr") - - @logwrap - def get_nailgun_cidr_neutron(self, cluster_id): - return self.client.get_networks(cluster_id).\ - get("networking_parameters").get("internal_cidr") - - @logwrap - def check_fixed_network_cidr(self, cluster_id, os_conn): - net_provider = self.client.get_cluster(cluster_id)['net_provider'] - if net_provider == 'nova_network': - nailgun_cidr = self.get_nailgun_cidr_nova(cluster_id) - logger.debug('nailgun cidr is {0}'.format(nailgun_cidr)) - net = os_conn.nova_get_net('novanetwork') - logger.debug('nova networks: {0}'.format( - net)) - assert_equal(nailgun_cidr, net.cidr.rstrip(), - 'Cidr after deployment is not equal' - ' to cidr by default') - - elif net_provider == 'neutron': - nailgun_cidr = self.get_nailgun_cidr_neutron(cluster_id) - logger.debug('nailgun cidr is {0}'.format(nailgun_cidr)) - private_net_name = self.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - subnet = os_conn.get_subnet('{0}__subnet'.format(private_net_name)) - logger.debug('subnet of pre-defined fixed network: {0}'.format( - subnet)) - assert_true(subnet, '{0}__subnet does not exists'.format( - private_net_name)) - logger.debug('cidr {0}__subnet: {1}'.format( - private_net_name, subnet['cidr'])) - assert_equal(nailgun_cidr, subnet['cidr'].rstrip(), - 'Cidr after deployment is not equal' - ' to cidr by default') - - @staticmethod - @logwrap - def check_fixed_nova_splited_cidr(os_conn, nailgun_cidr): - logger.debug('Nailgun cidr for nova: {0}'.format(nailgun_cidr)) - - subnets_list = [net.cidr for net in os_conn.get_nova_network_list()] - logger.debug('Nova subnets list: {0}'.format(subnets_list)) - - # Check that all subnets are included in nailgun_cidr - for subnet in subnets_list: - logger.debug("Check that subnet {0} is part of network {1}" - .format(subnet, nailgun_cidr)) - assert_true(netaddr.IPNetwork(str(subnet)) in - netaddr.IPNetwork(str(nailgun_cidr)), - 'Something goes wrong. 
Seems subnet {0} is out ' - 'of net {1}'.format(subnet, nailgun_cidr)) - - # Check that any subnet doesn't include any other subnet - subnets_pairs = [(subnets_list[x1], subnets_list[x2]) - for x1 in range(len(subnets_list)) - for x2 in range(len(subnets_list)) - if x1 != x2] - for subnet1, subnet2 in subnets_pairs: - logger.debug("Check if the subnet {0} is part of the subnet {1}" - .format(subnet1, subnet2)) - assert_true(netaddr.IPNetwork(str(subnet1)) not in - netaddr.IPNetwork(str(subnet2)), - "Subnet {0} is part of subnet {1}" - .format(subnet1, subnet2)) - - def update_internal_network(self, cluster_id, cidr, gateway=None): - net_provider = self.client.get_cluster(cluster_id)['net_provider'] - net_config = self.client.get_networks(cluster_id) - data = (cluster_id, net_config["networking_parameters"], - net_config["networks"]) - if net_provider == 'nova_network': - net_config["networking_parameters"]['fixed_networks_cidr']\ - = cidr - self.client.update_network(*data) - elif net_provider == 'neutron': - net_config["networking_parameters"]['internal_cidr']\ - = cidr - net_config["networking_parameters"]['internal_gateway']\ - = gateway - self.client.update_network(*data) - - def get_cluster_mode(self, cluster_id): - return self.client.get_cluster(cluster_id)['mode'] - - def get_public_ip(self, cluster_id): - # Find a controller and get it's IP for public network - network_data = [ - node['network_data'] - for node in self.client.list_cluster_nodes(cluster_id) - if "controller" in node['roles']][0] - pub_ip = [net['ip'] for net in network_data - if "public" in net['name']][0] - return pub_ip.split('/')[0] - - def get_public_vip(self, cluster_id): - if self.get_cluster_mode(cluster_id) == DEPLOYMENT_MODE_HA: - return self.client.get_networks( - cluster_id)['vips']['public']['ipaddr'] - else: - logger.error("Public VIP for cluster '{0}' not found, searching " - "for public IP on the controller".format(cluster_id)) - ip = self.get_public_ip(cluster_id) - logger.info("Public IP found: {0}".format(ip)) - return ip - - def get_management_vrouter_vip(self, cluster_id): - return self.client.get_networks( - cluster_id)['vips']['vrouter']['ipaddr'] - - def get_mgmt_vip(self, cluster_id): - return self.client.get_networks( - cluster_id)['vips']['management']['ipaddr'] - - def get_public_vrouter_vip(self, cluster_id): - return self.client.get_networks( - cluster_id)['vips']['vrouter_pub']['ipaddr'] - - @logwrap - def get_controller_with_running_service(self, slave, service_name): - ret = self.get_pacemaker_status(slave.name) - logger.debug("pacemaker status is {0}".format(ret)) - node_name = re.search(service_name, ret).group(1) - logger.debug("node name is {0}".format(node_name)) - fqdn = self.get_fqdn_by_hostname(node_name) - devops_node = self.find_devops_node_by_nailgun_fqdn( - fqdn, self.environment.d_env.nodes().slaves) - return devops_node - - @staticmethod - @logwrap - def get_fqdn_by_hostname(hostname): - return ( - hostname + DNS_SUFFIX if DNS_SUFFIX not in hostname else hostname - ) - - def get_nodegroup(self, cluster_id, name='default', group_id=None): - ngroups = self.client.get_nodegroups() - for group in ngroups: - if group['cluster_id'] == cluster_id and group['name'] == name: - if group_id and group['id'] != group_id: - continue - return group - return None - - def update_nodegroups(self, cluster_id, node_groups): - for ngroup in node_groups: - if not self.get_nodegroup(cluster_id, name=ngroup): - self.client.create_nodegroup(cluster_id, ngroup) - # Assign nodes to nodegroup if 
nodes are specified - if len(node_groups[ngroup]) > 0: - ngroup_id = self.get_nodegroup(cluster_id, name=ngroup)['id'] - self.client.assign_nodegroup(ngroup_id, node_groups[ngroup]) - - @logwrap - def get_nailgun_primary_node(self, slave, role='primary-controller'): - # returns controller or mongo that is primary in nailgun - with self.get_ssh_for_node(slave.name) as remote: - try: - with remote.open('/etc/hiera/cluster.yaml') as f: - data = yaml.safe_load(f) - # TODO(sbog): remove check for astute.yaml open as LP1660308 - # for fuel-library will be merged. - except IOError: - with remote.open('/etc/astute.yaml') as f: - data = yaml.safe_load(f) - nodes = data['network_metadata']['nodes'] - node_name = [node['fqdn'] for node in nodes.values() - if role in node['node_roles']][0] - logger.debug("node name is {0}".format(node_name)) - fqdn = self.get_fqdn_by_hostname(node_name) - devops_node = self.get_devops_node_by_nailgun_fqdn(fqdn) - return devops_node - - @logwrap - def get_rabbit_master_node(self, node, fqdn_needed=False): - with self.get_ssh_for_node(node) as remote: - cmd = 'crm resource status master_p_rabbitmq-server' - output = ''.join(remote.execute(cmd)['stdout']) - master_node = re.search( - 'resource master_p_rabbitmq-server is running on: (.*) Master', - output).group(1) - if fqdn_needed: - return master_node - else: - devops_node = self.find_devops_node_by_nailgun_fqdn( - master_node, self.environment.d_env.nodes().slaves) - return devops_node - - def check_plugin_exists(self, cluster_id, plugin_name, section='editable'): - attr = self.client.get_cluster_attributes(cluster_id)[section] - return plugin_name in attr - - @logwrap - def list_cluster_enabled_plugins(self, cluster_id): - enabled_plugins = [] - all_plugins = self.client.plugins_list() - cl_attrib = self.client.get_cluster_attributes(cluster_id) - for plugin in all_plugins: - plugin_name = plugin['name'] - if plugin_name in cl_attrib['editable']: - if cl_attrib['editable'][plugin_name]['metadata']['enabled']: - enabled_plugins.append(plugin) - logger.info('{} plugin is enabled ' - 'in cluster id={}'.format(plugin_name, - cluster_id)) - return enabled_plugins - - def update_plugin_data(self, cluster_id, plugin_name, data): - attr = self.client.get_cluster_attributes(cluster_id) - # Do not re-upload anything, except selected plugin data - plugin_attributes = { - 'editable': {plugin_name: attr['editable'][plugin_name]}} - - for option, value in data.items(): - plugin_data = plugin_attributes['editable'][plugin_name] - path = option.split("/") - """Key 'metadata' can be in section - plugin_data['metadata']['versions'] - For enable/disable plugin value must be set in - plugin_data['metadata']['enabled'] - """ - if 'metadata' in path: - plugin_data['metadata'][path[-1]] = value - elif 'versions' in plugin_data['metadata']: - for version in plugin_data['metadata']['versions']: - for p in path[:-1]: - version = version[p] - version[path[-1]] = value - else: - for p in path[:-1]: - plugin_data = plugin_data[p] - plugin_data[path[-1]] = value - self.client.update_cluster_attributes(cluster_id, plugin_attributes) - - def get_plugin_data(self, cluster_id, plugin_name, version): - """Return data (settings) for specified version of plugin - - :param cluster_id: int - :param plugin_name: string - :param version: string - :return: dict - """ - attr = self.client.get_cluster_attributes(cluster_id) - plugin_data = attr['editable'][plugin_name] - plugin_versions = plugin_data['metadata']['versions'] - for p in plugin_versions: - if 
p['metadata']['plugin_version'] == version:
-                return p
-        raise AssertionError("Plugin {0} version {1} is not "
-                             "found".format(plugin_name, version))
-
-    def update_plugin_settings(self, cluster_id, plugin_name, version, data,
-                               enabled=True):
-        """Update settings for the specified version of a plugin
-
-        :param cluster_id: int
-        :param plugin_name: string
-        :param version: string
-        :param data: dict - settings for the plugin
-        :param enabled: bool - whether the plugin should be enabled
-        :return: None
-        """
-        attr = self.client.get_cluster_attributes(cluster_id)
-        plugin_versions = attr['editable'][plugin_name]['metadata']['versions']
-        if enabled:
-            attr['editable'][plugin_name]['metadata']['enabled'] = True
-        plugin_data = None
-        for item in plugin_versions:
-            if item['metadata']['plugin_version'] == version:
-                plugin_data = item
-                break
-        assert_true(plugin_data is not None, "Plugin {0} version {1} is not "
-                    "found".format(plugin_name, version))
-        for option, value in data.items():
-            path = option.split("/")
-            plugin_settings = plugin_data
-            for p in path[:-1]:
-                plugin_settings = plugin_settings[p]
-            plugin_settings[path[-1]] = value
-        self.client.update_cluster_attributes(cluster_id, attr)
-
-    @staticmethod
-    @logwrap
-    def prepare_ceph_to_delete(remote_ceph):
-        hostname = ''.join(remote_ceph.execute(
-            "hostname -s")['stdout']).strip()
-        osd_tree = ceph.get_osd_tree(remote_ceph)
-        logger.debug("osd tree is {0}".format(osd_tree))
-        ids = []
-        for osd in osd_tree['nodes']:
-            if hostname in osd['name']:
-                ids = osd['children']
-
-        logger.debug("ids are {}".format(ids))
-        assert_true(ids, "osd ids for {} weren't found".format(hostname))
-        for osd_id in ids:
-            remote_ceph.execute("ceph osd out {}".format(osd_id))
-        wait(lambda: ceph.is_health_ok(remote_ceph),
-             interval=30, timeout=10 * 60,
-             timeout_msg='ceph health ok timeout')
-        for osd_id in ids:
-            if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
-                if UBUNTU_SERVICE_PROVIDER == 'systemd':
-                    remote_ceph.execute("systemctl stop ceph-osd@{}"
-                                        .format(osd_id))
-                else:
-                    remote_ceph.execute("stop ceph-osd id={}"
-                                        .format(osd_id))
-            else:
-                remote_ceph.execute("service ceph stop osd.{}".format(osd_id))
-            remote_ceph.execute("ceph osd crush remove osd.{}".format(osd_id))
-            remote_ceph.execute("ceph auth del osd.{}".format(osd_id))
-            remote_ceph.execute("ceph osd rm osd.{}".format(osd_id))
-        # remove the ceph node from the crush map
-        remote_ceph.execute("ceph osd crush remove {}".format(hostname))
-
-    @logwrap
-    def get_rabbit_slaves_node(self, node, fqdn_needed=False):
-        with self.get_ssh_for_node(node) as remote:
-            cmd = 'crm resource status master_p_rabbitmq-server'
-            list_output = ''.join(remote.execute(cmd)['stdout']).split('\n')
-            filtered_list = [el for el in list_output
-                             if el and not el.endswith('Master')]
-            slaves_nodes = []
-            for el in filtered_list:
-                slaves_nodes.append(
-                    re.search('resource master_p_rabbitmq-server is running on:'
-                              ' (.*)', el).group(1).strip())
-            if fqdn_needed:
-                return slaves_nodes
-            else:
-                devops_nodes = [self.find_devops_node_by_nailgun_fqdn(
-                    slave_node, self.environment.d_env.nodes().slaves)
-                    for slave_node in slaves_nodes]
-                return devops_nodes
-
-    @logwrap
-    def run_deployment_tasks(self, cluster_id, nodes, tasks):
-        self.client.put_deployment_tasks_for_cluster(
-            cluster_id=cluster_id, data=tasks,
-            node_id=','.join(map(str, nodes)))
-        tasks = self.client.get_tasks()
-        deploy_tasks = [t for t in tasks if t['status']
-                        in ('pending', 'running') and
-                        t['name'] == 'deployment' and
-                        t['cluster'] == cluster_id]
-        for task in deploy_tasks:
-            if min([t['progress'] for t in deploy_tasks]) == task['progress']:
-                return task
-
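The two plugin-settings methods above address nested plugin attributes with slash-separated option paths ("a/b/c"). A minimal standalone sketch of that traversal convention; set_by_path and the sample dict are illustrative only, not part of the original client:

    def set_by_path(settings, option_path, value):
        """Walk a nested dict along an 'a/b/c' path and set the leaf."""
        keys = option_path.split('/')
        node = settings
        for key in keys[:-1]:
            node = node[key]
        node[keys[-1]] = value

    # Hypothetical plugin attributes, shaped like Nailgun's 'editable' data.
    plugin = {'storage': {'osd_pool_size': {'value': '2'}}}
    set_by_path(plugin, 'storage/osd_pool_size/value', '3')
    assert plugin['storage']['osd_pool_size']['value'] == '3'
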
-    @logwrap
-    def wait_deployment_tasks(self, cluster_id, nodes, tasks, timeout=60 * 10):
-        task = self.run_deployment_tasks(cluster_id, nodes, tasks)
-        assert_is_not_none(task,
-                           'Got empty result after running deployment tasks!')
-        self.assert_task_success(task, timeout)
-
-    @logwrap
-    def get_alive_proxy(self, cluster_id, port='8888'):
-        online_controllers = [node for node in
-                              self.get_nailgun_cluster_nodes_by_roles(
-                                  cluster_id,
-                                  roles=['controller', ]) if node['online']]
-
-        with self.environment.d_env.get_admin_remote() as admin_remote:
-            check_proxy_cmd = ('[[ $(curl -s -w "%{{http_code}}" '
-                               '{0} -o /dev/null) -eq 200 ]]')
-
-            for controller in online_controllers:
-                proxy_url = 'http://{0}:{1}/'.format(controller['ip'], port)
-                logger.debug('Trying to connect to {0} from master node...'
-                             .format(proxy_url))
-                if admin_remote.execute(
-                        check_proxy_cmd.format(proxy_url))['exit_code'] == 0:
-                    return proxy_url
-
-        assert_true(len(online_controllers) > 0,
-                    'There are no online controllers available '
-                    'to provide HTTP proxy!')
-
-        # Reaching this point means controllers were online but none of
-        # them served a working proxy, so fail with a clear message.
-        assert_false(len(online_controllers) > 0,
-                     'There are online controllers available ({0}), '
-                     'but no HTTP proxy is accessible from master '
-                     'node'.format(online_controllers))
-
-    @logwrap
-    def get_cluster_credentials(self, cluster_id):
-        attributes = self.client.get_cluster_attributes(cluster_id)
-        username = attributes['editable']['access']['user']['value']
-        password = attributes['editable']['access']['password']['value']
-        tenant = attributes['editable']['access']['tenant']['value']
-        return {'username': username,
-                'password': password,
-                'tenant': tenant}
-
-    @logwrap
-    def get_cluster_additional_components(self, cluster_id):
-        components = {}
-        attributes = self.client.get_cluster_attributes(cluster_id)
-        add_comps = attributes['editable']['additional_components'].items()
-        for comp, opts in add_comps:
-            # exclude metadata
-            if 'metadata' not in comp:
-                components[comp] = opts['value']
-        return components
-
-    @logwrap
-    def get_cluster_ibp_packages(self, cluster_id):
-        attributes = self.client.get_cluster_attributes(cluster_id)
-        pkgs = attributes['editable']['provision']['packages']['value']
-        return set(pkgs.splitlines())
-
-    @logwrap
-    def update_cluster_ibp_packages(self, cluster_id, pkgs):
-        attributes = self.client.get_cluster_attributes(cluster_id)
-        attributes['editable']['provision']['packages']['value'] = '\n'.join(
-            pkgs)
-        self.client.update_cluster_attributes(cluster_id, attributes)
-        return self.get_cluster_ibp_packages(cluster_id)
-
-    @logwrap
-    def spawn_vms_wait(self, cluster_id, timeout=60 * 60, interval=30):
-        logger.info('Spawn VMs of a cluster %s', cluster_id)
-        task = self.client.spawn_vms(cluster_id)
-        self.assert_task_success(task, timeout=timeout, interval=interval)
-
-    @logwrap
-    def get_all_ostf_set_names(self, cluster_id):
-        sets = self.fuel_client.ostf.get_test_sets(cluster_id=cluster_id)
-        return [s['id'] for s in sets]
-
-    @logwrap
-    def update_network_cidr(self, cluster_id, network_name):
-        """Change the default CIDR of a network by replacing it with its
-        first half (the same network with the prefix length increased by one)
-
-        :param cluster_id: int
-        :param network_name: str
-        :return: None
-        """
-        networks = self.client.get_networks(cluster_id)['networks']
-        params = self.client.get_networks(cluster_id)['networking_parameters']
-        for network in networks:
-            if network['name'] != network_name:
-                continue
-            old_cidr = netaddr.IPNetwork(str(network['cidr']))
-            new_cidr = list(old_cidr.subnet(old_cidr.prefixlen + 1))[0]
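-            # netaddr: old_cidr.subnet(old_cidr.prefixlen + 1) yields the two
-            # halves of old_cidr; [0] keeps the lower half as the new network.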
assert_not_equal(old_cidr, new_cidr,
-                             'Can\'t create a subnet using default cidr {0} '
-                             'for {1} network!'.format(old_cidr, network_name))
-            network['cidr'] = str(new_cidr)
-            logger.debug('CIDR for {0} network was changed from {1} to '
-                         '{2}.'.format(network_name, old_cidr, new_cidr))
-            if network['meta']['notation'] != 'ip_ranges':
-                continue
-            if network['name'] == 'public':
-                network['ip_ranges'] = self.get_range(new_cidr, ip_range=-1)
-                params['floating_ranges'] = self.get_range(new_cidr,
-                                                           ip_range=1)
-            else:
-                network['ip_ranges'] = self.get_range(new_cidr, ip_range=0)
-        self.client.update_network(cluster_id, params, networks)
-
-    @logwrap
-    def wait_task_success(self, task_name='', interval=30,
-                          timeout=help_data.DEPLOYMENT_TIMEOUT):
-        """Wait for the latest task with the given name to finish
-
-        :param task_name: str
-        :param interval: int
-        :param timeout: int
-        :return: None
-        """
-        all_tasks = self.client.get_tasks()
-        tasks = [task for task in all_tasks if task['name'] == task_name]
-        latest_task = sorted(tasks, key=lambda k: k['id'])[-1]
-        self.assert_task_success(latest_task, interval=interval,
-                                 timeout=timeout)
-
-    def deploy_cluster_changes_wait(
-            self, cluster_id, data=None,
-            timeout=help_data.DEPLOYMENT_TIMEOUT,
-            interval=30):
-        """Redeploy a cluster to apply changes in its settings
-
-        :param cluster_id: int, env ID to apply changes for
-        :param data: dict, changed env settings
-        :param timeout: int, time (in seconds) to wait for deployment end
-        :param interval: int, time (in seconds) between deployment
-               status queries
-        :return: None
-        """
-        logger.info('Re-deploy cluster {} to apply the changed '
-                    'settings'.format(cluster_id))
-        if data is None:
-            data = {}
-        task = self.client.redeploy_cluster_changes(cluster_id, data)
-        self.assert_task_success(task, interval=interval, timeout=timeout)
-        self.check_deploy_state(cluster_id, check_services=True,
-                                check_tasks=True, allow_partially_deploy=True)
-
-    def execute_task_on_node(self, task_name, node_id,
-                             cluster_id, force_exception=False,
-                             force_execution=True):
-        """Execute a deployment task on the corresponding node
-
-        :param task_name: str, name of a task to execute
-        :param node_id: int, node ID to execute task on
-        :param cluster_id: int, cluster ID
-        :param force_exception: bool, if True, re-raise exceptions on task
-               execution instead of only logging them
-        :param force_execution: bool, run the task on the nodes regardless
-               of whether there were changes or not
-        :return: None
-        """
-        try:
-            logger.info("Trying to execute {!r} task on node {!r}"
-                        .format(task_name, node_id))
-            task = self.client.put_deployment_tasks_for_cluster(
-                cluster_id=cluster_id,
-                data=[task_name],
-                node_id=node_id,
-                force=force_execution)
-            self.assert_task_success(task, timeout=30 * 60)
-        except (AssertionError, TimeoutError):
-            logger.exception("Failed to run task {!r}".format(task_name))
-            if force_exception:
-                raise
-
-    def get_network_pool(self, pool_name, group_name=None):
-        net = self.environment.d_env.get_network(name=pool_name)
-
-        _net_pool = {
-            "gateway": net.default_gw,
-            "network": net.ip_network
-        }
-        return _net_pool
-
-    def setup_hugepages(self, nailgun_node_id,
-                        hp_2mb=0, hp_1gb=0, hp_dpdk_mb=0):
-        node_attributes = self.client.get_node_attributes(nailgun_node_id)
-        node_attributes['hugepages']['nova']['value']['2048'] = hp_2mb
-        node_attributes['hugepages']['nova']['value']['1048576'] = hp_1gb
-        node_attributes['hugepages']['dpdk']['value'] = hp_dpdk_mb
-        self.client.upload_node_attributes(node_attributes, nailgun_node_id)
-
-    def check_dpdk(self,
nailgun_node_id, net='private'): - compute_interfaces = self.client.get_node_interfaces(nailgun_node_id) - target_interface = None - for interface in compute_interfaces: - if net in [n['name'] for n in interface['assigned_networks']]: - target_interface = interface - break - - assert_is_not_none( - target_interface, - "Network {!r} is not found on interfaces".format(net)) - - if 'interface_properties' in target_interface.keys(): - logger.debug("Using old interface serialization scheme") - dpdk_available = target_interface['interface_properties']['dpdk'][ - 'available'] - dpdk_enabled = target_interface['interface_properties']['dpdk'][ - 'enabled'] - else: - logger.debug("Using new interface serialization scheme") - dpdk_available = target_interface['meta']['dpdk']['available'] - dpdk_enabled = target_interface['attributes']['dpdk'][ - 'enabled']['value'] - - return {'available': dpdk_available, 'enabled': dpdk_enabled} - - def enable_dpdk(self, nailgun_node_id, switch_to=True, net='private', - force_enable=False): - if not force_enable: - assert_true(self.check_dpdk(nailgun_node_id, net=net)['available'], - 'DPDK not available on selected interface') - - compute_interfaces = self.client.get_node_interfaces(nailgun_node_id) - target_interface = None - for interface in compute_interfaces: - if net in [n['name'] for n in interface['assigned_networks']]: - target_interface = interface - break - - if 'interface_properties' in target_interface.keys(): - if target_interface['type'] == 'bond': - target_interface['bond_properties']['type__'] = 'dpdkovs' - logger.debug("Using old interface serialization scheme") - target_interface['interface_properties']['dpdk'][ - 'enabled'] = switch_to - else: - logger.debug("Using new interface serialization scheme") - target_interface['attributes']['dpdk'][ - 'enabled']['value'] = switch_to - if target_interface['type'] == 'bond': - target_interface['attributes']['type__']['value'] = 'dpdkovs' - - self.client.put_node_interfaces([{'id': nailgun_node_id, - 'interfaces': compute_interfaces}]) - - return self.check_dpdk( - nailgun_node_id, net=net)['enabled'] == switch_to - - def check_sriov(self, nailgun_node_id): - nailgun_node_ifaces = self.client.get_node_interfaces( - nailgun_node_id) - devops_node = self.get_devops_node_by_nailgun_node_id(nailgun_node_id) - devops_sriov_macs = [i.mac_address for i in devops_node.interfaces - if 'sriov' in i.features] - nailgun_sriov_nics = [] - devops_sriov_nics = [] - for interface in nailgun_node_ifaces: - if interface['mac'] in devops_sriov_macs: - devops_sriov_nics.append(interface['name']) - if interface['assigned_networks']: - continue - api_key = "meta" if "meta" in interface else "interface_properties" - if 'sriov' not in interface[api_key]: - continue - sriov_available = interface[api_key]['sriov']['available'] - if sriov_available: - nailgun_sriov_nics.append(interface['name']) - return set(devops_sriov_nics).intersection(nailgun_sriov_nics) - - def enable_sriov(self, nailgun_node_id): - nics_to_enable_sriov = self.check_sriov(nailgun_node_id) - assert_true(nics_to_enable_sriov, - 'There are no NICs with SR-IOV support on ' - 'node with ID {0}!'.format(nailgun_node_id)) - node_networks = self.client.get_node_interfaces(nailgun_node_id) - for interface in node_networks: - if interface['name'] not in nics_to_enable_sriov: - continue - if 'interface_properties' in interface: - interface['interface_properties']['sriov']['enabled'] = True - interface['interface_properties']['sriov'][ - 'sriov_numvfs'] = 
interface['interface_properties'][
-                        'sriov']['sriov_totalvfs']
-            else:
-                interface['attributes']['sriov']['enabled']['value'] = True
-                interface['attributes']['sriov']['numvfs']['value'] = \
-                    interface['meta']['sriov']['totalvfs']
-
-        self.client.put_node_interfaces(
-            [{'id': nailgun_node_id, 'interfaces': node_networks}])
-
-    def enable_cpu_pinning(self, nailgun_node_id, cpu_count=None):
-        nailgun_node = [node for node in self.client.list_nodes()
-                        if node['id'] == nailgun_node_id].pop()
-        vcpu_total = nailgun_node['meta']['cpu']['total']
-        node_attrs = self.client.get_node_attributes(nailgun_node_id)
-        if cpu_count is None:
-            cpu_count = vcpu_total - 1
-        else:
-            assert_true(
-                cpu_count < vcpu_total,
-                "Too many CPUs requested for CPU pinning! The requested "
-                "count must be less than the vCPU count (requested {!r}, "
-                "vCPUs found {!r}).".format(cpu_count, vcpu_total))
-        node_attrs['cpu_pinning']['nova']['value'] = cpu_count
-        self.client.upload_node_attributes(node_attrs, nailgun_node_id)
-
-
-class FuelWebClient30(FuelWebClient29):
-    """FuelWebClient that works with fuel-devops 3.0
-    """
-    @logwrap
-    def get_default_node_group(self):
-        return self.environment.d_env.get_group(name='default')
-
-    @logwrap
-    def get_public_gw(self):
-        default_node_group = self.get_default_node_group()
-        pub_pool = default_node_group.get_network_pool(name='public')
-        return str(pub_pool.gateway)
-
-    @logwrap
-    def nodegroups_configure(self, cluster_id):
-        # Add node groups with networks
-        if len(self.environment.d_env.get_groups()) > 1:
-            ng = {rack.name: [] for rack in
-                  self.environment.d_env.get_groups()}
-            ng_nets = []
-            for rack in self.environment.d_env.get_groups():
-                nets = {
-                    'name': rack.name,
-                    'networks': {
-                        r.name: r.address_pool.name
-                        for r in rack.get_network_pools(
-                            name__in=[
-                                'fuelweb_admin',
-                                'public',
-                                'management',
-                                'storage',
-                                'private'])}}
-                ng_nets.append(nets)
-            self.update_nodegroups(cluster_id=cluster_id,
-                                   node_groups=ng)
-            self.update_nodegroups_network_configuration(cluster_id,
-                                                         ng_nets)
-
-    def change_default_network_settings(self):
-        def fetch_networks(networks):
-            """Parse the response from api/releases/1/networks and return a
-            dict with the networks' settings - needed to avoid hardcoding"""
-            result = {}
-            for net in networks:
-                if (net['name'] == 'private' and
-                        net.get('seg_type', '') == 'tun'):
-                    result['private_tun'] = net
-                elif (net['name'] == 'private' and
-                        net.get('seg_type', '') == 'gre'):
-                    result['private_gre'] = net
-                elif (net['name'] == 'private' and
-                        net.get('seg_type', '') == 'vlan'):
-                    result['private_vlan'] = net
-                elif net['name'] == 'public':
-                    result['public'] = net
-                elif net['name'] == 'management':
-                    result['management'] = net
-                elif net['name'] == 'storage':
-                    result['storage'] = net
-                elif net['name'] == 'baremetal':
-                    result['baremetal'] = net
-            return result
-
-        default_node_group = self.get_default_node_group()
-        logger.info("Default node group name is {}".format(
-            default_node_group.name))
-
-        logger.info("Applying default network settings")
-        for _release in self.client.get_releases():
-            logger.info(
-                'Applying changes for release: {}'.format(
-                    _release['name']))
-            net_settings = \
-                self.client.get_release_default_net_settings(
-                    _release['id'])
-            for net_provider in NETWORK_PROVIDERS:
-                if net_provider not in net_settings:
-                    # TODO(ddmitriev): show a warning if NETWORK_PROVIDERS
-                    # do not match the providers in net_settings.
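-                    # For now such providers are silently skipped.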
- continue - - networks = fetch_networks( - net_settings[net_provider]['networks']) - - pub_pool = default_node_group.get_network_pool( - name='public') - networks['public']['cidr'] = str(pub_pool.net) - networks['public']['gateway'] = str(pub_pool.gateway) - networks['public']['notation'] = 'ip_ranges' - networks['public']['vlan_start'] = \ - pub_pool.vlan_start if pub_pool.vlan_start else None - - networks['public']['ip_range'] = list( - pub_pool.ip_range(relative_start=2, relative_end=-16)) - - net_settings[net_provider]['config']['floating_ranges'] = [ - list(pub_pool.ip_range('floating', - relative_start=-15, - relative_end=-2))] - - if 'baremetal' in networks and \ - default_node_group.get_network_pools(name='ironic'): - ironic_net = self.environment.d_env.get_network( - name='ironic').ip - prefix = netaddr.IPNetwork( - str(ironic_net.cidr)).prefixlen - subnet1, subnet2 = tuple(ironic_net.subnet(prefix + 1)) - networks['baremetal']['cidr'] = str(ironic_net) - net_settings[net_provider]['config'][ - 'baremetal_gateway'] = str(ironic_net[-2]) - networks['baremetal']['ip_range'] = [ - str(subnet1[2]), str(subnet2[0])] - net_settings[net_provider]['config']['baremetal_range'] =\ - [str(subnet2[1]), str(subnet2[-3])] - pool = default_node_group.get_network_pool( - name='ironic') - networks['baremetal']['vlan_start'] = ( - pool.vlan_start if pool.vlan_start else None) - - if BONDING: - # leave defaults for mgmt, storage and private if - # BONDING is enabled - continue - - for pool in default_node_group.get_network_pools( - name__in=['storage', 'management']): - networks[pool.name]['cidr'] = str(pool.net) - networks[pool.name]['ip_range'] = self.get_range( - pool.net)[0] - networks[pool.name]['notation'] = 'ip_ranges' - networks[pool.name]['vlan_start'] = pool.vlan_start - - if net_provider == 'neutron': - net_settings[net_provider]['config']['internal_cidr'] = \ - '192.168.0.0/24' - net_settings[net_provider]['config']['internal_gateway'] =\ - '192.168.0.1' - private_net_pool = default_node_group.get_network_pool( - name='private') - - networks['private_tun']['cidr'] = \ - str(private_net_pool.net) - networks['private_gre']['cidr'] = \ - str(private_net_pool.net) - networks['private_tun']['vlan_start'] = \ - private_net_pool.vlan_start or None - networks['private_gre']['vlan_start'] = \ - private_net_pool.vlan_start or None - - networks['private_vlan']['vlan_start'] = None - net_settings[net_provider]['config']['vlan_range'] = \ - (private_net_pool.vlan_start or None, - private_net_pool.vlan_end or None) - - elif net_provider == 'nova_network': - private_net_pool = default_node_group.get_network_pool( - name='private') - net_settings[net_provider]['config'][ - 'fixed_networks_cidr'] = \ - str(private_net_pool.net) or None - net_settings[net_provider]['config'][ - 'fixed_networks_vlan_start'] = \ - private_net_pool.vlan_start or None - - self.client.put_release_default_net_settings( - _release['id'], net_settings) - - def get_node_group_and_role(self, node_name, nodes_dict): - devops_node = self.environment.d_env.get_node(name=node_name) - node_group = devops_node.group.name - if isinstance(nodes_dict[node_name][0], list): - # Backwards compatibility - node_roles = nodes_dict[node_name][0] - else: - node_roles = nodes_dict[node_name] - return node_group, node_roles - - @logwrap - def update_nodes_interfaces(self, cluster_id, nailgun_nodes=None): - nailgun_nodes = nailgun_nodes or [] - if not nailgun_nodes: - nailgun_nodes = self.client.list_cluster_nodes(cluster_id) - for node in 
nailgun_nodes: - assigned_networks = {} - interfaces = self.client.get_node_interfaces(node['id']) - interfaces = {iface['mac']: iface for iface in interfaces} - d_node = self.get_devops_node_by_nailgun_node(node) - for net in d_node.network_configs: - if net.aggregation is None: # Have some ifaces aggregation? - node_iface = d_node.interface_set.get(label=net.label) - assigned_networks[interfaces[ - node_iface.mac_address]['name']] = net.networks - else: - assigned_networks[net.label] = net.networks - - self.update_node_networks(node['id'], assigned_networks) - - @logwrap - def update_node_networks(self, node_id, interfaces_dict, - raw_data=None, - override_ifaces_params=None): - interfaces = self.client.get_node_interfaces(node_id) - phys_interfaces = self.filter_nailgun_entities(interfaces, - type="ether") - - node = [n for n in self.client.list_nodes() if n['id'] == node_id][0] - d_node = self.get_devops_node_by_nailgun_node(node) - if d_node: - bonds = [n for n in d_node.network_configs - if n.aggregation is not None] - for bond in bonds: - macs = [i.mac_address.lower() for i in - d_node.interface_set.filter(label__in=bond.parents)] - parents = [{'name': iface['name']} for iface in phys_interfaces - if iface['mac'].lower() in macs] - bond_config = { - 'mac': None, - 'mode': bond.aggregation, - 'name': bond.label, - 'slaves': parents, - 'state': None, - 'type': 'bond', - 'assigned_networks': [] - } - interfaces.append(bond_config) - - if raw_data is not None: - interfaces.extend(raw_data) - - def get_iface_by_name(ifaces, name): - iface = [_iface for _iface in ifaces if _iface['name'] == name] - assert_true(len(iface) > 0, - "Interface with name {} is not present on " - "node. Please check override params.".format(name)) - return iface[0] - - if override_ifaces_params is not None: - for interface in override_ifaces_params: - get_iface_by_name(interfaces, interface['name']).\ - update(interface) - - all_networks = dict() - for interface in interfaces: - all_networks.update( - {net['name']: net for net in interface['assigned_networks']}) - - for interface in interfaces: - name = interface["name"] - interface['assigned_networks'] = \ - [all_networks[i] for i in interfaces_dict.get(name, []) if - i in all_networks.keys()] - - self.client.put_node_interfaces( - [{'id': node_id, 'interfaces': interfaces}]) - - def update_nodegroup_net_settings(self, network_configuration, nodegroup, - cluster_id=None): - # seg_type = network_configuration.get('networking_parameters', {}) \ - # .get('segmentation_type') - nodegroup_id = self.get_nodegroup(cluster_id, nodegroup['name'])['id'] - for net in network_configuration.get('networks'): - if nodegroup['name'] == 'default' and \ - net['name'] == 'fuelweb_admin': - continue - - if net['group_id'] == nodegroup_id: - group = self.environment.d_env.get_group( - name=nodegroup['name']) - net_pool = group.networkpool_set.get(name=net['name']) - net['cidr'] = net_pool.net - # if net['meta']['use_gateway']: - # net['gateway'] = net_pool.gateway - # else: - # net['gateway'] = None - net['gateway'] = net_pool.gateway - if net['gateway']: - net['meta']['use_gateway'] = True - net['meta']['gateway'] = net['gateway'] - else: - net['meta']['use_gateway'] = False - - if not net['meta'].get('neutron_vlan_range', False): - net['vlan_start'] = net_pool.vlan_start - net['meta']['notation'] = 'ip_ranges' - net['ip_ranges'] = [list(net_pool.ip_range())] - - return network_configuration - - @logwrap - def get_network_pool(self, pool_name, group_name='default'): - group = 
self.environment.d_env.get_group(name=group_name) - net_pool = group.get_network_pool(name=pool_name) - _net_pool = { - "gateway": net_pool.gateway, - "network": net_pool.net - } - return _net_pool - - -# TODO(ddmitriev): this code will be removed after moving to fuel-devops3.0 -# pylint: disable=no-member -# noinspection PyUnresolvedReferences -if (distutils.version.LooseVersion(devops.__version__) < - distutils.version.LooseVersion('3')): - logger.info("Use FuelWebClient compatible to fuel-devops 2.9") - logger.warning("\n\nfuel-devops 2.9 NOT SUPPORTED now. UPDATE" - " fuel-devops up to 3.0 version ASAP.\n") - FuelWebClient = FuelWebClient29 -else: - logger.info("Use FuelWebClient compatible to fuel-devops 3.0") - FuelWebClient = FuelWebClient30 -# pylint: enable=no-member diff --git a/fuelweb_test/models/nailgun_client.py b/fuelweb_test/models/nailgun_client.py deleted file mode 100644 index 43f61f51f..000000000 --- a/fuelweb_test/models/nailgun_client.py +++ /dev/null @@ -1,920 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from warnings import warn - -from core.helpers.log_helpers import logwrap -from core.models.fuel_client import Client as FuelClient - -from fuelweb_test import logger - -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import RELEASE_VERSION - - -class NailgunClient(object): - """NailgunClient""" # TODO documentation - - def __init__(self, session): - logger.info( - 'Initialization of NailgunClient using shared session \n' - '(auth_url={})'.format(session.auth.auth_url)) - self.client = FuelClient(session=session) - self.session = session - - def __repr__(self): - klass, obj_id = type(self), hex(id(self)) - url = getattr(self, 'url', None) - return "[{klass}({obj_id}), url:{url}]".format(klass=klass, - obj_id=obj_id, - url=url) - - def _get(self, url, **kwargs): - if 'endpoint_filter' not in kwargs: - kwargs.update(endpoint_filter={'service_type': 'fuel'}) - return self.session.get(url=url, connect_retries=1, **kwargs) - - def _delete(self, url, **kwargs): - if 'endpoint_filter' not in kwargs: - kwargs.update(endpoint_filter={'service_type': 'fuel'}) - return self.session.delete(url=url, connect_retries=1, **kwargs) - - def _post(self, url, **kwargs): - if 'endpoint_filter' not in kwargs: - kwargs.update(endpoint_filter={'service_type': 'fuel'}) - return self.session.post(url=url, connect_retries=1, **kwargs) - - def _put(self, url, **kwargs): - if 'endpoint_filter' not in kwargs: - kwargs.update(endpoint_filter={'service_type': 'fuel'}) - return self.session.put(url=url, connect_retries=1, **kwargs) - - def list_nodes(self): - return self._get(url="/nodes/").json() - - def list_cluster_nodes(self, cluster_id): - return self._get(url="/nodes/?cluster_id={}".format(cluster_id)).json() - - @logwrap - def get_networks(self, cluster_id): - net_provider = self.get_cluster(cluster_id)['net_provider'] - return self._get( - url="/clusters/{}/network_configuration/{}".format( - cluster_id, net_provider 
- )).json() - - @logwrap - def verify_networks(self, cluster_id): - net_provider = self.get_cluster(cluster_id)['net_provider'] - return self._put( - "/clusters/{}/network_configuration/{}/verify/".format( - cluster_id, net_provider - ), - json=self.get_networks(cluster_id) - ).json() - - def get_cluster_attributes(self, cluster_id): - return self._get( - url="/clusters/{}/attributes/".format(cluster_id)).json() - - @logwrap - def update_cluster_attributes(self, cluster_id, attrs): - return self._put( - "/clusters/{}/attributes/".format(cluster_id), - json=attrs - ).json() - - @logwrap - def get_cluster(self, cluster_id): - return self._get(url="/clusters/{}".format(cluster_id)).json() - - @logwrap - def update_cluster(self, cluster_id, data): - return self._put( - "/clusters/{}/".format(cluster_id), - json=data - ).json() - - @logwrap - def delete_cluster(self, cluster_id): - return self._delete(url="/clusters/{}/".format(cluster_id)).json() - - @logwrap - def get_node_by_id(self, node_id): - return self._get(url="/nodes/{}".format(node_id)).json() - - @logwrap - def update_node(self, node_id, data): - return self._put( - "/nodes/{}/".format(node_id), json=data - ).json() - - @logwrap - def update_nodes(self, data): - return self._put(url="/nodes", json=data).json() - - @logwrap - def delete_node(self, node_id): - return self._delete(url="/nodes/{}/".format(node_id)).json() - - @logwrap - def deploy_cluster_changes(self, cluster_id): - return self._put(url="/clusters/{}/changes/".format(cluster_id)).json() - - @logwrap - def deploy_custom_graph(self, cluster_id, graph_type, node_ids=None, - tasks=None): - """Method to deploy custom graph on cluster. - - :param cluster_id: Cluster to be custom deployed - :param graph_type: Type of a graph to deploy - :param node_ids: nodes to deploy. None or empty list means all. - :param tasks: list of string with task names in graph - :return: ``task_uuid`` -- unique ID of accepted transaction - """ - scenario = {"cluster": int(cluster_id), - "graphs": [ - {"type": graph_type, - "tasks": tasks, - "nodes": node_ids - }], - "dry_run": False, - "force": False} - endpoint = '/graphs/execute/' - return self._post(endpoint, json=scenario).json() - - @logwrap - def get_release_tasks(self, release_id): - """Method to get release deployment tasks. - - :param release_id: Id of release to get tasks - :return: list of deployment graphs - """ - return self._get( - '/releases/{rel_id}/deployment_graphs/'.format( - rel_id=release_id)).json() - - @logwrap - def get_release_tasks_by_type(self, release_id, graph_type): - """Method to get release deployment tasks by type. 
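-        Wraps GET /releases/{release_id}/deployment_graphs/{graph_type}.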
- - :param release_id: Id of release to get tasks - :param graph_type: Type of a graph to deploy - :return: list of deployment graphs for a given type - """ - return self._get( - "/releases/{0}/deployment_graphs/{1}".format( - release_id, graph_type)).json() - - @logwrap - def get_task(self, task_id): - return self._get(url="/tasks/{}".format(task_id)).json() - - @logwrap - def get_tasks(self): - return self._get(url="/tasks").json() - - @logwrap - def get_releases(self): - return self._get(url="/releases/").json() - - @logwrap - def get_release(self, release_id): - return self._get(url="/releases/{}".format(release_id)).json() - - @logwrap - def put_release(self, release_id, data): - return self._put( - url="/releases/{}".format(release_id), json=data).json() - - @logwrap - def get_releases_details(self, release_id): - msg = 'get_releases_details is deprecated in favor of get_release' - warn(msg, DeprecationWarning) - logger.warning(msg) - return self._get(url="/releases/{}".format(release_id)).json() - - @logwrap - def get_node_disks(self, node_id): - return self._get(url="/nodes/{}/disks".format(node_id)).json() - - @logwrap - def put_node_disks(self, node_id, data): - return self._put( - url="/nodes/{}/disks".format(node_id), json=data).json() - - @logwrap - def get_deployable_releases(self): - return sorted( - [ - release for release - in self.get_releases() if release['is_deployable']], - key=lambda rel: rel['id'] - ) - - @logwrap - def get_release_id(self, release_name=OPENSTACK_RELEASE, - release_version=RELEASE_VERSION): - for release in self.get_releases(): - if (release_name.lower() in release["name"].lower() and - release_version.lower() in release["version"].lower()): - return release["id"] - - @logwrap - def get_release_default_net_settings(self, release_id): - return self._get(url="/releases/{}/networks".format(release_id)).json() - - @logwrap - def put_release_default_net_settings(self, release_id, data): - return self._put( - "/releases/{}/networks".format(release_id), - json=data).json() - - @logwrap - def get_node_interfaces(self, node_id): - return self._get(url="/nodes/{}/interfaces".format(node_id)).json() - - @logwrap - def put_node_interfaces(self, data): - return self._put(url="/nodes/interfaces", json=data).json() - - @logwrap - def list_clusters(self): - return self._get(url="/clusters/").json() - - @logwrap - def clone_environment(self, environment_id, data): - return self._post( - "/clusters/{}/upgrade/clone".format(environment_id), - json=data - ).json() - - @logwrap - def reassign_node(self, cluster_id, data): - return self._post( - "/clusters/{}/upgrade/assign".format(cluster_id), - json=data - ).json() - - @logwrap - def create_cluster(self, data): - logger.info('Before post to nailgun') - return self._post(url="/clusters", json=data).json() - - # ## OSTF ### - @logwrap - def get_ostf_test_sets(self, cluster_id): - warn('get_ostf_test_sets has been moved to ' - 'core.models.fuel_client.Client.ostf.get_test_sets', - DeprecationWarning) - return self.client.ostf.get_test_sets(cluster_id=cluster_id) - - @logwrap - def get_ostf_tests(self, cluster_id): - warn('get_ostf_tests has been moved to ' - 'core.models.fuel_client.Client.ostf.get_tests', - DeprecationWarning) - return self.client.ostf.get_tests(cluster_id=cluster_id) - - @logwrap - def get_ostf_test_run(self, cluster_id): - warn('get_ostf_test_run has been moved to ' - 'core.models.fuel_client.Client.ostf.get_test_runs', - DeprecationWarning) - return self.client.ostf.get_test_runs(cluster_id=cluster_id) 
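The OSTF wrappers around this point all follow the same warn-and-delegate shim: keep the old entry point importable, emit a DeprecationWarning, and forward to the relocated implementation. A minimal sketch of the pattern; LegacyFacade, NewClient, and their method names are illustrative, not part of the original code:

    import warnings

    class NewClient(object):
        """Stand-in for the relocated implementation."""
        def run_tests(self, cluster_id, test_sets):
            return {'cluster': cluster_id, 'test_sets': test_sets}

    class LegacyFacade(object):
        """Keeps the old entry point alive while nudging callers onward."""
        def __init__(self, new_client):
            self._new = new_client

        def ostf_run_tests(self, cluster_id, test_sets_list):
            warnings.warn('ostf_run_tests has been moved to '
                          'NewClient.run_tests', DeprecationWarning)
            return self._new.run_tests(cluster_id=cluster_id,
                                       test_sets=test_sets_list)

    facade = LegacyFacade(NewClient())
    facade.ostf_run_tests(cluster_id=1, test_sets_list=['smoke'])
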
- - @logwrap - def ostf_run_tests(self, cluster_id, test_sets_list): - warn('ostf_run_tests has been moved to ' - 'core.models.fuel_client.Client.ostf.run_tests', - DeprecationWarning) - return self.client.ostf.run_tests( - cluster_id=cluster_id, test_sets=test_sets_list) - - @logwrap - def ostf_run_singe_test(self, cluster_id, test_sets_list, test_name): - warn('ostf_run_singe_test has been moved to ' - 'core.models.fuel_client.Client.ostf.run_tests', - DeprecationWarning) - return self.client.ostf.run_tests( - cluster_id=cluster_id, test_sets=test_sets_list, - test_name=test_name) - # ## /OSTF ### - - @logwrap - def update_network(self, cluster_id, networking_parameters=None, - networks=None): - nc = self.get_networks(cluster_id) - if networking_parameters is not None: - for k in networking_parameters: - nc["networking_parameters"][k] = networking_parameters[k] - if networks is not None: - nc["networks"] = networks - - net_provider = self.get_cluster(cluster_id)['net_provider'] - return self._put( - "/clusters/{}/network_configuration/{}".format( - cluster_id, net_provider - ), - json=nc, - - ).json() - - @logwrap - def get_cluster_id(self, name): - for cluster in self.list_clusters(): - if cluster["name"] == name: - logger.info('Cluster name is {:s}'.format(name)) - logger.info('Cluster id is {:d}'.format(cluster["id"])) - return cluster["id"] - - @logwrap - def add_syslog_server(self, cluster_id, host, port): - # Here we updating cluster editable attributes - # In particular we set extra syslog server - attributes = self.get_cluster_attributes(cluster_id) - attributes["editable"]["syslog"]["syslog_server"]["value"] = host - attributes["editable"]["syslog"]["syslog_port"]["value"] = port - self.update_cluster_attributes(cluster_id, attributes) - - @logwrap - def get_cluster_vlans(self, cluster_id): - cluster_vlans = [] - nc = self.get_networks(cluster_id)['networking_parameters'] - vlans = nc["vlan_range"] - cluster_vlans.extend(vlans) - - return cluster_vlans - - @logwrap - def get_notifications(self): - return self._get(url="/notifications").json() - - @logwrap - def generate_logs(self): - return self._put(url="/logs/package").json() - - @logwrap - def provision_nodes(self, cluster_id, node_ids=None): - return self.do_cluster_action(cluster_id, node_ids=node_ids) - - @logwrap - def deploy_nodes(self, cluster_id, node_ids=None): - return self.do_cluster_action( - cluster_id, node_ids=node_ids, action="deploy") - - @logwrap - def stop_deployment(self, cluster_id): - return self.do_stop_reset_actions(cluster_id) - - @logwrap - def reset_environment(self, cluster_id): - return self.do_stop_reset_actions(cluster_id, action="reset") - - @logwrap - def do_cluster_action(self, cluster_id, node_ids=None, action="provision"): - if not node_ids: - nailgun_nodes = self.list_cluster_nodes(cluster_id) - # pylint: disable=map-builtin-not-iterating - node_ids = map(lambda _node: str(_node['id']), nailgun_nodes) - # pylint: enable=map-builtin-not-iterating - return self._put( - "/clusters/{0}/{1}?nodes={2}".format( - cluster_id, - action, - ','.join(node_ids)) - ).json() - - @logwrap - def do_stop_reset_actions(self, cluster_id, action="stop_deployment"): - return self._put( - "/clusters/{0}/{1}/".format(str(cluster_id), action)).json() - - @logwrap - def get_api_version(self): - return self._get(url="/version").json() - - @logwrap - def run_update(self, cluster_id): - return self._put( - "/clusters/{0}/update/".format(str(cluster_id))).json() - - @logwrap - def create_nodegroup(self, cluster_id, 
group_name): - data = {"cluster_id": cluster_id, "name": group_name} - return self._post(url="/nodegroups/", json=data).json() - - @logwrap - def get_nodegroups(self): - return self._get(url="/nodegroups/").json() - - @logwrap - def assign_nodegroup(self, group_id, nodes): - data = [{"group_id": group_id, "id": n["id"]} for n in nodes] - return self._put(url="/nodes/", json=data).json() - - @logwrap - def delete_nodegroup(self, group_id): - return self._delete(url="/nodegroups/{0}/".format(group_id)) - - @logwrap - def update_settings(self, data=None): - return self._put(url="/settings", json=data).json() - - @logwrap - def get_settings(self, data=None): - return self._get(url="/settings").json() - - @logwrap - def send_fuel_stats(self, enabled=False): - settings = self.get_settings() - params = ('send_anonymous_statistic', 'user_choice_saved') - for p in params: - settings['settings']['statistics'][p]['value'] = enabled - self.update_settings(data=settings) - - @logwrap - def get_cluster_deployment_tasks(self, cluster_id): - """ Get list of all deployment tasks for cluster.""" - return self._get( - url='/clusters/{}/deployment_tasks'.format(cluster_id), - ).json() - - @logwrap - def get_release_deployment_tasks(self, release_id): - """ Get list of all deployment tasks for release.""" - return self._get( - url='/releases/{}/deployment_tasks'.format(release_id), - ).json() - - @logwrap - def get_custom_cluster_deployment_tasks(self, cluster_id, custom_type): - """ Get list of all deployment tasks for cluster.""" - return self._get( - '/clusters/{}/deployment_tasks/?graph_type={}'.format( - cluster_id, - custom_type - )).json() - - @logwrap - def get_end_deployment_tasks(self, cluster_id, end, start=None): - """ Get list of all deployment tasks for cluster with end parameter. 
- If end=netconfig, return all tasks from the graph included netconfig - """ - if not start: - return self._get( - url='/clusters/{0}/deployment_tasks?end={1}'.format( - cluster_id, end) - ).json() - return self._get( - url='/clusters/{0}/deployment_tasks?start={1}&end={2}'.format( - cluster_id, start, end), - ).json() - - @logwrap - def get_orchestrator_deployment_info(self, cluster_id): - return self._get( - url='/clusters/{}/orchestrator/deployment'.format(cluster_id), - ).json() - - @logwrap - def put_deployment_tasks_for_cluster(self, cluster_id, data, node_id, - force=False): - """Put task to be executed on the nodes from cluster - - :param cluster_id: int, cluster id - :param data: list, tasks ids - :param node_id: str, Node ids where task should be run, - can be node_id=1, or node_id =1,2,3, - :param force: bool, run particular task on nodes and do not care - if there were changes or not - :return: - """ - return self._put( - '/clusters/{0}/deploy_tasks?nodes={1}{2}'.format( - cluster_id, node_id, '&force=1' if force else ''), - json=data).json() - - @logwrap - def put_deployment_tasks_for_release(self, release_id, data): - return self._put( - '/releases/{}/deployment_tasks'.format(release_id), - json=data).json() - - @logwrap - def set_hostname(self, node_id, new_hostname): - """ Set a new hostname for the node""" - data = dict(hostname=new_hostname) - return self._put(url='/nodes/{0}/'.format(node_id), json=data).json() - - @logwrap - def get_network_template(self, cluster_id): - return self._get( - url='/clusters/{}/network_configuration/template'.format( - cluster_id), - ).json() - - @logwrap - def upload_network_template(self, cluster_id, network_template): - return self._put( - '/clusters/{}/network_configuration/template'.format(cluster_id), - json=network_template).json() - - @logwrap - def delete_network_template(self, cluster_id): - return self._delete( - url='/clusters/{}/network_configuration/template'.format( - cluster_id), - ).json() - - @logwrap - def get_network_groups(self): - return self._get(url='/networks/').json() - - @logwrap - def get_network_group(self, network_id): - return self._get(url='/networks/{0}/'.format(network_id)).json() - - @logwrap - def add_network_group(self, network_data): - return self._post(url='/networks/', json=network_data).json() - - @logwrap - def del_network_group(self, network_id): - return self._delete(url='/networks/{0}/'.format(network_id)) - - @logwrap - def update_network_group(self, network_id, network_data): - return self._put(url='/networks/{0}/'.format(network_id), - json=network_data).json() - - @logwrap - def create_vm_nodes(self, node_id, data): - logger.info("Uploading VMs configuration to node {0}: {1}". - format(node_id, data)) - url = "/nodes/{0}/vms_conf/".format(node_id) - return self._put(url, json={'vms_conf': data}).json() - - @logwrap - def spawn_vms(self, cluster_id): - url = '/clusters/{0}/spawn_vms/'.format(cluster_id) - return self._put(url).json() - - @logwrap - def upload_configuration(self, config, cluster_id, role=None, - node_id=None, node_ids=None): - """Upload configuration. - - :param config: a dictionary of configuration to upload. - :param cluster_id: An integer number of cluster id. - :param role: a string of role name. - :param node_id: An integer number of node id. - :param node_ids: a list of node ids - :return: a decoded JSON response. 
- """ - data = {'cluster_id': cluster_id, 'configuration': config} - if role is not None: - data['node_role'] = role - if node_id is not None: - data['node_id'] = node_id - if node_ids is not None: - data['node_ids'] = node_ids - url = '/openstack-config/' - return self._post(url, json=data).json() - - @logwrap - def get_configuration(self, configuration_id): - """Get uploaded configuration by id. - - :param configuration_id: An integer number of configuration id. - :return: a decoded JSON response. - """ - return self._get( - url='/openstack-config/{0}'.format(configuration_id), - ).json() - - @logwrap - def list_configuration(self, cluster_id, role=None, node_id=None): - """Get filtered list of configurations. - - :param cluster_id: An integer number of cluster id. - :param role: a string of role name. - :param node_id: An integer number of node id. - :return: a decoded JSON response. - """ - url = '/openstack-config/?cluster_id={0}'.format(cluster_id) - if role is not None: - url += '&node_role={0}'.format(role) - if node_id is not None: - url += '&node_id={0}'.format(node_id) - return self._get(url=url).json() - - @logwrap - def delete_configuration(self, configuration_id): - """Delete configuration by id. - - :param configuration_id: An integer number of configuration id. - :return: urllib2's object of response. - """ - url = '/openstack-config/{0}'.format(configuration_id) - return self._delete(url=url) - - @logwrap - def apply_configuration(self, cluster_id, role=None, node_id=None): - """Apply configuration. - - :param cluster_id: An integer number of cluster id. - :param role: a string of role name. - :param node_id: An integer number of node id. - :return: a decoded JSON response. - """ - data = {'cluster_id': cluster_id} - if role is not None: - data['node_role'] = role - if node_id is not None: - data['node_id'] = node_id - url = '/openstack-config/execute/' - return self._put(url, json=data).json() - - @logwrap - def update_vip_ip(self, cluster_id, data): - return self._post( - "/clusters/{0}/network_configuration/ips/vips".format(cluster_id), - json=data).json() - - @logwrap - def upload_node_attributes(self, attributes, node_id): - """Upload node attributes for specified node. - - :param attributes: a dictionary of attributes to upload. - :param node_id: an integer number of node id. - :return: a decoded JSON response. - """ - url = '/nodes/{}/attributes/'.format(node_id) - return self._put(url, json=attributes).json() - - @logwrap - def get_node_attributes(self, node_id): - """Get attributes for specified node. - - :param node_id: an integer number of node id. - :return: a decoded JSON response. 
- """ - return self._get(url='/nodes/{}/attributes/'.format(node_id)).json() - - @logwrap - def get_deployed_cluster_attributes(self, cluster_id): - url = '/clusters/{}/attributes/deployed/'.format(cluster_id) - return self._get(url).json() - - @logwrap - def get_deployed_network_configuration(self, cluster_id): - url = '/clusters/{}/network_configuration/deployed'.format( - cluster_id) - return self._get(url).json() - - @logwrap - def get_default_cluster_settings(self, cluster_id): - url = '/clusters/{}/attributes/defaults'.format(cluster_id) - return self._get(url).json() - - @logwrap - def get_all_tasks_list(self): - return self._get(url='/transactions/').json() - - @logwrap - def get_deployment_task_hist(self, task_id): - url = '/transactions/{task_id}/deployment_history'.format( - task_id=task_id) - return self._get( - url=url, - ).json() - - @logwrap - def redeploy_cluster_changes(self, cluster_id, data=None): - """Deploy the changes of cluster settings - - :param cluster_id: int, target cluster ID - :param data: dict, updated cluster attributes (if empty, the already - uploaded attributes will be (re)applied) - :return: a decoded JSON response - """ - if data is None: - data = {} - return self._put( - "/clusters/{}/changes/redeploy".format(cluster_id), - json=data).json() - - @logwrap - def assign_ip_address_before_deploy_start(self, cluster_id): - return self._get( - url='/clusters/{}/orchestrator/deployment/defaults/'.format( - cluster_id) - ) - - @logwrap - def get_deployment_info_for_task(self, task_id): - return self._get( - url='/transactions/{}/deployment_info'.format(task_id), - ).json() - - @logwrap - def get_cluster_settings_for_deployment_task(self, task_id): - return self._get( - url='/transactions/{}/settings'.format(task_id), - ).json() - - @logwrap - def get_network_configuration_for_deployment_task(self, task_id): - return self._get( - url='/transactions/{}/network_configuration/'.format(task_id), - ).json() - - # ConfigDB Extension - - @logwrap - def get_components(self, comp_id=None): - """Get all existing components - - :param comp_id: component id - :return: components data - """ - endpoint = '/config/components' - endpoint = '{path}/{component_id}'.format( - path=endpoint, component_id=comp_id) if comp_id else endpoint - return self._get(endpoint).json() - - @logwrap - def create_component(self, data): - """ Create component with specified data - - :param data: - :return: - """ - return self._post('/config/components', json=data).json() - - @logwrap - def get_environments(self, env_id=None): - """Get all existing environments - - :param env_id: environment id - :return: env data - """ - endpoint = '/config/environments' - endpoint = '{path}/{env_id}'.format( - env_id=env_id, path=endpoint) if env_id else endpoint - return self._get(endpoint).json() - - @logwrap - def create_environment(self, data): - """ Create env with specified data - - :param data: - :return: - """ - return self._post('/config/environments', json=data).json() - - @logwrap - def get_global_resource_id_value(self, env_id, resource_id, - effective=False): - """ Get global resource value for specified env and resource - - :param env_id: str or int - :param resource_id: int - :param effective: true or false - :return: global resource value - """ - endpoint = '/config/environments/' \ - '{env_id}/resources/{resource}' \ - '/values'.format(env_id=env_id, resource=resource_id) - endpoint = endpoint + '?effective' if effective else endpoint - - return self._get(endpoint).json() - - @logwrap - def 
get_global_resource_name_value(self, env_id, resource_name, - effective=False): - """ Get global resource value for specified env and resource - - :param env_id: str or int - :param resource_name: str or int - :param effective: true or false - :return: global resource value - """ - endpoint = '/config/environments/' \ - '{env_id}/resources/{resource}' \ - '/values'.format(env_id=env_id, resource=resource_name) - endpoint = endpoint + '?effective' if effective else endpoint - - return self._get(endpoint).json() - - @logwrap - def put_global_resource_value(self, env_id, resource, data): - """Put global resource value - - :param env_id: str or int - :param resource: name or id - :param data: data in dict format - """ - endpoint = '/config/environments/' \ - '{env_id}/resources/{resource}' \ - '/values'.format(env_id=env_id, resource=resource) - return self._put(endpoint, json=data) - - @logwrap - def put_global_resource_override(self, env_id, resource, data): - """Put global resource override value - - :param env_id: str or int - :param resource: name or id - :param data: data in dict format - """ - endpoint = '/config/environments/' \ - '{env_id}/resources/{resource}' \ - '/overrides'.format(env_id=env_id, resource=resource) - return self._put(endpoint, json=data) - - @logwrap - def get_node_resource_id_value(self, env_id, resource_id, node_id, - effective=False): - """ Get node level resource value for specified env, resource and node - - :param env_id: str or int - :param resource_id: id - :param node_id: str or int - :param effective: true or false - :return: node resource value - """ - endpoint = '/config/environments/' \ - '{env_id}/nodes/{node_id}/resources/{resource}' \ - '/values'.format(env_id=env_id, resource=resource_id, - node_id=node_id) - endpoint = endpoint + '?effective' if effective else endpoint - - return self._get(endpoint).json() - - @logwrap - def get_node_resource_name_value(self, env_id, resource_name, node_id, - effective=False): - """ Get node level resource value for specified env, resource and node - - :param env_id: str or int - :param resource_name: name in string format - :param node_id: str or int - :param effective: true or false - :return: node resource value - """ - endpoint = '/config/environments/' \ - '{env_id}/nodes/{node_id}/resources/{resource}' \ - '/values'.format(env_id=env_id, resource=resource_name, - node_id=node_id) - endpoint = endpoint + '?effective' if effective else endpoint - - return self._get(endpoint).json() - - @logwrap - def put_node_resource_value(self, env_id, resource, node_id, data): - """ Put node resource value - - :param env_id: str or int - :param resource: name or id - :param node_id: str or int - :param data: data in dict format - """ - endpoint = '/config/environments/' \ - '{env_id}/nodes/{node_id}/resources/{resource}' \ - '/values'.format(env_id=env_id, resource=resource, - node_id=node_id) - return self._put(endpoint, json=data) - - @logwrap - def put_node_resource_overrides(self, env_id, resource, node_id, data): - """Put node resource override value - - :param env_id: str or int - :param resource: name or id - :param node_id: str or int - :param data: data in dict format - """ - endpoint = '/config/environments/' \ - '{env_id}/nodes/{node_id}/resources/{resource}' \ - '/overrides'.format(env_id=env_id, resource=resource, - node_id=node_id) - return self._put(endpoint, json=data) - - @logwrap - def plugins_list(self): - """Get list of installed plugins""" - endpoint = '/plugins' - return self._get(endpoint).json() diff 
--git a/fuelweb_test/network_templates/ceph.yaml b/fuelweb_test/network_templates/ceph.yaml deleted file mode 100644 index a8b96bb58..000000000 --- a/fuelweb_test/network_templates/ceph.yaml +++ /dev/null @@ -1,307 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # management (vlan 101) storage (vlan 102) & private (vlan 103) - if4: enp0s6 # other networks (vlan 201-299) - if5: enp0s7 # fake bond - templates_for_node_role: - controller: - - public - - private - - storage - - common - - custom - compute: - - common - - private - - storage - - custom - ceph-osd: - - common - - storage - - custom - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - fake: - ep: br-fake - keystone: - ep: br-keystone - neutron-api: - ep: br-neutronapi - neutron-mesh: - ep: br-neutronmesh - swift: - ep: br-swift - sahara: - ep: br-sahara - cinder: - ep: br-cinder - glance: - ep: br-glance - heat: - ep: br-heat - nova: - ep: br-nova - nova-migration: - ep: br-novamigr - horizon: - ep: br-horizon - messaging: - ep: br-messaging - corosync: - ep: br-corosync - memcache: - ep: br-memcache - database: - ep: br-database - cinder-iscsi: - ep: br-cinderiscsi - swift-replication: - ep: br-swiftrepl - ceph-replication: - ep: br-cephrepl - ceph-radosgw: - ep: br-cephrados - network_scheme: - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if3 %>.102 - endpoints: - - br-storage - roles: - storage: br-storage - ceph/public: br-storage - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if3 %>.103 - endpoints: - - br-prv - roles: - neutron/private: br-prv - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-floating - ex: br-ex - common: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if3 %>.101 - - action: add-br - name: br-fake - - action: add-port - name: <% if5 %>.555 - - action: add-port - name: <% if5 %>.666 - - action: add-bond - bridge: br-fake - name: lnxbond0 - interfaces: - - <% if5 %>.555 - - <% if5 %>.666 - bond_properties: - mode: active-backup - interface_properties: {} - endpoints: - - br-fw-admin - - br-mgmt - - br-fake - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - management: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-fake - custom: - transformations: - - action: add-br - name: br-keystone - - action: add-port - bridge: br-keystone - name: <% if4 %>.202 - - action: add-br - name: br-neutronapi - - action: add-port - bridge: br-neutronapi - name: <% if4 %>.203 - - action: add-br - name: br-neutronmesh - - action: add-port - bridge: br-neutronmesh - name: <% if4 %>.204 - - action: add-br - name: br-swift - - action: add-port - bridge: br-swift - name: <% if4 %>.205 - - action: 
add-br - name: br-sahara - - action: add-port - bridge: br-sahara - name: <% if4 %>.206 - - action: add-br - name: br-cinder - - action: add-port - bridge: br-cinder - name: <% if4 %>.208 - - action: add-br - name: br-glance - - action: add-port - bridge: br-glance - name: <% if4 %>.209 - - action: add-br - name: br-heat - - action: add-port - bridge: br-heat - name: <% if4 %>.210 - - action: add-br - name: br-nova - - action: add-port - bridge: br-nova - name: <% if4 %>.211 - - action: add-br - name: br-novamigr - - action: add-port - bridge: br-novamigr - name: <% if4 %>.212 - - action: add-br - name: br-horizon - - action: add-port - bridge: br-horizon - name: <% if4 %>.214 - - action: add-br - name: br-messaging - - action: add-port - bridge: br-messaging - name: <% if4 %>.215 - - action: add-br - name: br-corosync - - action: add-port - bridge: br-corosync - name: <% if4 %>.216 - - action: add-br - name: br-memcache - - action: add-port - bridge: br-memcache - name: <% if4 %>.217 - - action: add-br - name: br-database - - action: add-port - bridge: br-database - name: <% if4 %>.218 - - action: add-br - name: br-cinderiscsi - - action: add-port - bridge: br-cinderiscsi - name: <% if4 %>.219 - - action: add-br - name: br-swiftrepl - - action: add-port - bridge: br-swiftrepl - name: <% if4 %>.220 - - action: add-br - name: br-cephrepl - - action: add-port - bridge: br-cephrepl - name: <% if4 %>.221 - - action: add-br - name: br-cephrados - - action: add-port - bridge: br-cephrados - name: <% if4 %>.222 - endpoints: - - br-keystone - - br-neutronapi - - br-neutronmesh - - br-swift - - br-sahara - - br-cinder - - br-glance - - br-heat - - br-nova - - br-novamigr - - br-horizon - - br-messaging - - br-corosync - - br-memcache - - br-database - - br-cinderiscsi - - br-swiftrepl - - br-cephrepl - - br-cephrados - roles: - keystone/api: br-keystone - neutron/api: br-neutronapi - neutron/mesh: br-neutronmesh - swift/api: br-swift - sahara/api: br-sahara - cinder/api: br-cinder - glance/api: br-glance - heat/api: br-heat - nova/api: br-nova - nova/migration: br-novamigr - horizon: br-horizon - mgmt/messaging: br-messaging - mgmt/corosync: br-corosync - mgmt/memcache: br-memcache - mgmt/database: br-database - cinder/iscsi: br-cinderiscsi - swift/replication: br-swiftrepl - ceph/replication: br-cephrepl - ceph/radosgw: br-cephrados diff --git a/fuelweb_test/network_templates/cinder.yaml b/fuelweb_test/network_templates/cinder.yaml deleted file mode 100644 index 063430a0e..000000000 --- a/fuelweb_test/network_templates/cinder.yaml +++ /dev/null @@ -1,302 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # management (vlan 101) storage (vlan 102) & private (vlan 103) - if4: enp0s6 # other networks (vlan 201-299) - if5: enp0s7 # fake bond - templates_for_node_role: - controller: - - public - - private - - storage - - common - - custom - compute: - - common - - private - - storage - - custom - cinder: - - common - - storage - - custom - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - fake: - ep: br-fake - keystone: - ep: br-keystone - neutron-api: - ep: br-neutronapi - neutron-mesh: - ep: br-neutronmesh - swift: - ep: br-swift - sahara: - ep: br-sahara - cinder: - ep: br-cinder - glance: - ep: br-glance - heat: - ep: br-heat - nova: - ep: br-nova - nova-migration: - ep: br-novamigr - horizon: - ep: br-horizon - 
messaging: - ep: br-messaging - corosync: - ep: br-corosync - memcache: - ep: br-memcache - database: - ep: br-database - cinder-iscsi: - ep: br-cinderiscsi - swift-replication: - ep: br-swiftrepl - network_scheme: - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if3 %>.102 - endpoints: - - br-storage - roles: - storage: br-storage - ceph/public: br-storage - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if3 %>.103 - endpoints: - - br-prv - roles: - neutron/private: br-prv - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-floating - ex: br-ex - common: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if3 %>.101 - - action: add-br - name: br-fake - - action: add-bond - bridge: br-fake - name: bond0 - interfaces: - - <% if5 %> - bond_properties: - mode: active-backup - interface_properties: {} - endpoints: - - br-fw-admin - - br-mgmt - - br-fake - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - management: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-fake - custom: - transformations: - - action: add-br - name: br-keystone - - action: add-port - bridge: br-keystone - name: <% if4 %>.202 - - action: add-br - name: br-neutronapi - - action: add-port - bridge: br-neutronapi - name: <% if4 %>.203 - - action: add-br - name: br-neutronmesh - - action: add-port - bridge: br-neutronmesh - name: <% if4 %>.204 - - action: add-br - name: br-swift - - action: add-port - bridge: br-swift - name: <% if4 %>.205 - - action: add-br - name: br-sahara - - action: add-port - bridge: br-sahara - name: <% if4 %>.206 - - action: add-br - name: br-cinder - - action: add-port - bridge: br-cinder - name: <% if4 %>.208 - - action: add-br - name: br-glance - - action: add-port - bridge: br-glance - name: <% if4 %>.209 - - action: add-br - name: br-heat - - action: add-port - bridge: br-heat - name: <% if4 %>.210 - - action: add-br - name: br-nova - - action: add-port - bridge: br-nova - name: <% if4 %>.211 - - action: add-br - name: br-novamigr - - action: add-port - bridge: br-novamigr - name: <% if4 %>.212 - - action: add-br - name: br-horizon - - action: add-port - bridge: br-horizon - name: <% if4 %>.214 - - action: add-br - name: br-messaging - - action: add-port - bridge: br-messaging - name: <% if4 %>.215 - - action: add-br - name: br-corosync - - action: add-port - bridge: br-corosync - name: <% if4 %>.216 - - action: add-br - name: br-memcache - - action: add-port - bridge: br-memcache - name: <% if4 %>.217 - - action: add-br - name: br-database - - action: add-port - bridge: br-database - name: <% if4 %>.218 - - action: add-br - name: br-cinderiscsi - - action: add-port - bridge: br-cinderiscsi - name: <% if4 %>.219 - - action: add-br - name: br-swiftrepl - - action: add-port - bridge: br-swiftrepl - name: <% if4 %>.220 - endpoints: - - br-keystone - - 
br-neutronapi - - br-neutronmesh - - br-swift - - br-sahara - - br-cinder - - br-glance - - br-heat - - br-nova - - br-novamigr - - br-horizon - - br-messaging - - br-corosync - - br-memcache - - br-database - - br-cinderiscsi - - br-swiftrepl - roles: - keystone/api: br-keystone - neutron/api: br-neutronapi - neutron/mesh: br-neutronmesh - swift/api: br-swift - sahara/api: br-sahara - cinder/api: br-cinder - glance/api: br-glance - heat/api: br-heat - nova/api: br-nova - nova/migration: br-novamigr - horizon: br-horizon - mgmt/messaging: br-messaging - mgmt/corosync: br-corosync - mgmt/memcache: br-memcache - mgmt/database: br-database - cinder/iscsi: br-cinderiscsi - swift/replication: br-swiftrepl - unused: - transformations: - - action: add-br - name: br-cephrepl - - action: add-port - bridge: br-cephrepl - name: <% if4 %>.221 - - action: add-br - name: br-cephrados - - action: add-port - bridge: br-cephrados - name: <% if4 %>.222 - endpoints: - - br-cephrepl - - br-cephrados - roles: - ceph/replication: br-cephrepl - ceph/radosgw: br-cephrados diff --git a/fuelweb_test/network_templates/cinder_add_nodes.yaml b/fuelweb_test/network_templates/cinder_add_nodes.yaml deleted file mode 100644 index 947f290b5..000000000 --- a/fuelweb_test/network_templates/cinder_add_nodes.yaml +++ /dev/null @@ -1,308 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # management (vlan 101) storage (vlan 102) & private (vlan 103) - if4: enp0s6 # other networks (vlan 201-299) - if5: enp0s7 # fake bond - node-4: - if1: enp0s3 # admin - if2: enp0s5 # public - if3: enp0s7 # management (vlan 101) storage (vlan 102) & private (vlan 103) - if4: enp0s6 # other networks (vlan 201-299) - if5: enp0s4 # fake bond - templates_for_node_role: - controller: - - public - - private - - storage - - common - - custom - compute: - - common - - private - - storage - - custom - cinder: - - common - - storage - - custom - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - fake: - ep: br-fake - keystone: - ep: br-keystone - neutron-api: - ep: br-neutronapi - neutron-mesh: - ep: br-neutronmesh - swift: - ep: br-swift - sahara: - ep: br-sahara - cinder: - ep: br-cinder - glance: - ep: br-glance - heat: - ep: br-heat - nova: - ep: br-nova - nova-migration: - ep: br-novamigr - horizon: - ep: br-horizon - messaging: - ep: br-messaging - corosync: - ep: br-corosync - memcache: - ep: br-memcache - database: - ep: br-database - cinder-iscsi: - ep: br-cinderiscsi - swift-replication: - ep: br-swiftrepl - network_scheme: - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if3 %>.102 - endpoints: - - br-storage - roles: - storage: br-storage - ceph/public: br-storage - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if3 %>.103 - endpoints: - - br-prv - roles: - neutron/private: br-prv - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - 
neutron/floating: br-floating - ex: br-ex - common: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if3 %>.101 - - action: add-br - name: br-fake - - action: add-bond - bridge: br-fake - name: bond0 - interfaces: - - <% if5 %> - bond_properties: - mode: active-backup - interface_properties: {} - endpoints: - - br-fw-admin - - br-mgmt - - br-fake - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - management: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-fake - custom: - transformations: - - action: add-br - name: br-keystone - - action: add-port - bridge: br-keystone - name: <% if4 %>.202 - - action: add-br - name: br-neutronapi - - action: add-port - bridge: br-neutronapi - name: <% if4 %>.203 - - action: add-br - name: br-neutronmesh - - action: add-port - bridge: br-neutronmesh - name: <% if4 %>.204 - - action: add-br - name: br-swift - - action: add-port - bridge: br-swift - name: <% if4 %>.205 - - action: add-br - name: br-sahara - - action: add-port - bridge: br-sahara - name: <% if4 %>.206 - - action: add-br - name: br-cinder - - action: add-port - bridge: br-cinder - name: <% if4 %>.208 - - action: add-br - name: br-glance - - action: add-port - bridge: br-glance - name: <% if4 %>.209 - - action: add-br - name: br-heat - - action: add-port - bridge: br-heat - name: <% if4 %>.210 - - action: add-br - name: br-nova - - action: add-port - bridge: br-nova - name: <% if4 %>.211 - - action: add-br - name: br-novamigr - - action: add-port - bridge: br-novamigr - name: <% if4 %>.212 - - action: add-br - name: br-horizon - - action: add-port - bridge: br-horizon - name: <% if4 %>.214 - - action: add-br - name: br-messaging - - action: add-port - bridge: br-messaging - name: <% if4 %>.215 - - action: add-br - name: br-corosync - - action: add-port - bridge: br-corosync - name: <% if4 %>.216 - - action: add-br - name: br-memcache - - action: add-port - bridge: br-memcache - name: <% if4 %>.217 - - action: add-br - name: br-database - - action: add-port - bridge: br-database - name: <% if4 %>.218 - - action: add-br - name: br-cinderiscsi - - action: add-port - bridge: br-cinderiscsi - name: <% if4 %>.219 - - action: add-br - name: br-swiftrepl - - action: add-port - bridge: br-swiftrepl - name: <% if4 %>.220 - endpoints: - - br-keystone - - br-neutronapi - - br-neutronmesh - - br-swift - - br-sahara - - br-cinder - - br-glance - - br-heat - - br-nova - - br-novamigr - - br-horizon - - br-messaging - - br-corosync - - br-memcache - - br-database - - br-cinderiscsi - - br-swiftrepl - roles: - keystone/api: br-keystone - neutron/api: br-neutronapi - neutron/mesh: br-neutronmesh - swift/api: br-swift - sahara/api: br-sahara - cinder/api: br-cinder - glance/api: br-glance - heat/api: br-heat - nova/api: br-nova - nova/migration: br-novamigr - horizon: br-horizon - mgmt/messaging: br-messaging - mgmt/corosync: br-corosync - mgmt/memcache: br-memcache - mgmt/database: br-database - cinder/iscsi: br-cinderiscsi - swift/replication: br-swiftrepl - unused: - transformations: - - action: add-br - name: br-cephrepl - - action: add-port - bridge: br-cephrepl - name: <% if4 %>.221 - - action: add-br - name: br-cephrados - - action: add-port - bridge: br-cephrados - name: <% if4 %>.222 - endpoints: - - br-cephrepl - - br-cephrados - roles: - ceph/replication: br-cephrepl - ceph/radosgw: br-cephrados 
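The <% ifN %> placeholders in the port names throughout these templates are
resolved against the nic_mapping section, with a per-node entry (such as
node-4 above) overriding the default mapping. A rough sketch of that
substitution, using a toy regex-based renderer rather than Nailgun's real
template engine:

import re

NIC_MAPPING = {
    'default': {'if1': 'enp0s3', 'if2': 'enp0s4', 'if3': 'enp0s5'},
    'node-4': {'if2': 'enp0s5'},  # per-node override, as in the template above
}


def render_iface(text, node=None):
    """Substitute <% ifN %> markers using the node's NIC mapping."""
    mapping = dict(NIC_MAPPING['default'])
    mapping.update(NIC_MAPPING.get(node, {}))
    return re.sub(r'<%\s*(\w+)\s*%>', lambda m: mapping[m.group(1)], text)


print(render_iface('<% if3 %>.102'))             # enp0s5.102 on defaults
print(render_iface('<% if2 %>', node='node-4'))  # enp0s5 after the override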
diff --git a/fuelweb_test/network_templates/default.yaml b/fuelweb_test/network_templates/default.yaml deleted file mode 100644 index 6d5966e18..000000000 --- a/fuelweb_test/network_templates/default.yaml +++ /dev/null @@ -1,135 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # management - if4: enp0s6 # private - if5: enp0s7 # storage - templates_for_node_role: - controller: - - public - - private - - storage - - common - compute: - - common - - private - - storage - cinder: - - common - - storage - ceph-osd: - - common - - storage - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - network_scheme: - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if5 %> - endpoints: - - br-storage - roles: - cinder/iscsi: br-storage - swift/replication: br-storage - ceph/replication: br-storage - storage: br-storage - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if4 %> - endpoints: - - br-prv - roles: - neutron/private: br-prv - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-floating - ceph/radosgw: br-ex - ex: br-ex - common: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if3 %> - endpoints: - - br-fw-admin - - br-mgmt - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - management: br-mgmt - keystone/api: br-mgmt - neutron/api: br-mgmt - neutron/mesh: br-mgmt - swift/api: br-mgmt - sahara/api: br-mgmt - cinder/api: br-mgmt - glance/api: br-mgmt - heat/api: br-mgmt - nova/api: br-mgmt - nova/migration: br-mgmt - horizon: br-mgmt - mgmt/api: br-mgmt - mgmt/memcache: br-mgmt - mgmt/database: br-mgmt - mgmt/messaging: br-mgmt - mgmt/corosync: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - ceph/public: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-mgmt - diff --git a/fuelweb_test/network_templates/default_no_mgmt_nwk.yaml b/fuelweb_test/network_templates/default_no_mgmt_nwk.yaml deleted file mode 100644 index 7a62ce13c..000000000 --- a/fuelweb_test/network_templates/default_no_mgmt_nwk.yaml +++ /dev/null @@ -1,126 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # private - if4: enp0s6 # storage - templates_for_node_role: - controller: - - public - - private - - storage - - common - compute: - - common - - private - - storage - cinder: - - common - - storage - ceph-osd: - - common - - storage - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - fuelweb_admin: - ep: br-fw-admin - network_scheme: - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if4 %> - endpoints: - - br-storage - roles: - cinder/iscsi: br-storage - 
swift/api: br-storage - swift/replication: br-storage - ceph/replication: br-storage - storage: br-storage - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if3 %> - endpoints: - - br-prv - roles: - neutron/private: br-prv - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-floating - ceph/radosgw: br-ex - ex: br-ex - common: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - endpoints: - - br-fw-admin - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - management: br-fw-admin - keystone/api: br-fw-admin - neutron/api: br-fw-admin - neutron/mesh: br-fw-admin - sahara/api: br-fw-admin - cinder/api: br-fw-admin - glance/api: br-fw-admin - heat/api: br-fw-admin - nova/api: br-fw-admin - nova/migration: br-fw-admin - horizon: br-fw-admin - mgmt/api: br-fw-admin - mgmt/memcache: br-fw-admin - mgmt/database: br-fw-admin - mgmt/messaging: br-fw-admin - mgmt/corosync: br-fw-admin - mgmt/vip: br-fw-admin - mgmt/api: br-fw-admin - ceph/public: br-fw-admin - ironic/api: br-fw-admin - ironic/baremetal: br-fw-admin - diff --git a/fuelweb_test/network_templates/default_ovs.yaml b/fuelweb_test/network_templates/default_ovs.yaml deleted file mode 100644 index 04aa60a89..000000000 --- a/fuelweb_test/network_templates/default_ovs.yaml +++ /dev/null @@ -1,146 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s7 # management - if4: enp0s5 # private - if5: enp0s6 # storage - templates_for_node_role: - controller: - - public - - private - - storage - - common - compute: - - common - - private - - storage - cinder: - - common - - storage - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - network_scheme: - storage: - transformations: - - action: add-br - name: br-storage - provider: ovs - - action: add-port - bridge: br-storage - name: <% if5 %> - endpoints: - - br-storage - roles: - cinder/iscsi: br-storage - swift/replication: br-storage - ceph/replication: br-storage - storage: br-storage - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - provider: ovs - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if4 %> - endpoints: - - br-prv - roles: - neutron/private: br-prv - public: - transformations: - - action: add-br - name: br-ex - provider: ovs - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-floating - ceph/radosgw: br-ex - ex: br-ex - common: - transformations: - - action: add-br - name: br-fw-admin - provider: ovs - - action: add-br - name: br-test - provider: ovs - - action: add-patch - bridges: - - br-fw-admin - - 
br-test - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - - action: add-br - name: br-mgmt - provider: ovs - - action: add-port - bridge: br-mgmt - name: <% if3 %> - endpoints: - - br-fw-admin - - br-mgmt - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - management: br-mgmt - keystone/api: br-mgmt - neutron/api: br-mgmt - neutron/mesh: br-mgmt - swift/api: br-mgmt - sahara/api: br-mgmt - cinder/api: br-mgmt - glance/api: br-mgmt - heat/api: br-mgmt - nova/api: br-mgmt - nova/migration: br-mgmt - horizon: br-mgmt - mgmt/api: br-mgmt - mgmt/memcache: br-mgmt - mgmt/database: br-mgmt - mgmt/messaging: br-mgmt - mgmt/corosync: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - ceph/public: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-mgmt - diff --git a/fuelweb_test/network_templates/hardware.yaml b/fuelweb_test/network_templates/hardware.yaml deleted file mode 100644 index 7475b7787..000000000 --- a/fuelweb_test/network_templates/hardware.yaml +++ /dev/null @@ -1,223 +0,0 @@ -adv_net_template: - default: - network_assignments: - ceph: - ep: br-ceph - database: - ep: br-database - fuelweb_admin: - ep: br-fw-admin - ha: - ep: br-ha - management: - ep: br-mgmt - messaging: - ep: br-messaging - openstack: - ep: br-openstack - private: - ep: br-prv - public: - ep: br-ex - services: - ep: br-services - storage: - ep: br-storage - network_scheme: - admin: - endpoints: - - br-fw-admin - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - ceph: - endpoints: - - br-ceph - roles: - ceph/replication: br-ceph - transformations: - - action: add-br - name: br-ceph - - action: add-port - bridge: br-ceph - name: <% if1 %>.364 - database: - endpoints: - - br-database - roles: - mgmt/database: br-database - mgmt/memcache: br-database - transformations: - - action: add-br - name: br-database - - action: add-port - bridge: br-database - name: <% if2 %>.367 - ha: - endpoints: - - br-ha - roles: - mgmt/corosync: br-ha - transformations: - - action: add-br - name: br-ha - - action: add-port - bridge: br-ha - name: <% if2 %>.366 - management: - endpoints: - - br-mgmt - roles: - management: br-mgmt - mgmt/api: br-mgmt - mgmt/vip: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-mgmt - transformations: - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if2 %>.360 - messaging: - endpoints: - - br-messaging - roles: - mgmt/messaging: br-messaging - transformations: - - action: add-br - name: br-messaging - - action: add-port - bridge: br-messaging - name: <% if2 %>.368 - openstack: - endpoints: - - br-openstack - roles: - horizon: br-openstack - keystone/api: br-openstack - neutron/api: br-openstack - neutron/mesh: br-openstack - nova/api: br-openstack - nova/migration: br-openstack - transformations: - - action: add-br - name: br-openstack - - action: add-port - bridge: br-openstack - name: <% if2 %>.365 - private: - endpoints: - - br-prv - roles: - neutron/private: br-prv - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - mtu: 65000 - provider: ovs - - action: add-port - bridge: br-aux - name: <% if1 %>.362 - public: - endpoints: - - br-ex - roles: - ex: br-ex - neutron/floating: br-floating - public/vip: br-ex - transformations: - - action: add-br - name: br-ex - - action: add-br - name: 
br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - mtu: 65000 - provider: ovs - - action: add-port - bridge: br-ex - name: <% if2 %> - services: - endpoints: - - br-services - roles: - heat/api: br-services - sahara/api: br-services - transformations: - - action: add-br - name: br-services - - action: add-port - bridge: br-services - name: <% if1 %>.363 - storage: - endpoints: - - br-storage - roles: - ceph/public: br-storage - ceph/radosgw: br-storage - cinder/api: br-storage - cinder/iscsi: br-storage - glance/api: br-storage - storage: br-storage - swift/api: br-storage - swift/replication: br-storage - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if2 %>.361 - nic_mapping: - default: - if1: enp0s3 - if2: enp0s4 - templates_for_node_role: - ceph-osd: - - admin - - public - - management - - storage - - ceph - - messaging - cinder: - - admin - - public - - management - - storage - - messaging - compute: - - admin - - public - - management - - storage - - private - - openstack - - messaging - controller: - - admin - - public - - management - - storage - - private - - services - - ha - - openstack - - database - - messaging diff --git a/fuelweb_test/network_templates/public_api.yaml b/fuelweb_test/network_templates/public_api.yaml deleted file mode 100644 index fd1121f6f..000000000 --- a/fuelweb_test/network_templates/public_api.yaml +++ /dev/null @@ -1,164 +0,0 @@ -adv_net_template: - default: - network_assignments: - fuelweb_admin: - ep: br-fw-admin - management: - ep: br-mgmt - os-api: - ep: br-osapi - private: - ep: br-prv - public: - ep: br-ex - storage: - ep: br-storage - network_scheme: - admin: - endpoints: - - br-fw-admin - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - management: - endpoints: - - br-mgmt - roles: - ceph/radosgw: br-mgmt - ceph/replication: br-mgmt - cinder/api: br-mgmt - cinder/iscsi: br-mgmt - glance/api: br-mgmt - heat/api: br-mgmt - horizon: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-mgmt - keystone/api: br-mgmt - management: br-mgmt - mgmt/api: br-mgmt - mgmt/corosync: br-mgmt - mgmt/database: br-mgmt - mgmt/memcache: br-mgmt - mgmt/messaging: br-mgmt - mgmt/vip: br-mgmt - neutron/api: br-mgmt - neutron/mesh: br-mgmt - nova/api: br-mgmt - nova/migration: br-mgmt - sahara/api: br-mgmt - swift/api: br-mgmt - swift/replication: br-mgmt - transformations: - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if4 %> - os-api: - endpoints: - - br-osapi - roles: - public/vip: br-osapi - transformations: - - action: add-br - name: br-osapi - - action: add-port - bridge: br-osapi - name: <% if3 %> - private: - endpoints: - - br-prv - roles: - neutron/private: br-prv - private: br-prv - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - mtu: 65000 - provider: ovs - - action: add-port - bridge: br-aux - name: <% if5 %> - public: - endpoints: - - br-ex - roles: - ex: br-ex - neutron/floating: br-ex - public: br-ex - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - mtu: 65000 - provider: ovs - - action: add-port - bridge: br-ex - name: <% if2 %> - storage: - endpoints: - - br-storage - 
roles: - ceph/public: br-storage - ceph/radosgw: br-storage - ceph/replication: br-storage - storage: br-storage - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if6 %>.101 - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # os-api - if4: enp0s6 # management - if5: enp0s7 # private - if6: enp0s8 # storage - templates_for_node_role: - ceph-osd: - - admin - - public - - management - - private - - storage - cinder: - - admin - - public - - management - - private - - storage - compute: - - admin - - public - - management - - private - - storage - controller: - - admin - - public - - os-api - - management - - private - - storage diff --git a/fuelweb_test/network_templates/two_nodegroups.yaml b/fuelweb_test/network_templates/two_nodegroups.yaml deleted file mode 100644 index 0f8105e36..000000000 --- a/fuelweb_test/network_templates/two_nodegroups.yaml +++ /dev/null @@ -1,283 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # management - if4: enp0s6 # private - if5: enp0s7 # storage - templates_for_node_role: - controller: - - admin - - public - - management - - private - - storage - compute: - - admin - - public - - management - - private - - storage - cinder: - - admin - - public - - management - - private - - storage - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - network_scheme: - admin: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - endpoints: - - br-fw-admin - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-ex - ex: br-ex - public: br-ex - management: - transformations: - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if3 %> - endpoints: - - br-mgmt - roles: - management: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - keystone/api: br-mgmt - neutron/api: br-mgmt - neutron/mesh: br-mgmt - swift/api: br-mgmt - sahara/api: br-mgmt - cinder/api: br-mgmt - glance/api: br-mgmt - heat/api: br-mgmt - nova/api: br-mgmt - nova/migration: br-mgmt - horizon: br-mgmt - mgmt/messaging: br-mgmt - mgmt/corosync: br-mgmt - mgmt/memcache: br-mgmt - mgmt/database: br-mgmt - cinder/iscsi: br-mgmt - swift/replication: br-mgmt - ceph/replication: br-mgmt - ceph/radosgw: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-mgmt - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if4 %> - endpoints: - - br-prv - roles: - private: br-prv - neutron/private: br-prv - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if5 %> - endpoints: - - br-storage - roles: - storage: br-storage - ceph/public: br-storage - group-custom-1: - nic_mapping: - default: - if11: enp0s3 # admin - if12: enp0s4 # public - if13: enp0s5 # management - if14: 
enp0s6 # private - if15: enp0s7 # storage - templates_for_node_role: - controller: - - admin - - public - - management - - private - - storage - compute: - - admin - - public - - management - - private - - storage - cinder: - - admin - - public - - management - - private - - storage - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - network_scheme: - admin: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if11 %> - endpoints: - - br-fw-admin - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if12 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-ex - ex: br-ex - public: br-ex - management: - transformations: - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if13 %> - endpoints: - - br-mgmt - roles: - management: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - keystone/api: br-mgmt - neutron/api: br-mgmt - neutron/mesh: br-mgmt - swift/api: br-mgmt - sahara/api: br-mgmt - cinder/api: br-mgmt - glance/api: br-mgmt - heat/api: br-mgmt - nova/api: br-mgmt - nova/migration: br-mgmt - horizon: br-mgmt - mgmt/messaging: br-mgmt - mgmt/corosync: br-mgmt - mgmt/memcache: br-mgmt - mgmt/database: br-mgmt - cinder/iscsi: br-mgmt - swift/replication: br-mgmt - ceph/replication: br-mgmt - ceph/radosgw: br-mgmt - ironic/api: br-mgmt - ironic/baremetal: br-mgmt - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if14 %> - endpoints: - - br-prv - roles: - private: br-prv - neutron/private: br-prv - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if15 %> - endpoints: - - br-storage - roles: - storage: br-storage - ceph/public: br-storage diff --git a/fuelweb_test/network_templates/upgrades.yaml b/fuelweb_test/network_templates/upgrades.yaml deleted file mode 100644 index e06677941..000000000 --- a/fuelweb_test/network_templates/upgrades.yaml +++ /dev/null @@ -1,313 +0,0 @@ -adv_net_template: - default: - nic_mapping: - default: - if1: enp0s3 # admin - if2: enp0s4 # public - if3: enp0s5 # management (vlan 101) storage (vlan 102) & private (vlan 103) - if4: enp0s6 # other networks (vlan 201-299) - templates_for_node_role: - controller: - - public - - private - - storage - - common - - custom - compute: - - common - - private - - storage - - custom - ceph-osd: - - common - - storage - - custom - network_assignments: - storage: - ep: br-storage - private: - ep: br-prv - public: - ep: br-ex - management: - ep: br-mgmt - fuelweb_admin: - ep: br-fw-admin - mongo: - ep: br-mongo - keystone: - ep: br-keystone - neutron-api: - ep: br-neutronapi - neutron-mesh: - ep: br-neutronmesh - swift: - ep: br-swift - sahara: - ep: br-sahara - ceilometer: - ep: br-ceilometer - cinder: - ep: br-cinder - glance: - ep: br-glance - heat: - ep: br-heat - nova: - ep: br-nova - nova-migration: - ep: br-novamigr - murano: - ep: br-murano - horizon: - ep: br-horizon - messaging: - ep: 
br-messaging - corosync: - ep: br-corosync - memcache: - ep: br-memcache - database: - ep: br-database - cinder-iscsi: - ep: br-cinderiscsi - swift-replication: - ep: br-swiftrepl - ceph-replication: - ep: br-cephrepl - ceph-radosgw: - ep: br-cephrados - network_scheme: - storage: - transformations: - - action: add-br - name: br-storage - - action: add-port - bridge: br-storage - name: <% if3 %>.102 - endpoints: - - br-storage - roles: - storage: br-storage - ceph/public: br-storage - private: - transformations: - - action: add-br - name: br-prv - provider: ovs - - action: add-br - name: br-aux - - action: add-patch - bridges: - - br-prv - - br-aux - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-aux - name: <% if3 %>.103 - endpoints: - - br-prv - roles: - neutron/private: br-prv - public: - transformations: - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-port - bridge: br-ex - name: <% if2 %> - endpoints: - - br-ex - roles: - public/vip: br-ex - neutron/floating: br-floating - ex: br-ex - common: - transformations: - - action: add-br - name: br-fw-admin - - action: add-port - bridge: br-fw-admin - name: <% if1 %> - - action: add-br - name: br-mgmt - - action: add-port - bridge: br-mgmt - name: <% if3 %>.101 - endpoints: - - br-fw-admin - - br-mgmt - roles: - admin/pxe: br-fw-admin - fw-admin: br-fw-admin - management: br-mgmt - mgmt/vip: br-mgmt - mgmt/api: br-mgmt - custom: - transformations: - - action: add-br - name: br-mongo - - action: add-port - bridge: br-mongo - name: <% if4 %>.201 - - action: add-br - name: br-keystone - - action: add-port - bridge: br-keystone - name: <% if4 %>.202 - - action: add-br - name: br-neutronapi - - action: add-port - bridge: br-neutronapi - name: <% if4 %>.203 - - action: add-br - name: br-neutronmesh - - action: add-port - bridge: br-neutronmesh - name: <% if4 %>.204 - - action: add-br - name: br-swift - - action: add-port - bridge: br-swift - name: <% if4 %>.205 - - action: add-br - name: br-sahara - - action: add-port - bridge: br-sahara - name: <% if4 %>.206 - - action: add-br - name: br-ceilometer - - action: add-port - bridge: br-ceilometer - name: <% if4 %>.207 - - action: add-br - name: br-cinder - - action: add-port - bridge: br-cinder - name: <% if4 %>.208 - - action: add-br - name: br-glance - - action: add-port - bridge: br-glance - name: <% if4 %>.209 - - action: add-br - name: br-heat - - action: add-port - bridge: br-heat - name: <% if4 %>.210 - - action: add-br - name: br-nova - - action: add-port - bridge: br-nova - name: <% if4 %>.211 - - action: add-br - name: br-novamigr - - action: add-port - bridge: br-novamigr - name: <% if4 %>.212 - - action: add-br - name: br-murano - - action: add-port - bridge: br-murano - name: <% if4 %>.213 - - action: add-br - name: br-horizon - - action: add-port - bridge: br-horizon - name: <% if4 %>.214 - - action: add-br - name: br-messaging - - action: add-port - bridge: br-messaging - name: <% if4 %>.215 - - action: add-br - name: br-corosync - - action: add-port - bridge: br-corosync - name: <% if4 %>.216 - - action: add-br - name: br-memcache - - action: add-port - bridge: br-memcache - name: <% if4 %>.217 - - action: add-br - name: br-database - - action: add-port - bridge: br-database - name: <% if4 %>.218 - - action: add-br - name: br-cinderiscsi - - action: add-port - bridge: br-cinderiscsi - name: <% if4 %>.219 - - action: add-br - name: 
br-swiftrepl - - action: add-port - bridge: br-swiftrepl - name: <% if4 %>.220 - - action: add-br - name: br-cephrepl - - action: add-port - bridge: br-cephrepl - name: <% if4 %>.221 - - action: add-br - name: br-cephrados - - action: add-port - bridge: br-cephrados - name: <% if4 %>.222 - endpoints: - - br-mongo - - br-keystone - - br-neutronapi - - br-neutronmesh - - br-swift - - br-sahara - - br-ceilometer - - br-cinder - - br-glance - - br-heat - - br-nova - - br-novamigr - - br-murano - - br-horizon - - br-messaging - - br-corosync - - br-memcache - - br-database - - br-cinderiscsi - - br-swiftrepl - - br-cephrepl - - br-cephrados - roles: - mongo/db: br-mongo - keystone/api: br-keystone - neutron/api: br-neutronapi - neutron/mesh: br-neutronmesh - swift/api: br-swift - sahara/api: br-sahara - ceilometer/api: br-ceilometer - cinder/api: br-cinder - glance/api: br-glance - heat/api: br-heat - nova/api: br-nova - nova/migration: br-novamigr - murano/api: br-murano - horizon: br-horizon - mgmt/messaging: br-messaging - mgmt/corosync: br-corosync - mgmt/memcache: br-memcache - mgmt/database: br-database - cinder/iscsi: br-cinderiscsi - swift/replication: br-swiftrepl - ceph/replication: br-cephrepl - ceph/radosgw: br-cephrados diff --git a/fuelweb_test/ostf_test_mapping.py b/fuelweb_test/ostf_test_mapping.py deleted file mode 100644 index 1d1e7a48b..000000000 --- a/fuelweb_test/ostf_test_mapping.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -OSTF_TEST_MAPPING = { - 'Check data replication over mysql': 'fuel_health.tests.ha.test_' - 'mysql_replication.' - 'TestMysqlReplication.' - 'test_mysql_replication', - 'Check amount of tables in ' - 'databases is the same on each node': 'fuel_health.tests.ha.' - 'test_mysql_replication.' - 'TestMysqlReplication.' - 'test_os_databases', - 'Check mysql environment state': 'fuel_health.tests.ha.' - 'test_mysql_replication.' - 'TestMysqlReplication.' - 'test_state_of_mysql_cluster', - 'Check galera environment state': 'fuel_health.tests.ha.' - 'test_mysql_replication.' - 'TestMysqlReplication.' - 'test_state_of_galera_cluster', - 'Check RabbitMQ is available': 'fuel_health.tests.ha.' - 'test_rabbit.RabbitSanityTest.' - 'test_001_rabbitmqctl_status', - 'RabbitMQ availability': 'fuel_health.tests.ha.test_rabbit.' - 'RabbitSanityTest.' - 'test_002_rabbitmqctl_status_ubuntu', - 'List ceilometer availability': 'fuel_health.tests.sanity.' - 'test_sanity_ceilometer.' - 'CeilometerApiTests.test_list_meters', - 'Request instance list': 'fuel_health.tests.sanity.test_sanity_compute.' - 'SanityComputeTest.test_list_instances', - 'Request image list': 'fuel_health.tests.sanity.test_sanity_compute.' - 'SanityComputeTest.test_list_images', - 'Request volume list': 'fuel_health.tests.sanity.test_sanity_compute.' - 'SanityComputeTest.test_list_volumes', - 'Request snapshot list': 'fuel_health.tests.sanity.test_sanity_compute.' 
- 'SanityComputeTest.test_list_snapshots', - 'Request flavor list': 'fuel_health.tests.sanity.test_sanity_compute.' - 'SanityComputeTest.test_list_flavors', - 'Request absolute limits list': 'fuel_health.tests.sanity.' - 'test_sanity_compute.SanityComputeTest.' - 'test_list_rate_limits', - 'Request stack list': 'fuel_health.tests.sanity.test_sanity_heat.' - 'SanityHeatTest.test_list_stacks', - 'Request active services list': 'fuel_health.tests.sanity.' - 'test_sanity_identity.' - 'SanityIdentityTest.test_list_services', - 'Request user list': 'fuel_health.tests.sanity.test_sanity_identity.' - 'SanityIdentityTest.test_list_users', - 'Check that required services are running': 'fuel_health.tests.sanity.' - 'test_sanity_infrastructure.' - 'SanityInfrastructureTest.' - 'test_001_services_state', - 'Check internet connectivity from a compute': 'fuel_health.tests.sanity.' - 'test_sanity_infrastructure.' - 'SanityInfrastructureTest.' - 'test_002_internet_' - 'connectivity_from_compute', - 'Check DNS resolution on compute node': 'fuel_health.tests.sanity.' - 'test_sanity_infrastructure.' - 'SanityInfrastructureTest.' - 'test_003_dns_resolution', - 'Create and delete Murano environment': 'fuel_health.tests.sanity.' - 'test_sanity_murano.' - 'MuranoSanityTests.' - 'test_create_and_delete_service', - 'Request list of networks': 'fuel_health.tests.sanity.' - 'test_sanity_networking.NetworksTest.' - 'test_list_networks', - 'Sahara tests to create/list/delete node' - ' group and cluster templates': 'fuel_health.tests.sanity.' - 'test_sanity_sahara.' - 'SanitySaharaTests.test_sanity_sahara', - 'Create instance flavor': 'fuel_health.tests.smoke.test_create_flavor.' - 'FlavorsAdminTest.test_create_flavor', - 'Create volume and attach it to instance': 'fuel_health.tests.smoke.' - 'test_create_volume.' - 'VolumesTest.' - 'test_volume_create', - 'Create keypair': 'fuel_health.tests.smoke.' - 'test_nova_create_instance_with_connectivity.' - 'TestNovaNetwork.test_001_create_keypairs', - 'Create security group': 'fuel_health.tests.smoke.' - 'test_nova_create_instance_with_connectivity.' - 'TestNovaNetwork.' - 'test_002_create_security_groups', - 'Check network parameters': 'fuel_health.tests.smoke.' - 'test_nova_create_instance_with_connectivity.' - 'TestNovaNetwork.test_003_check_networks', - 'Launch instance': 'fuel_health.tests.smoke.' - 'test_nova_create_instance_with_connectivity.' - 'TestNovaNetwork.test_004_create_servers', - 'Check that VM is accessible ' - 'via floating IP address': 'fuel_health.tests.smoke.' - 'test_nova_create_instance_with_connectivity.' - 'TestNovaNetwork.' - 'test_005_check_public_network_connectivity', - 'Check network connectivity' - ' from instance via floating IP': 'fuel_health.tests.smoke.' - 'test_nova_create_instance_with_' - 'connectivity.TestNovaNetwork.' - 'test_008_check_public_instance_' - 'connectivity_from_instance', - 'Check network connectivity from ' - 'instance without floating IP': 'fuel_health.tests.smoke.test_nova_create_' - 'instance_with_connectivity.' - 'TestNovaNetwork.test_006_check_' - 'internet_connectivity_instance_' - 'without_floatingIP', - 'Launch instance, create snapshot,' - ' launch instance from snapshot': 'fuel_health.tests.smoke.' - 'test_nova_image_actions.' - 'TestImageAction.test_snapshot', - 'Create user and authenticate with it to Horizon': 'fuel_health.tests.' 
- 'smoke.test_' - 'user_create.TestUserTe' - 'nantRole.test_' - 'create_user', }
diff --git a/fuelweb_test/rally/scenarios/nova.json b/fuelweb_test/rally/scenarios/nova.json
deleted file mode 100644
index 80624e4fc..000000000
--- a/fuelweb_test/rally/scenarios/nova.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "NovaServers.boot_and_delete_server": [
-        {
-            "args": {
-                "flavor": {
-                    "name": "m1.micro"
-                },
-                "image": {
-                    "name": "TestVM"
-                },
-                "force_delete": false
-            },
-            "runner": {
-                "type": "constant",
-                "times": 30,
-                "concurrency": 3
-            },
-            "context": {
-                "users": {
-                    "tenants": 3,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/fuelweb_test/rally/scenarios/scenarios.yaml b/fuelweb_test/rally/scenarios/scenarios.yaml
deleted file mode 100644
index 1c1fe1be1..000000000
--- a/fuelweb_test/rally/scenarios/scenarios.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-categories:
-  - undefined:
-      tags:
-      scenarios:
-        nova
-  - nova:
-      tags:
-        nova
-      scenarios:
-        nova
-  - neutron:
-      tags:
-        neutron
-      scenarios:
-        neutron
-        nova
\ No newline at end of file
diff --git a/fuelweb_test/requirements-devops-source.txt b/fuelweb_test/requirements-devops-source.txt
deleted file mode 100644
index 903aef5ba..000000000
--- a/fuelweb_test/requirements-devops-source.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# TODO(ddmitriev): remove this requirement once fuel-devops
-# is available as a PyPI package.
-# Check virtual environment update scripts on CI first.
-# If you want to use the SQLite DB, the postgres lib is not required and
-# you can remove the 'postgre' egg extra.
-git+git://github.com/openstack/fuel-devops.git@3.0.5#egg=project[postgre]
\ No newline at end of file
diff --git a/fuelweb_test/requirements-devops.txt b/fuelweb_test/requirements-devops.txt
deleted file mode 100644
index 8dbf99886..000000000
--- a/fuelweb_test/requirements-devops.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# TODO(ddmitriev): move this requirement to requirements.txt
-# once fuel-devops is available as a PyPI package
-fuel-devops>=3.0.3
diff --git a/fuelweb_test/requirements.txt b/fuelweb_test/requirements.txt
deleted file mode 100644
index 9d799c3ad..000000000
--- a/fuelweb_test/requirements.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-setuptools>=22.0,!=24.0.0 # PSF/ZPL # MANDATORY TO BUILD CORRECT VIRTUALENV
-pycparser<2.14 # BSD # Version pinned to avoid AssertionError: sorry, but this version only supports 100 named groups
-pip>=6.0 # MIT # MANDATORY TO BUILD CORRECT VIRTUALENV
-nose==1.2.1
-anyjson>=0.3.3 # BSD
-paramiko>=1.16.0 # LGPL
-proboscis==1.2.6.0
-junitxml>=0.7.0
-netaddr>=0.7.12,!=0.7.16 # BSD
-pyOpenSSL>=0.14 # Apache-2.0
-Sphinx # BSD # Not required for tests, but required to build docs (pbr)
-docutils # Not required for tests, but required to build docs (pbr)
-markupsafe # Not required for tests, but required to build docs (pbr)
-pytz>=2013.6 # MIT # Not required for tests, but required to build docs (pbr)
-keystoneauth1>=2.1.0 # Apache-2.0
-python-glanceclient>=2.0.0 # Apache-2.0
-python-keystoneclient>=1.6.0,!=1.8.0,!=2.1.0 # Apache-2.0
-python-novaclient>=2.29.0,!=2.33.0 # Apache-2.0
-python-cinderclient>=1.3.1 # Apache-2.0
-python-neutronclient>=2.6.0,!=4.1.0 # Apache-2.0
-python-ironicclient>=1.1.0 # Apache-2.0
-python-heatclient>=0.6.0 # Apache-2.0
-oslo.i18n>=3.1.0
-six>=1.9.0 # MIT
-Jinja2>=2.8 # BSD License (3 clause)
-AllPairs==2.0.1
-launchpadlib
-beautifulsoup4>=4.2.0
-joblib>=0.8.4
-pytest>=3.0.0
-python-jenkins>=0.4.12
-pyyaml>=3.1.0 # MIT
-requests>=2.8.1,!=2.9.0 # Apache-2.0
-tablib>=0.11.2
-xmltodict>=0.10.1 # MIT
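Since these pins are what builds a working virtualenv, a quick way to verify
an existing environment against them (pkg_resources ships with setuptools;
the relative file path below is an assumption for illustration):

import pkg_resources

with open('fuelweb_test/requirements.txt') as reqs:
    for line in reqs:
        req = line.split('#')[0].strip()  # drop inline license comments
        if not req:
            continue
        try:
            pkg_resources.require(req)
        except Exception as exc:  # DistributionNotFound / VersionConflict
            print('unsatisfied: {0} ({1})'.format(req, exc))

\ No newline at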
end of file diff --git a/fuelweb_test/settings.py b/fuelweb_test/settings.py deleted file mode 100644 index ffeaff4ea..000000000 --- a/fuelweb_test/settings.py +++ /dev/null @@ -1,761 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os -import time - -_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, - '0': False, 'no': False, 'false': False, 'off': False} - - -def get_var_as_bool(name, default): - value = os.environ.get(name, '') - return _boolean_states.get(value.lower(), default) - - -# Default timezone for clear logging -TIME_ZONE = 'UTC' - -FUELQA_TEMPLATE = os.environ.get("FUELQA_TEMPLATE", None) -ENV_NAME = os.environ.get("ENV_NAME", "fuel_system_test") -VIRTUAL_ENV = os.environ.get("VIRTUAL_ENV", "") - -ACPI_ENABLE = get_var_as_bool('DRIVER_ENABLE_ACPI', False) - -nic_name_mask = 'enp0s{}' if not ACPI_ENABLE else 'ens{}' - -INTERFACES_DICT = { - 'eth0': os.environ.get('IFACE_0', nic_name_mask.format(3)), - 'eth1': os.environ.get('IFACE_1', nic_name_mask.format(4)), - 'eth2': os.environ.get('IFACE_2', nic_name_mask.format(5)), - 'eth3': os.environ.get('IFACE_3', nic_name_mask.format(6)), - 'eth4': os.environ.get('IFACE_4', nic_name_mask.format(7)), - 'eth5': os.environ.get('IFACE_5', nic_name_mask.format(8)), -} - - -# NOTE(akostrikov) The method is here to avoid problems with imports -# Refactor when additional logic is needed like info from master node/devops. 
-def iface_alias(interface_name): - return INTERFACES_DICT[interface_name] - - -ISO_PATH = os.environ.get('ISO_PATH') -LOGS_DIR = os.environ.get('LOGS_DIR', os.getcwd()) -# cdrom or usb -ADMIN_BOOT_DEVICE = os.environ.get('ADMIN_BOOT_DEVICE', 'cdrom') -ISO_MIRANTIS_FEATURE_GROUP = get_var_as_bool( - 'ISO_MIRANTIS_FEATURE_GROUP', - False) -ISO_LABEL = 'Mirantis_Fuel' if ISO_MIRANTIS_FEATURE_GROUP else 'OpenStack_Fuel' -SHOW_FUELMENU = get_var_as_bool('SHOW_FUELMENU', False) -DNS = os.environ.get('DNS', '8.8.8.8') -PUBLIC_TEST_IP = os.environ.get('PUBLIC_TEST_IP', '8.8.8.8') - -FORCE_HTTPS_MASTER_NODE = get_var_as_bool('FORCE_HTTPS_MASTER_NODE', False) -DISABLE_SSL = get_var_as_bool('DISABLE_SSL', False) -VERIFY_SSL = get_var_as_bool('VERIFY_SSL', False) -SSL_CN = os.environ.get('SSL_CN', 'public.fuel.local') -SSL_CERTS_DIR = os.environ.get('SSL_CERTS_DIR', os.getcwd()) -if not os.path.exists(SSL_CERTS_DIR): - os.makedirs(SSL_CERTS_DIR) -USER_OWNED_CERT = get_var_as_bool('USER_OWNED_CERT', True) -PATH_TO_CERT = os.environ.get('PATH_TO_CERT', os.path.join( - SSL_CERTS_DIR, 'ca.crt')) -PATH_TO_PEM = os.environ.get('PATH_TO_PEM', os.path.join( - SSL_CERTS_DIR, 'ca.pem')) - -OPENSTACK_RELEASE_CENTOS = 'centos' -OPENSTACK_RELEASE_UBUNTU = os.environ.get('OPENSTACK_RELEASE_UBUNTU', - 'Ubuntu 16.04').lower() -OPENSTACK_RELEASE_UBUNTU_UCA = os.environ.get('OPENSTACK_RELEASE_UBUNTU_UCA', - 'Ubuntu+UCA 16.04').lower() -OPENSTACK_RELEASE = os.environ.get( - 'OPENSTACK_RELEASE', 'Ubuntu').lower() - -RELEASE_VERSION = os.environ.get('RELEASE_VERSION', "newton") - -# FIXME(mattmymo): Update CI jobs to use 'Ubuntu 14.04' for OPENSTACK_RELEASE -# FIXME(dteselkin): Because of strange logic to detect Ubuntu release we need -# OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU -# For UCA that means that all three variables should be -# the same. 
-if OPENSTACK_RELEASE == 'ubuntu': - OPENSTACK_RELEASE = OPENSTACK_RELEASE_UBUNTU - -DEPLOYMENT_MODE_SIMPLE = "multinode" -DEPLOYMENT_MODE_HA = "ha_compact" -DEPLOYMENT_MODE = os.environ.get("DEPLOYMENT_MODE", DEPLOYMENT_MODE_HA) -DEPLOYMENT_TIMEOUT = int(os.environ.get("DEPLOYMENT_TIMEOUT", 7800)) -DEPLOYMENT_RETRIES = int(os.environ.get("DEPLOYMENT_RETRIES", 1)) -BOOTSTRAP_TIMEOUT = int(os.environ.get("BOOTSTRAP_TIMEOUT", 900)) -WAIT_FOR_PROVISIONING_TIMEOUT = int(os.environ.get( - "WAIT_FOR_PROVISIONING_TIMEOUT", 1200)) - -ADMIN_NODE_SETUP_TIMEOUT = int(os.environ.get("ADMIN_NODE_SETUP_TIMEOUT", 30)) -ADMIN_NODE_BOOTSTRAP_TIMEOUT = int(os.environ.get( - "ADMIN_NODE_BOOTSTRAP_TIMEOUT", 3600)) - - -HARDWARE = { - "admin_node_memory": int(os.environ.get("ADMIN_NODE_MEMORY", 3072)), - "admin_node_cpu": int(os.environ.get("ADMIN_NODE_CPU", 2)), - "slave_node_cpu": int(os.environ.get("SLAVE_NODE_CPU", 1)), - "slave_node_memory": int(os.environ.get("SLAVE_NODE_MEMORY", 3584)), - "numa_nodes": int(os.environ.get("NUMA_NODES", 0)) -} -if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE: - slave_mem_default = 2560 -else: - slave_mem_default = 2048 -HARDWARE["slave_node_memory"] = int( - os.environ.get("SLAVE_NODE_MEMORY", slave_mem_default)) -NODE_VOLUME_SIZE = int(os.environ.get('NODE_VOLUME_SIZE', 50)) -NODES_COUNT = int(os.environ.get('NODES_COUNT', 10)) - -MULTIPATH = get_var_as_bool('MULTIPATH', False) -SLAVE_MULTIPATH_DISKS_COUNT = int(os.environ.get('SLAVE_MULTIPATH_DISKS_COUNT', - 0)) -MULTIPATH_TEMPLATE = os.environ.get( - 'MULTIPATH_TEMPLATE', - os.path.join( - os.getcwd(), - 'system_test/tests_templates/tests_configs/multipath_3_nodes.yaml')) -if MULTIPATH and not SLAVE_MULTIPATH_DISKS_COUNT: - os.environ.setdefault('SLAVE_MULTIPATH_DISKS_COUNT', 2) - SLAVE_MULTIPATH_DISKS_COUNT = int( - os.environ.get('SLAVE_MULTIPATH_DISKS_COUNT')) - -ENABLE_DMZ = get_var_as_bool('ENABLE_DMZ', False) -ENABLE_DMZ_TEMPLATE = os.environ.get( - 'ENABLE_DMZ_TEMPLATE', - os.path.join(os.getcwd(), - 'system_test/tests_templates/tests_configs/public_api.yaml')) - -MULTIPLE_NETWORKS = get_var_as_bool('MULTIPLE_NETWORKS', False) -MULTIPLE_NETWORKS_TEMPLATE = os.environ.get( - 'MULTIPLE_NETWORKS_TEMPLATE', - os.path.join(os.getcwd(), - 'system_test/tests_templates/tests_configs/multirack.yaml')) - -USE_HAPROXY_TEMPLATE = get_var_as_bool("USE_HAPROXY_TEMPLATE", False) -EXTERNAL_HAPROXY_TEMPLATE = os.environ.get( - 'EXTERNAL_HAPROXY_TEMPLATE', - os.path.join(os.getcwd(), - 'system_test/tests_templates/tests_configs/' - 'external_haproxy.yaml')) - -if MULTIPLE_NETWORKS: - NODEGROUPS = ( - { - 'name': 'default', - 'networks': { - 'fuelweb_admin': 'admin', - 'public': 'public', - 'management': 'management', - 'storage': 'storage', - 'private': 'private' - } - }, - { - 'name': 'group-custom-1', - 'networks': { - 'fuelweb_admin': 'admin2', - 'public': 'public2', - 'management': 'management2', - 'storage': 'storage', - 'private': 'private2' - } - }, - { - 'name': 'group-custom-2', - 'networks': { - 'fuelweb_admin': 'admin3', - 'public': 'public3', - 'management': 'management3', - 'storage': 'storage', - 'private': 'private3' - } - } - ) - FORWARD_DEFAULT = os.environ.get('FORWARD_DEFAULT', 'route') - ADMIN_FORWARD = os.environ.get('ADMIN_FORWARD', 'nat') - PUBLIC_FORWARD = os.environ.get('PUBLIC_FORWARD', 'nat') -else: - NODEGROUPS = () - FORWARD_DEFAULT = os.environ.get('FORWARD_DEFAULT', None) - ADMIN_FORWARD = os.environ.get('ADMIN_FORWARD', FORWARD_DEFAULT or 'nat') - PUBLIC_FORWARD = os.environ.get('PUBLIC_FORWARD', 
FORWARD_DEFAULT or 'nat') - -MGMT_FORWARD = os.environ.get('MGMT_FORWARD', FORWARD_DEFAULT) -PRIVATE_FORWARD = os.environ.get('PRIVATE_FORWARD', FORWARD_DEFAULT) -STORAGE_FORWARD = os.environ.get('STORAGE_FORWARD', FORWARD_DEFAULT) - -DEFAULT_INTERFACE_ORDER = 'admin,public,management,private,storage' -INTERFACE_ORDER = os.environ.get('INTERFACE_ORDER', - DEFAULT_INTERFACE_ORDER).split(',') - -FORWARDING = { - 'admin': ADMIN_FORWARD, - 'public': PUBLIC_FORWARD, - 'management': MGMT_FORWARD, - 'private': PRIVATE_FORWARD, - 'storage': STORAGE_FORWARD, -} - -DHCP = { - 'admin': False, - 'public': False, - 'management': False, - 'private': False, - 'storage': False, -} - -INTERFACES = { - 'admin': iface_alias('eth0'), - 'public': iface_alias('eth1'), - 'management': iface_alias('eth2'), - 'private': iface_alias('eth3'), - 'storage': iface_alias('eth4'), -} - -# May be one of virtio, e1000, pcnet, rtl8139 -INTERFACE_MODEL = os.environ.get('INTERFACE_MODEL', 'virtio') - -POOL_DEFAULT = os.environ.get('POOL_DEFAULT', '10.109.0.0/16:24') -POOL_ADMIN = os.environ.get('POOL_ADMIN', POOL_DEFAULT) -POOL_PUBLIC = os.environ.get('POOL_PUBLIC', POOL_DEFAULT) -POOL_MANAGEMENT = os.environ.get('POOL_MANAGEMENT', POOL_DEFAULT) -POOL_PRIVATE = os.environ.get('POOL_PRIVATE', POOL_DEFAULT) -POOL_STORAGE = os.environ.get('POOL_STORAGE', POOL_DEFAULT) - -DEFAULT_POOLS = { - 'admin': POOL_ADMIN, - 'public': POOL_PUBLIC, - 'management': POOL_MANAGEMENT, - 'private': POOL_PRIVATE, - 'storage': POOL_STORAGE, -} - -POOLS = { - 'admin': os.environ.get( - 'PUBLIC_POOL', - DEFAULT_POOLS.get('admin')).split(':'), - 'public': os.environ.get( - 'PUBLIC_POOL', - DEFAULT_POOLS.get('public')).split(':'), - 'management': os.environ.get( - 'PRIVATE_POOL', - DEFAULT_POOLS.get('management')).split(':'), - 'private': os.environ.get( - 'INTERNAL_POOL', - DEFAULT_POOLS.get('private')).split(':'), - 'storage': os.environ.get( - 'NAT_POOL', - DEFAULT_POOLS.get('storage')).split(':'), -} - -if MULTIPLE_NETWORKS: - FORWARDING['admin2'] = ADMIN_FORWARD - FORWARDING['public2'] = PUBLIC_FORWARD - FORWARDING['management2'] = MGMT_FORWARD - FORWARDING['private2'] = PRIVATE_FORWARD - FORWARDING['storage2'] = STORAGE_FORWARD - - DHCP['admin2'] = False - DHCP['public2'] = False - DHCP['management2'] = False - DHCP['private2'] = False - DHCP['storage2'] = False - - INTERFACES['admin2'] = iface_alias('eth5') - - POOL_DEFAULT2 = os.environ.get('POOL_DEFAULT2', '10.108.0.0/16:24') - POOL_ADMIN2 = os.environ.get('POOL_ADMIN2', POOL_DEFAULT2) - POOL_PUBLIC2 = os.environ.get('POOL_PUBLIC2', POOL_DEFAULT2) - POOL_MANAGEMENT2 = os.environ.get('POOL_MANAGEMENT', POOL_DEFAULT2) - POOL_PRIVATE2 = os.environ.get('POOL_PRIVATE', POOL_DEFAULT2) - POOL_STORAGE2 = os.environ.get('POOL_STORAGE', POOL_DEFAULT2) - - CUSTOM_POOLS = { - 'admin2': POOL_ADMIN2, - 'public2': POOL_PUBLIC2, - 'management2': POOL_MANAGEMENT2, - 'private2': POOL_PRIVATE2, - 'storage2': POOL_STORAGE2, - } - - POOLS['admin2'] = os.environ.get( - 'PUBLIC_POOL2', - CUSTOM_POOLS.get('admin2')).split(':') - POOLS['public2'] = os.environ.get( - 'PUBLIC_POOL2', - CUSTOM_POOLS.get('public2')).split(':') - POOLS['management2'] = os.environ.get( - 'PUBLIC_POOL2', - CUSTOM_POOLS.get('management2')).split(':') - POOLS['private2'] = os.environ.get( - 'PUBLIC_POOL2', - CUSTOM_POOLS.get('private2')).split(':') - POOLS['storage2'] = os.environ.get( - 'PUBLIC_POOL2', - CUSTOM_POOLS.get('storage2')).split(':') - - CUSTOM_INTERFACE_ORDER = os.environ.get( - 'CUSTOM_INTERFACE_ORDER', - 
'admin2,public2,management2,private2,storage2') - INTERFACE_ORDER.extend(CUSTOM_INTERFACE_ORDER.split(',')) - -BONDING = get_var_as_bool("BONDING", False) - -BONDING_INTERFACES = { - 'admin': [iface_alias('eth0')], - 'public': [ - iface_alias('eth1'), - iface_alias('eth2'), - iface_alias('eth3'), - iface_alias('eth4') - ] -} - -NETWORK_MANAGERS = { - 'flat': 'FlatDHCPManager', - 'vlan': 'VlanManager' -} - -NETWORK_PROVIDERS = [ - 'neutron', - 'nova_network' -] - -NEUTRON = 'neutron' - -NEUTRON_SEGMENT = { - 'gre': 'gre', - 'vlan': 'vlan', - 'tun': 'tun' -} - -NEUTRON_SEGMENT_TYPE = NEUTRON_SEGMENT.get( - os.environ.get('NEUTRON_SEGMENT_TYPE', None), None) - -# Path to a network template dedicated for reduced footprint environments -RF_NET_TEMPLATE = os.environ.get("RF_NET_TEMPLATE", None) - -USE_ALL_DISKS = get_var_as_bool('USE_ALL_DISKS', True) - -UPLOAD_MANIFESTS = get_var_as_bool('UPLOAD_MANIFESTS', False) -SYNC_DEPL_TASKS = get_var_as_bool('SYNC_DEPL_TASKS', False) -UPLOAD_MANIFESTS_PATH = os.environ.get( - 'UPLOAD_MANIFESTS_PATH', '~/git/fuel/deployment/puppet/') -SITEPP_FOR_UPLOAD = os.environ.get( - 'SITEPP_PATH', '/etc/puppet/modules/osnailyfacter/examples/site.pp') - -GERRIT_REFSPEC = os.environ.get('GERRIT_REFSPEC') -PATCH_PATH = os.environ.get( - 'PATCH_PATH', '/tmp/fuel-ostf') - -KVM_USE = get_var_as_bool('KVM_USE', False) -DEBUG_MODE = get_var_as_bool('DEBUG_MODE', True) - -# Services tests -SERVTEST_LOCAL_PATH = os.environ.get('SERVTEST_LOCAL_PATH', '/tmp') -SERVTEST_USERNAME = os.environ.get('SERVTEST_USERNAME', 'admin') -SERVTEST_PASSWORD = os.environ.get('SERVTEST_PASSWORD', SERVTEST_USERNAME) -SERVTEST_TENANT = os.environ.get('SERVTEST_TENANT', SERVTEST_USERNAME) - -SERVTEST_SAHARA_VANILLA_2_IMAGE = ( - 'sahara-liberty-vanilla-2.7.1-ubuntu-14.04.qcow2') -SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME = ( - 'sahara-liberty-vanilla-2.7.1-ubuntu-14.04') -SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5 = '3da49911332fc46db0c5fb7c197e3a77' -SERVTEST_SAHARA_VANILLA_2_IMAGE_META = {'_sahara_tag_2.7.1': 'True', - '_sahara_tag_vanilla': 'True', - '_sahara_username': 'ubuntu'} - -SERVTEST_MURANO_IMAGE = "ubuntu_14_04-murano-agent_stable_juno_26_02_15.qcow2" -SERVTEST_MURANO_IMAGE_MD5 = '3da5ec5984d6d19c1b88d0062c885a89' -SERVTEST_MURANO_IMAGE_NAME = 'murano' -SERVTEST_MURANO_IMAGE_META = { - 'murano_image_info': '{"type": "linux", "title": "murano"}'} - -SERVTEST_EXTERNAL_MONGO_URLS = os.environ.get('EXTERNAL_MONGO_URLS') -SERVTEST_EXTERNAL_MONGO_DB_NAME = os.environ.get('EXTERNAL_MONGO_DB_NAME', - 'ceilometer') -SERVTEST_EXTERNAL_MONGO_USER = os.environ.get('EXTERNAL_MONGO_USER') -SERVTEST_EXTERNAL_MONGO_PASS = os.environ.get('EXTERNAL_MONGO_PASS') -SERVTEST_EXTERNAL_MONGO_REPL_SET = os.environ.get('EXTERNAL_MONGO_REPL_SET') - -DEFAULT_IMAGES_CENTOS = os.environ.get( - 'DEFAULT_IMAGES_CENTOS', - '/var/lib/libvirt/images/centos6.4-base.qcow2') - -DEFAULT_IMAGES_UBUNTU = os.environ.get( - 'DEFAULT_IMAGES_UBUNTU', - '/var/lib/libvirt/images/ubuntu-12.04.1-server-amd64-p2.qcow2') - -OS_IMAGE = os.environ.get('OS_IMAGE', DEFAULT_IMAGES_CENTOS) - -OSTF_TEST_NAME = os.environ.get('OSTF_TEST_NAME', - 'Check network connectivity' - ' from instance via floating IP') -OSTF_TEST_RETRIES_COUNT = int(os.environ.get('OSTF_TEST_RETRIES_COUNT', 50)) - -# The variable below is only for test: -# fuelweb_test.tests.tests_strength.test_ostf_repeatable_tests -# :OstfRepeatableTests.run_ostf_n_times_against_custom_deployment -DEPLOYMENT_NAME = os.environ.get('DEPLOYMENT_NAME') - -# Need for iso with docker -TIMEOUT = 
int(os.environ.get('TIMEOUT', 60)) -ATTEMPTS = int(os.environ.get('ATTEMPTS', 5)) - -# Create snapshots as last step in test-case -MAKE_SNAPSHOT = get_var_as_bool('MAKE_SNAPSHOT', False) - -FUEL_SETTINGS_YAML = os.environ.get('FUEL_SETTINGS_YAML', - '/etc/fuel/astute.yaml') - -# Full path to the custom fuel setting yaml -CUSTOM_FUEL_SETTING_YAML = os.environ.get('CUSTOM_FUEL_SETTING_YAML', None) - -# Upgrade-related variables -UPGRADE_TEST_TEMPLATE = os.environ.get("UPGRADE_TEST_TEMPLATE") -UPGRADE_CUSTOM_STEP_NAME = os.environ.get("UPGRADE_CUSTOM_STEP_NAME", "") -TARBALL_PATH = os.environ.get('TARBALL_PATH') - -OCTANE_REPO_LOCATION = os.environ.get('OCTANE_REPO_LOCATION', '') -if not OCTANE_REPO_LOCATION: - FUEL_PROPOSED_REPO_URL = os.environ.get('FUEL_PROPOSED_REPO_URL', '') - OCTANE_REPO_LOCATION = FUEL_PROPOSED_REPO_URL - -UPGRADE_FUEL_FROM = os.environ.get('UPGRADE_FUEL_FROM', '8.0') -UPGRADE_FUEL_TO = os.environ.get('UPGRADE_FUEL_TO', '9.0') -OCTANE_PATCHES = os.environ.get('OCTANE_PATCHES', None) -EXAMPLE_V3_PLUGIN_REMOTE_URL = os.environ.get('EXAMPLE_V3_PLUGIN_REMOTE_URL', - None) -EXAMPLE_V4_PLUGIN_REMOTE_URL = os.environ.get('EXAMPLE_V4_PLUGIN_REMOTE_URL', - None) -UPGRADE_BACKUP_FILES_LOCAL_DIR = os.environ.get( - 'UPGRADE_BACKUP_FILES_LOCAL_DIR', os.path.join( - os.path.curdir, "..", "backup_storage")) -UPGRADE_BACKUP_FILES_REMOTE_DIR = os.environ.get( - 'UPGRADE_BACKUP_FILES_REMOTE_DIR', "/var/upgrade/backups") -# End of upgrade-related variables - -SNAPSHOT = os.environ.get('SNAPSHOT', '') - -# Repos paths and files -MOS_REPOS = os.environ.get('MOS_REPOS', - 'http://mirror.fuel-infra.org/mos-repos/') -CENTOS_REPO_PATH = os.environ.get( - 'CENTOS_REPO_PATH', - MOS_REPOS + 'centos/mos{release_version}-centos7/') -UBUNTU_REPO_PATH = os.environ.get( - 'UBUNTU_REPO_PATH', - MOS_REPOS + 'ubuntu/{release_version}/') -GPG_CENTOS_KEY_PATH = os.environ.get( - 'GPG_CENTOS_KEY', - CENTOS_REPO_PATH + 'os/RPM-GPG-KEY-mos{release_version}') -MASTER_CENTOS_GPG = os.environ.get( - 'MASTER_CENTOS_GPG', 'http://packages.fuel-infra.org/repositories' - '/centos/master-centos7/os/RPM-GPG-KEY-' -) -PACKAGES_CENTOS = os.environ.get( - 'PACKAGES_CENTOS', - 'http://packages.fuel-infra.org/repositories/' - 'centos/master-centos7/os/x86_64/') - -# Release name of local Ubuntu mirror on Fuel master node. 
-UBUNTU_RELEASE = os.environ.get('UBUNTU_RELEASE', 'precise') - -UPDATE_TIMEOUT = int(os.environ.get('UPDATE_TIMEOUT', 3600)) - -PLUGIN_PACKAGE_VERSION = os.environ.get('PLUGIN_PACKAGE_VERSION', '') - -# Plugin path for plugins tests - -CONTRAIL_PLUGIN_PATH = os.environ.get('CONTRAIL_PLUGIN_PATH') -CONTRAIL_PLUGIN_PACK_UB_PATH = os.environ.get('CONTRAIL_PLUGIN_PACK_UB_PATH') -CONTRAIL_PLUGIN_PACK_CEN_PATH = os.environ.get('CONTRAIL_PLUGIN_PACK_CEN_PATH') -GLUSTER_PLUGIN_PATH = os.environ.get('GLUSTER_PLUGIN_PATH') -GLUSTER_CLUSTER_ENDPOINT = os.environ.get('GLUSTER_CLUSTER_ENDPOINT') -EXAMPLE_PLUGIN_PATH = os.environ.get('EXAMPLE_PLUGIN_PATH') -EXAMPLE_PLUGIN_V3_PATH = os.environ.get('EXAMPLE_PLUGIN_V3_PATH') -EXAMPLE_PLUGIN_V4_PATH = os.environ.get('EXAMPLE_PLUGIN_V4_PATH') -LBAAS_PLUGIN_PATH = os.environ.get('LBAAS_PLUGIN_PATH') -ZABBIX_PLUGIN_PATH = os.environ.get('ZABBIX_PLUGIN_PATH') -ZABBIX_SNMP_PLUGIN_PATH = os.environ.get('ZABBIX_SNMP_PLUGIN_PATH') -ZABBIX_SNMP_EMC_PLUGIN_PATH = os.environ.get('ZABBIX_SNMP_EMC_PLUGIN_PATH') -ZABBIX_SNMP_EXTREME_PLUGIN_PATH = os.environ.get( - 'ZABBIX_SNMP_EXTREME_PLUGIN_PATH') -LMA_COLLECTOR_PLUGIN_PATH = os.environ.get('LMA_COLLECTOR_PLUGIN_PATH') -LMA_INFRA_ALERTING_PLUGIN_PATH = os.environ.get( - 'LMA_INFRA_ALERTING_PLUGIN_PATH') -ELASTICSEARCH_KIBANA_PLUGIN_PATH = os.environ.get( - 'ELASTICSEARCH_KIBANA_PLUGIN_PATH') -INFLUXDB_GRAFANA_PLUGIN_PATH = os.environ.get('INFLUXDB_GRAFANA_PLUGIN_PATH') -SEPARATE_SERVICE_DB_PLUGIN_PATH = os.environ.get( - 'SEPARATE_SERVICE_DB_PLUGIN_PATH') -SEPARATE_SERVICE_RABBIT_PLUGIN_PATH = os.environ.get( - 'SEPARATE_SERVICE_RABBIT_PLUGIN_PATH') -SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH = os.environ.get( - 'SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH') -SEPARATE_SERVICE_HORIZON_PLUGIN_PATH = os.environ.get( - 'SEPARATE_SERVICE_HORIZON_PLUGIN_PATH') -ETCKEEPER_PLUGIN_REPO = os.environ.get( - 'ETCKEEPER_PLUGIN_REPO', - 'https://github.com/Mirantis/fuel-plugin-etckeeper') -SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH = os.environ.get( - 'SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH') -SEPARATE_SERVICE_BALANCER_PLUGIN_PATH = os.environ.get( - 'SEPARATE_SERVICE_BALANCER_PLUGIN_PATH') -MURANO_PLUGIN_PATH = os.environ.get('MURANO_PLUGIN_PATH') - -FUEL_STATS_CHECK = get_var_as_bool('FUEL_STATS_CHECK', False) -FUEL_STATS_ENABLED = get_var_as_bool('FUEL_STATS_ENABLED', True) -FUEL_STATS_SSL = get_var_as_bool('FUEL_STATS_SSL', False) -FUEL_STATS_HOST = os.environ.get('FUEL_STATS_HOST') -FUEL_STATS_PORT = os.environ.get('FUEL_STATS_PORT', '80') - -ANALYTICS_IP = os.environ.get('ANALYTICS_IP') - -CUSTOM_ENV = get_var_as_bool('CUSTOM_ENV', False) -SECURITY_TEST = get_var_as_bool('SECURITY_TEST', False) -NESSUS_IMAGE_PATH = os.environ.get('NESSUS_IMAGE_PATH', - '/var/lib/libvirt/images/nessus.qcow2') -BUILD_IMAGES = get_var_as_bool('BUILD_IMAGES', False) - -STORE_ASTUTE_YAML = get_var_as_bool('STORE_ASTUTE_YAML', False) - -EXTERNAL_DNS = [ - string.strip() for string in - os.environ.get('EXTERNAL_DNS', '208.67.220.220').split(',') -] -EXTERNAL_NTP = [ - string.strip() for string in - os.environ.get('EXTERNAL_NTP', 'ua.pool.ntp.org').split(',') -] -DNS_SUFFIX = os.environ.get('DNS_SUFFIX', '.test.domain.local') -FUEL_MASTER_HOSTNAME = os.environ.get('FUEL_MASTER_HOSTNAME', 'nailgun') - -TIMESTAT_PATH_YAML = os.environ.get( - 'TIMESTAT_PATH_YAML', os.path.join( - LOGS_DIR, 'timestat_{}.yaml'.format(time.strftime("%Y%m%d")))) - -FUEL_PLUGIN_BUILDER_FROM_GIT = get_var_as_bool('FUEL_PLUGIN_BUILDER_FROM_GIT', - True) -FUEL_PLUGIN_BUILDER_REPO = 
'https://github.com/openstack/fuel-plugins.git' - -############################################################################### -# Change various Fuel master node default settings # -############################################################################### - -# URL to custom mirror with new OSCI packages which should be tested -CUSTOM_PKGS_MIRROR = os.environ.get('CUSTOM_PKGS_MIRROR', '') - -# Location of local mirrors on master node. -LOCAL_MIRROR_UBUNTU = os.environ.get('LOCAL_MIRROR_UBUNTU', - '/var/www/nailgun/ubuntu/x86_64') -LOCAL_MIRROR_CENTOS = os.environ.get('LOCAL_MIRROR_CENTOS', - '/var/www/nailgun/centos/x86_64') - -# MIRROR_UBUNTU and EXTRA_DEB_REPOS - lists of repositories, separated by '|', -# for example: -# MIRROR_UBUNTU = 'deb http://... trusty main universe multiverse|deb ...' -# If MIRROR_UBUNTU set, it will replace the default upstream repositories, -# the first repo in string should point to upstream Ubuntu mirror -# and use sections 'main universe multiverse'. -# Repos from EXTRA_DEB_REPOS will be appended to the list of repositories. -MIRROR_UBUNTU = os.environ.get('MIRROR_UBUNTU', '') -MIRROR_UBUNTU_PRIORITY = int(os.environ.get('MIRROR_UBUNTU_PRIORITY', 1001)) -EXTRA_DEB_REPOS = os.environ.get('EXTRA_DEB_REPOS', '') -EXTRA_DEB_REPOS_PRIORITY = int(os.environ.get('EXTRA_DEB_REPOS_PRIORITY', - 1050)) - -# The same for Centos repository: -MIRROR_CENTOS = os.environ.get('MIRROR_CENTOS', '') -MIRROR_CENTOS_PRIORITY = int(os.environ.get('MIRROR_CENTOS_PRIORITY', 50)) -EXTRA_RPM_REPOS = os.environ.get('EXTRA_RPM_REPOS', '') -EXTRA_RPM_REPOS_PRIORITY = int(os.environ.get('EXTRA_RPM_REPOS_PRIORITY', 20)) - -# Auxiliary repository priority will be set for a cluster if UPDATE_FUEL=true -AUX_DEB_REPO_PRIORITY = int(os.environ.get('AUX_DEB_REPO_PRIORITY', 1150)) -AUX_RPM_REPO_PRIORITY = int(os.environ.get('AUX_RPM_REPO_PRIORITY', 15)) - -# True: replace the default list of repositories in Nailgun -# False: keep original list of repositories in Nailgun -REPLACE_DEFAULT_REPOS = get_var_as_bool('REPLACE_DEFAULT_REPOS', True) - -# True: replace the default list of repositories once admin node is installed -# False: replace list of repositories before every cluster creation -REPLACE_DEFAULT_REPOS_ONLY_ONCE = get_var_as_bool( - 'REPLACE_DEFAULT_REPOS_ONLY_ONCE', True) - -# Set gateway of 'admin' network as NTPD server for Fuel master node -# , set gateway of 'public' network as NTPD server for new OS clusters -FUEL_USE_LOCAL_NTPD = get_var_as_bool('FUEL_USE_LOCAL_NTPD', True) -# Set gateway of 'public' network as DNS server for new OS clusters -FUEL_USE_LOCAL_DNS = get_var_as_bool('FUEL_USE_LOCAL_DNS', True) - -# Path to fuel-agent review repository. 
Used in ci-gates for fuel-agent -FUEL_AGENT_REPO_PATH = os.environ.get('FUEL_AGENT_REPO_PATH', '') - -# Default 'KEYSTONE_PASSWORD' can be changed for keystone on Fuel master node -KEYSTONE_CREDS = {'username': os.environ.get('KEYSTONE_USERNAME', 'admin'), - 'password': os.environ.get('KEYSTONE_PASSWORD', 'admin'), - 'tenant_name': os.environ.get('KEYSTONE_TENANT', 'admin')} - -# Default SSH password 'ENV_FUEL_PASSWORD' can be changed on Fuel master node -SSH_FUEL_CREDENTIALS = { - 'login': os.environ.get('ENV_FUEL_LOGIN', 'root'), - 'password': os.environ.get('ENV_FUEL_PASSWORD', 'r00tme'), - 'sudo': get_var_as_bool('ENV_FUEL_SUDO', False) -} - -SSH_SLAVE_CREDENTIALS = { - 'login': os.environ.get('ENV_SLAVE_LOGIN', 'fuel'), - 'password': os.environ.get('ENV_SLAVE_PASSWORD', 'fuel'), - 'sudo': get_var_as_bool('ENV_SLAVE_SUDO', True) -} - -SSH_IMAGE_CREDENTIALS = { - 'username': os.environ.get('SSH_IMAGE_CREDENTIALS_LOGIN', "cirros"), - 'password': os.environ.get('SSH_IMAGE_CREDENTIALS_PASSWORD', "cubswin:)") -} - -############################################################################### - -PATCHING_WEB_DIR = os.environ.get("PATCHING_WEB_DIR", "/var/www/nailgun/") -PATCHING_MIRRORS = os.environ.get("PATCHING_MIRRORS", - CUSTOM_PKGS_MIRROR).split() -PATCHING_MASTER_MIRRORS = os.environ.get("PATCHING_MASTER_MIRRORS", '').split() -PATCHING_BUG_ID = os.environ.get("PATCHING_BUG_ID", None) -PATCHING_PKGS_TESTS = os.environ.get("PATCHING_PKGS_TESTS", "./packages_tests") -PATCHING_APPLY_TESTS = os.environ.get("PATCHING_APPLY_TESTS", - "./patching_tests") -PATCHING_PKGS = os.environ.get("PATCHING_PKGS", None) -PATCHING_SNAPSHOT = os.environ.get("PATCHING_SNAPSHOT", None) -PATCHING_CUSTOM_TEST = os.environ.get("PATCHING_CUSTOM_TEST", None) -PATCHING_DISABLE_UPDATES = get_var_as_bool('PATCHING_DISABLE_UPDATES', False) -PATCHING_RUN_RALLY = get_var_as_bool("PATCHING_RUN_RALLY", False) - -DOWNLOAD_LINK = os.environ.get( - 'DOWNLOAD_LINK', 'http://ubuntu1.hti.pl/14.04.4/' - 'ubuntu-14.04.4-server-amd64.iso') -UPDATE_FUEL = get_var_as_bool('UPDATE_FUEL', False) -UPDATE_FUEL_PATH = os.environ.get('UPDATE_FUEL_PATH', '~/fuel/pkgs/') -UPDATE_FUEL_MIRROR = os.environ.get("UPDATE_FUEL_MIRROR", '').split() - -UPDATE_MASTER = get_var_as_bool('UPDATE_MASTER', False) - -EMC_PLUGIN_PATH = os.environ.get('EMC_PLUGIN_PATH') -EMC_SP_A_IP = os.environ.get('EMC_SP_A_IP') -EMC_SP_B_IP = os.environ.get('EMC_SP_B_IP') -EMC_USERNAME = os.environ.get('EMC_USERNAME') -EMC_PASSWORD = os.environ.get('EMC_PASSWORD') -EMC_POOL_NAME = os.environ.get('EMC_POOL_NAME', '') - -UCA_ENABLED = os.environ.get('UCA_ENABLED', False) -UCA_REPO_TYPE = os.environ.get('UCA_REPO_TYPE', 'uca') -UCA_PIN_HAPROXY = get_var_as_bool('UCA_PIN_HAPROXY', True) -UCA_PIN_RABBITMQ = get_var_as_bool('UCA_PIN_RABBITMQ', True) -UCA_PIN_CEPH = get_var_as_bool('UCA_PIN_CEPH', True) - -ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT = get_var_as_bool( - 'ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT', False) - -RALLY_DOCKER_REPO = os.environ.get('RALLY_DOCKER_REPO', - 'docker.io/rallyforge/rally') -RALLY_CONTAINER_NAME = os.environ.get('RALLY_CONTAINER_NAME', 'rally') -RALLY_TAGS = os.environ.get('RALLY_TAGS', 'nova').split(',') - -REGENERATE_ENV_IMAGE = get_var_as_bool('REGENERATE_ENV_IMAGE', False) -LATE_ARTIFACTS_JOB_URL = os.environ.get("LATE_ARTIFACTS_JOB_URL", '') - -NESSUS_ADDRESS = os.environ.get("NESSUS_ADDRESS", None) -NESSUS_PORT = os.environ.get("NESSUS_PORT", 8834) -NESSUS_USERNAME = os.environ.get("NESSUS_USERNAME") -NESSUS_PASSWORD = os.environ.get("NESSUS_PASSWORD") 
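Nearly every knob in the settings module being removed here follows one pattern: read an environment variable, fall back to a default, and coerce boolean toggles through the get_var_as_bool helper. The helper's definition is outside this hunk, so the sketch below is a minimal reconstruction based only on how it is called in this file; the exact set of accepted truthy spellings is an assumption:

    import os

    def get_var_as_bool(name, default):
        # Assumed behavior: an unset variable yields the default;
        # otherwise common truthy spellings map to True and anything
        # else to False.
        value = os.environ.get(name)
        if value is None:
            return default
        return value.strip().lower() in ('true', 'yes', '1')

Under that assumption, a setting such as KVM_USE = get_var_as_bool('KVM_USE', False) stays False unless the CI job exports KVM_USE=true (or yes/1).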
-NESSUS_SSL_VERIFY = get_var_as_bool("NESSUS_SSL_VERIFY", False) -
-# Used in the RabbitMQ stability test to make the allowed
-# number of repeated failures configurable
-REPEAT_COUNT = int(os.environ.get("REPEAT_COUNT", 2)) - -# The number of cold restarts -# in the 'repetitive_restart' test group
-RESTART_COUNT = int(os.environ.get("RESTART_COUNT", 10)) - -# Is used for the stop_on_deploy test
-PROGRESS_TO_STOP = int(os.environ.get("PROGRESS_TO_STOP", 60)) - -# RH-related variables
-# These variables need to be updated once an image with RH for
-# MOS becomes available.
-EXTRA_COMP_IMAGE = os.environ.get("EXTRA_COMP_IMAGE") -EXTRA_COMP_IMAGE_PATH = os.environ.get("EXTRA_COMP_IMAGE_PATH")
-EXTRA_COMP_IMAGE_MD5 = os.environ.get("EXTRA_COMP_IMAGE_MD5") -COMPUTE_BOOT_STRATEGY = os.environ.get("COMPUTE_BOOT_STRATEGY", "system")
-EXTRA_COMP_IMAGE_USER = os.environ.get("EXTRA_COMP_IMAGE_USER", "root") -EXTRA_COMP_IMAGE_PASSWORD = os.environ.get("EXTRA_COMP_IMAGE_PASSWORD", - "r00tme")
-RH_LICENSE_USERNAME = os.environ.get("RH_LICENSE_USERNAME") -RH_LICENSE_PASSWORD = os.environ.get("RH_LICENSE_PASSWORD")
-RH_SERVER_URL = os.environ.get("RH_SERVER_URL") -RH_REGISTERED_ORG_NAME = os.environ.get("RH_REGISTERED_ORG_NAME")
-RH_ACTIVATION_KEY = os.environ.get("RH_ACTIVATION_KEY") -RH_RELEASE = os.environ.get("RH_RELEASE")
-RH_MAJOR_RELEASE = os.environ.get("RH_MAJOR_RELEASE", "7") -OL_MAJOR_RELEASE = os.environ.get("OL_MAJOR_RELEASE", "7")
-CENTOS_DUMMY_DEPLOY = get_var_as_bool("CENTOS_DUMMY_DEPLOY", False) -PERESTROIKA_REPO = os.environ.get("PERESTROIKA_REPO")
-RH_POOL_HASH = os.environ.get("RH_POOL_HASH") - -# Ironic variables
-IRONIC_USER_IMAGE_URL = os.environ.get( - "IRONIC_USER_IMAGE_URL", "https://cloud-images.ubuntu.com/trusty/current/" - "trusty-server-cloudimg-amd64.tar.gz") -
-NOVA_QUOTAS_ENABLED = get_var_as_bool("NOVA_QUOTAS_ENABLED", False) -
-AUTH_S3_KEYSTONE_CEPH_ENABLED = get_var_as_bool( - "AUTH_S3_KEYSTONE_CEPH_ENABLED", False) -
-GERRIT_PROJECT = os.environ.get("GERRIT_PROJECT") -GERRIT_BRANCH = os.environ.get("GERRIT_BRANCH")
-GERRIT_CHANGE_ID = os.environ.get("GERRIT_CHANGE_ID") -GERRIT_PATCHSET_NUMBER = os.environ.get("GERRIT_PATCHSET_NUMBER") -
-DOWNLOAD_FACTS = get_var_as_bool("DOWNLOAD_FACTS", False) -
-TASK_BASED_ENGINE = get_var_as_bool("TASK_BASED_ENGINE", True) -
-FUEL_RELEASE_PATH = os.environ.get("FUEL_RELEASE_PATH") -
-S3_API_CLIENT = os.environ.get("S3_API_CLIENT", "s3cmd") -
-MASTER_NODE_EXTRA_PACKAGES = os.environ.get("MASTER_NODE_EXTRA_PACKAGES", "") -
-CENTOS_MASTER_NODE = os.environ.get("CENTOS_MASTER") -
-LOG_SNAPSHOT_TIMEOUT = int(os.environ.get("LOG_SNAPSHOT_TIMEOUT", 10 * 60)) -
-RPM_REPOS_YAML = os.environ.get("RPM_REPOS_YAML") -
-DEB_REPOS_YAML = os.environ.get("DEB_REPOS_YAML") -
-UBUNTU_SERVICE_PROVIDER = os.environ.get('UBUNTU_SERVICE_PROVIDER', - 'systemd') diff --git a/fuelweb_test/testrail/__init__.py b/fuelweb_test/testrail/__init__.py deleted file mode 100644 index 2eef0dbd7..000000000 --- a/fuelweb_test/testrail/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from fuelweb_test.testrail.settings import LOGS_DIR - -if not os.path.exists(LOGS_DIR): - os.makedirs(LOGS_DIR) diff --git a/fuelweb_test/testrail/builds.py b/fuelweb_test/testrail/builds.py deleted file mode 100644 index 73f03f907..000000000 --- a/fuelweb_test/testrail/builds.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -import re - -import requests -from requests.packages.urllib3 import disable_warnings -from simplejson.scanner import JSONDecodeError - -from fuelweb_test.testrail.settings import JENKINS -from fuelweb_test.testrail.settings import logger - - -disable_warnings() - - -def get_jobs_for_view(view): - """Return list of jobs from specified view - """ - view_url = "/".join([JENKINS["url"], 'view', view, 'api/json']) - logger.debug("Request view data from {}".format(view_url)) - view_data = requests.get(view_url).json() - jobs = [job["name"] for job in view_data["jobs"]] - return jobs - - -def get_downstream_builds_from_html(url): - """Return list of downstream jobs builds from specified job - """ - url = "/".join([url, 'downstreambuildview/']) - logger.debug("Request downstream builds data from {}".format(url)) - response = requests.get(url).text - jobs = [] - raw_downstream_builds = re.findall( - '.*downstream-buildview.*href="(/job/\S+/[0-9]+/).*', response) - for raw_build in raw_downstream_builds: - sub_job_name = raw_build.split('/')[2] - sub_job_build = raw_build.split('/')[3] - build = Build(name=sub_job_name, number=sub_job_build) - jobs.append( - { - 'name': build.name, - 'number': build.number, - 'result': build.build_data['result'] - } - ) - - return jobs - - -def get_build_artifact(url, artifact): - """Return content of job build artifact - """ - url = "/".join([url, 'artifact', artifact]) - logger.debug("Request artifact content from {}".format(url)) - return requests.get(url).text - - -class Build(object): - def __init__(self, name, number): - """Get build info via Jenkins API, get test info via direct HTTP - request. - - If number is 'latest', get latest completed build. 
- """ - - self.name = name - self._job_info = None - self._injected_vars = None - if number == 'latest': - self._job_info = self.get_job_info(depth=0) - self.number = self._job_info["lastCompletedBuild"]["number"] - elif number == 'latest_started': - self._job_info = self.get_job_info(depth=0) - self.number = self._job_info["lastBuild"]["number"] - else: - self.number = int(number) - - self._injected_vars = self.get_injected_vars( - depth=0, build_number=self.number) - - self.build_data = self.get_build_data(depth=0) - self.url = self.build_data["url"] - - @property - def job_info(self): - return self._job_info - - def get_job_info(self, depth=1): - job_url = "/".join([JENKINS["url"], 'job', self.name, - 'api/json?depth={depth}'.format(depth=depth)]) - logger.debug("Request job info from {}".format(job_url)) - return requests.get(job_url).json() - - @property - def injected_vars(self): - return self._injected_vars - - def get_injected_vars(self, depth=1, build_number=None): - if not build_number: - return [] - job_url = "/".join([JENKINS["url"], 'job', self.name, - str(build_number), 'injectedEnvVars', - 'api/json?depth={depth}'.format(depth=depth)]) - logger.debug("Request injected variables from job {}".format(job_url)) - try: - result = requests.get(job_url).json() - except JSONDecodeError: - logger.debug( - "Failed to decode injected variables from job {}".format( - job_url)) - result = [] - return result - - def get_job_console(self): - job_url = "/".join([JENKINS["url"], 'job', self.name, - str(self.number), 'consoleText']) - logger.debug("Request job console from {}".format(job_url)) - return requests.get(job_url).text - - def get_build_data(self, depth=1): - build_url = "/".join([JENKINS["url"], 'job', - self.name, - str(self.number), - 'api/json?depth={depth}'.format(depth=depth)]) - logger.debug("Request build data from {}".format(build_url)) - return requests.get(build_url).json() - - @staticmethod - def get_test_data(url, result_path=None): - if result_path: - test_url = "/".join( - [url.rstrip("/"), 'testReport'] + result_path + ['api/json']) - else: - test_url = "/".join([url.rstrip("/"), 'testReport', 'api/json']) - - logger.debug("Request test data from {}".format(test_url)) - return requests.get(test_url).json() - - def test_data(self, result_path=None): - try: - data = self.get_test_data(self.url, result_path) - except Exception as e: - logger.warning("No test data for {0}: {1}".format( - self.url, - e, - )) - # If we failed to get any tests for the build, return - # meta test case 'jenkins' with status 'failed'. - data = { - "suites": [ - { - "cases": [ - { - "name": "jenkins", - "className": "jenkins", - "status": "failed", - "duration": 0 - } - ] - } - ] - } - - return data - - def __str__(self): - string = "\n".join([ - "{0}: {1}".format(*item) for item in self.build_record() - ]) - return string - - def build_record(self): - """Return list of pairs. - - We cannot use dictionary, because columns are ordered. 
- """ - - data = [ - ('number', str(self.number)), - ('id', self.build_data["id"]), - ('description', self.build_data["description"]), - ('url', self.build_data["url"]), - ] - - test_data = self.test_data() - for suite in test_data['suites']: - for case in suite['cases']: - column_id = case['className'].lower().replace("_", "-") - data.append((column_id, case['status'].lower())) - - return data diff --git a/fuelweb_test/testrail/datetime_util.py b/fuelweb_test/testrail/datetime_util.py deleted file mode 100644 index 8515d7f5f..000000000 --- a/fuelweb_test/testrail/datetime_util.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - - -MINUTE = 60 -HOUR = MINUTE ** 2 -DAY = HOUR * 8 -WEEK = DAY * 5 - - -def duration_to_testrail_estimate(duration): - """Converts duration in minutes to testrail estimate format - """ - seconds = duration * MINUTE - week = seconds // WEEK - days = seconds % WEEK // DAY - hours = seconds % DAY // HOUR - minutes = seconds % HOUR // MINUTE - estimate = '' - for val, char in ((week, 'w'), (days, 'd'), (hours, 'h'), (minutes, 'm')): - if val: - estimate = ' '.join([estimate, '{0}{1}'.format(val, char)]) - return estimate.lstrip() diff --git a/fuelweb_test/testrail/generate_failure_group_statistics.py b/fuelweb_test/testrail/generate_failure_group_statistics.py deleted file mode 100644 index 170eac58e..000000000 --- a/fuelweb_test/testrail/generate_failure_group_statistics.py +++ /dev/null @@ -1,845 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from __future__ import division -import argparse -import hashlib -import json -import re -import sys -from logging import CRITICAL -from logging import DEBUG - -import tablib -import xmltodict -from fuelweb_test.testrail.builds import Build -from fuelweb_test.testrail.builds import get_build_artifact -from fuelweb_test.testrail.launchpad_client import LaunchpadBug -from fuelweb_test.testrail.report import get_version -from fuelweb_test.testrail.settings import FAILURE_GROUPING -from fuelweb_test.testrail.settings import JENKINS -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.settings import TestRailSettings -from fuelweb_test.testrail.testrail_client import TestRailProject - - -def get_sha(input_string): - """get sha hash - - :param input_string: str - input string - :return: sha hash string - """ - - return hashlib.sha256(input_string).hexdigest() - - -def make_cleanup(input_string): - """clean up string: remove IP/IP6/Mac/etc... by using regexp - - :param input_string: str - input string - :return: s after regexp and clean up - """ - - # let's try to find all IP, IP6, MAC - ip4re = re.compile(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b') - ip6re = re.compile(r'\b(?:[a-fA-F0-9]{4}[:|\-]?){8}\b') - macre = re.compile(r'\b[a-fA-F0-9]{2}[:]{5}[a-fA-F0-9]{2}\b') - digitre = re.compile(r'\b(?:[0-9]{1,3}){1,50}\b') - hexre = re.compile(r'\b(?:[0-9a-fA-F]{1,8}){1,50}\b') - - def ismatch(match): - """ - :param match: string - :return: value or '' - """ - - value = match.group() - return " " if value else value - - stmp = ip4re.sub(ismatch, input_string) - stmp = ip6re.sub(ismatch, stmp) - stmp = macre.sub(ismatch, stmp) - stmp = digitre.sub('x', stmp) - listhex = hexre.findall(stmp) - if listhex: - for i in listhex: - stmp = hexre.sub('x' * len(i), stmp) - return stmp - - -def distance(astr, bstr): - """Calculates the Levenshtein distance between a and b - - :param astr: str - input string - :param bstr: str - input string - :return: distance: int - distance between astr and bstr - """ - - alen, blen = len(astr), len(bstr) - if alen > blen: - astr, bstr = bstr, astr - alen, blen = blen, alen - current_row = list(range(alen + 1)) # Keep current and previous row - for i in range(1, blen + 1): - previous_row, current_row = current_row, [i] + [0] * alen - for j in range(1, alen + 1): - add = previous_row[j] + 1 - delete = current_row[j - 1] + 1 - change = previous_row[j - 1] - if astr[j - 1] != bstr[i - 1]: - change += 1 - current_row[j] = min(add, delete, change) - return current_row[alen] - - -def get_bugs(subbuilds, testraildata): - """Get bugs of failed tests - - :param sub_builds: list of dict per each subbuild - :param testraildata: list test results for testrail run - :return: bugs: dict - bugs extracted from testrail - and they are belong to those failed tests - """ - - if not testraildata.get('tests'): - return {} - total_bugs = ({str(j.get('test')): [] - for i in subbuilds - for j in i.get('failure_reasons', [])}) - tests = [(i, j.get('id')) for i in total_bugs.keys() - for j in testraildata.get('tests') - if i == j.get('custom_test_group')] - bugs = [(t, iid, - rid.get('custom_launchpad_bug'), - rid.get('status_id')) - for (t, iid) in tests - for rid in testraildata.get('results') - if iid == rid.get('test_id')] - for i in bugs: - if i[2] and i[2].find('bugs.launchpad.net') > 0: - iid = int(re.search(r'.*bugs?/(\d+)/?', i[2]).group(1)) - title = get_bug_title(iid) or str(iid) - label = get_label(i[3], testraildata.get('statuses')) - color = get_color(i[3], 
testraildata.get('statuses')) - item = {'id': iid, - 'url': i[2], - 'title': title, - 'label': label, - 'color': color} - total_bugs[i[0]].append(item) - return total_bugs - - -def get_bug_title(bugid): - """ Get bug title - - :param bugid: int - launchpad bugid - :return: bug title - str - """ - - targets = LaunchpadBug(bugid).targets - return targets[0].get('title', '') - - -def get_color(stat_id, statuses): - """ Get color for test result - - :param stat_id: int - status id - :param statuses: list - statuses info extracted from TestRail - :return: color - str - """ - for stat in statuses: - if stat_id == stat.get('id'): - color = str(hex(stat.get('color_dark', 0)))[2:] - return "#" + color - - -def get_label(stat_id, statuses): - """ Get label for test result - - :param stat_id: int - status id - :param statuses: list - statuses info extracted from TestRail - :return: label - str - """ - for stat in statuses: - if stat_id == stat.get('id'): - return stat.get('label', 'None') - - -def get_testrail(): - """ Get test rail instance """ - logger.info('Initializing TestRail Project configuration...') - return TestRailProject(url=TestRailSettings.url, - user=TestRailSettings.user, - password=TestRailSettings.password, - project=TestRailSettings.project) - - -def generate_test_plan_name(job_name, build_number): - """ Generate name of TestPlan basing on iso image name - taken from Jenkins job build parameters""" - runner_build = Build(job_name, build_number) - milestone, iso_number, prefix = get_version(runner_build.build_data) - return ' '.join(filter(lambda x: bool(x), - (milestone, prefix, 'iso', '#' + str(iso_number)))) - - -def generate_test_run_name(job_name, build_number): - """ Generate name of TestRun basing on iso image name - taken from Jenkins job build parameters""" - runner_build = Build(job_name, build_number) - milestone = get_version(runner_build.build_data)[0] - return ''.join(filter(lambda x: bool(x), - ('[', milestone, ']', ' Swarm'))) - - -def get_runid_by_testplan(testplan, runname): - """ Get test rail plan and run by Swarm Jenkins job - - :param testplan: testreil testplan - :param runname: testreil runname - :return: id: testrail run id - """ - - for j in testplan.get('entries'): - for k in j.get('runs'): - if k.get('name') == runname: - return k.get('id') - return None - - -def get_testrail_testdata(job_name, build_number): - """ Get test rail plan and run by Swarm Jenkins job - - :param sub_builds: list of dict per each subbuild - :return: plan, run: tuple - TestRail plan and run dicts - """ - - planname = generate_test_plan_name(job_name, - build_number) - runname = generate_test_run_name(job_name, - build_number) - testrail_project = get_testrail() - project = testrail_project.project - plan = testrail_project.get_plan_by_name(planname) - runid = get_runid_by_testplan(plan, runname) - if not runid: - return {} - run = testrail_project.get_run(runid) - milestone = testrail_project.get_milestone_by_name( - TestRailSettings.milestone) - statuses = testrail_project.get_statuses() - tests = testrail_project.get_tests(run.get('id')) - results = testrail_project.get_results_for_run(run.get('id')) - return {'project': project, - 'plan': plan, - 'run': run, - 'milestone': milestone, - 'statuses': statuses, - 'tests': tests, - 'results': results} - - -def get_testrail_test_urls(tests, test_name): - """ Get test case url and test result url - - :param tests: list - TestRail tests gathered by run_id - :param test_name: string - TestRail custom_test_group field - :return: test 
case and test result urls - dict - {} otherwise return back - """ - - if tests.get('tests'): - for j in tests.get('tests'): - if j.get('custom_test_group') == test_name: - testcase_url = "".join([TestRailSettings.url, - '/index.php?/cases/view/', - str(j.get('case_id'))]) - testresult_url = "".join([TestRailSettings.url, - '/index.php?/tests/view/', - str(j.get('id'))]) - testresult_status = get_label(j.get('status_id'), - tests.get('statuses')) - testresult_status_color = get_color(j.get('status_id'), - tests.get('statuses')) - return {'testcase_url': testcase_url, - 'testresult_url': testresult_url, - 'testresult_status': testresult_status, - 'testresult_status_color': testresult_status_color} - return {} - - -def get_build_test_data(build_number, job_name, - jenkins_url=JENKINS.get('url')): - """ Get build test data from Jenkins from nosetests.xml - - :param build_number: int - Jenkins build number - :param job_name: str - Jenkins job_name - :param jenkins_url: str - Jenkins http url - :return: test_data: dict - build info or None otherwise - """ - - test_data = None - logger.info('Getting subbuild {} {}'.format(job_name, - build_number)) - runner_build = Build(job_name, build_number) - buildinfo = runner_build.get_build_data(depth=0) - if not buildinfo: - logger.error('Getting subbuilds info is failed. ' - 'Job={} Build={}'.format(job_name, build_number)) - return test_data - try: - artifact_paths = [v for i in buildinfo.get('artifacts') - for k, v in i.items() if k == 'relativePath' and - v == JENKINS.get('xml_testresult_file_name')][0] - artifact_url = "/".join([jenkins_url, 'job', job_name, - str(build_number)]) - xdata = get_build_artifact(artifact_url, artifact_paths) - test_data = xmltodict.parse(xdata, xml_attribs=True) - test_data.update({'build_number': build_number, - 'job_name': job_name, - 'job_url': buildinfo.get('url'), - 'job_description': - buildinfo.get('description'), - 'job_status': buildinfo.get('result')}) - except: - test_data = None - return test_data - - -def get_build_failure_reasons(test_data): - """ Gather all failure reasons across all tests - - :param test_data: dict - test data which were extracted from Jenkins - :return: test_data: list of dicts - {failure, test, build_number, job_name, url, test_url} - where: - failure(type and message were exctracted from nosetests.xml)-str - test(@classname was exctracted from nosetests.xml)-str - build_number(number which exctracted from build_info early)-int - job_name(Jenkins job name extracted from build_info early)-str - url(Jenkins job name full URL) - str - test_url(Jenkins test result URL) - str - [] otherwise - """ - failure_reasons = [] - for test in test_data.get('testsuite').get('testcase'): - failure_reason = None - if test.get('error'): - failure_reason = "___".join(['error', - 'type', - test.get('error').get('@type'), - 'message', - test.get('error').get('@message')]) - elif test.get('failure'): - failure_reason = "___".join(['failure', - 'type', - test.get('failure').get('@type'), - 'message', - test.get('failure').get('@message')]) - elif test.get('skipped'): - failure_reason = "___".join(['skipped', - 'type', - test.get('skipped').get('@type'), - 'message', - test.get('skipped').get('@message')]) - if failure_reason: - failure_reason_cleanup = make_cleanup(failure_reason) - failure_reasons.append({'failure': failure_reason_cleanup, - 'failure_origin': failure_reason, - 'test': test.get('@classname'), - 'build_number': - test_data.get('build_number'), - 'job_name': test_data.get('job_name'), - 
'job_url': test_data.get('job_url'), - 'job_status': test_data.get('job_status'), - 'test_fail_url': "". - join([test_data.get('job_url'), - 'testReport/(root)/', - test.get('@classname'), - '/', test.get('@name')]) - }) - return failure_reasons - - -def get_sub_builds(build_number, job_name=JENKINS.get('job_name'), - jenkins_url=JENKINS.get('url')): - """ Gather all sub build info into subbuild list - - :param build_number: int - Jenkins build number - :param job_name: str - Jenkins job_name - :param jenkins_url: str - Jenkins http url - :return: sub_builds: list of dicts or None otherwise - {build_info, test_data, failure_reasons} - where: - build_info(sub build specific info got from Jenkins)-dict - test_data(test data per one sub build)-dict - failure_reasons(failures per one sub build)-list - """ - - runner_build = Build(job_name, build_number) - parent_build_info = runner_build.get_build_data(depth=0) - sub_builds = None - if parent_build_info: - sub_builds = parent_build_info.get('subBuilds') - if sub_builds: - for i in sub_builds: - test_data = get_build_test_data(i.get('buildNumber'), - i.get('jobName'), - jenkins_url) - if test_data: - i.update({'test_data': test_data}) - i.update({'description': test_data.get('job_description')}) - i.update({'failure_reasons': - get_build_failure_reasons(test_data)}) - return sub_builds, parent_build_info - - -def get_global_failure_group_list( - sub_builds, threshold=FAILURE_GROUPING.get('threshold')): - """ Filter out and grouping of all failure reasons across all tests - - :param sub_builds: list of dict per each subbuild - :param threshold: float -threshold - :return: (failure_group_dict, failure_reasons): tuple or () otherwise - where: - failure_group_dict(all failure groups and - associated failed test info per each failure group) - dict - failure_reasons(all failures across all subbuild) - list - """ - # let's find all failures in all builds - failure_reasons = [] - failure_group_dict = {} - failure_group_list = [] - for build in sub_builds: - if build.get('failure_reasons'): - for failure in build.get('failure_reasons'): - failure_reasons.append(failure) - failure_group_list.append(failure.get('failure')) - # let's truncate list - failure_group_list = list(set(failure_group_list)) - # let's update failure_group_dict - for failure in failure_reasons: - if failure.get('failure') in failure_group_list: - key = failure.get('failure') - if not failure_group_dict.get(key): - failure_group_dict[key] = [] - failure_group_dict[key].append(failure) - # let's find Levenshtein distance and update failure_group_dict - for num1, key1 in enumerate(failure_group_dict): - # pylint: disable=C0201 - for key2 in failure_group_dict.keys()[num1 + 1:]: - # let's skip grouping if len are different more 10% - if key1 == key2 or abs(float(len(key1) / len(key2))) >\ - FAILURE_GROUPING.get('max_len_diff'): - continue - # let's find other failures which can be grouped - # if normalized Levenshtein distance less threshold - llen = distance(key1, key2) - cal_threshold = float(llen) / max(len(key1), len(key2)) - if cal_threshold < threshold: - # seems we shall combine those groups to one - failure_group_dict[key1].extend(failure_group_dict[key2]) - logger.info("Those groups are going to be combined" - " due to Levenshtein distance\n" - " {}\n{}".format(key1, key2)) - del failure_group_dict[key2] - return failure_group_dict, failure_reasons - - -def update_subbuilds_failuregroup(sub_builds, failure_group_dict, - testrail_testdata, bugs): - """ update subbuilds by 
TestRail and Launchpad info - - :param sub_builds: dict of subbuilds - :param failure_group_dict: dict of failures - :param testrail_testdata: dict - data extracted from TestRail - :param bugs: dict - data extracted from launchpad - :return: None - """ -
- failure_reasons_builds = [i for j in sub_builds - for i in j.get('failure_reasons', {})] - if failure_reasons_builds: - for fail in failure_reasons_builds: - fail.update(get_testrail_test_urls(testrail_testdata, - fail.get('test'))) - fail.update({'bugs': bugs.get(fail.get('test'))}) - for fgroup, flist in failure_group_dict.items(): - for fail in failure_reasons_builds: - for ffail in flist: - if not fail.get('failure_group')\ - and fail.get('failure') == ffail.get('failure'): - fail.update({'failure_group': fgroup}) - if fail.get('test') == ffail.get('test'): - ffail.update({'testresult_status': - fail.get('testresult_status'), - 'testresult_status_color': - fail.get('testresult_status_color'), - 'testcase_url': - fail.get('testcase_url'), - 'testresult_url': - fail.get('testresult_url'), - 'bugs': fail.get('bugs')}) - -
-def get_statistics(failure_group_dict, format_out=None): - """ Generate statistics for all failure reasons across all tests - - Note: non-html format is going to be flat - :param failure_group_dict: dict of failures - :param format_out: html, json, xls, xlsx, csv, yaml - :return: statistics - """ -
- if format_out != 'html': - return failure_group_dict - html_statistics = {} - failure_type_count = 0 - failure_group_count = 0 - ctests = list() - cbugs = list() - for failure, tests in failure_group_dict.items(): - # let's go through the list of tests - ftype = failure.split('___message___')[0] - skipped = (ftype.find('skipped___type___') == 0) - if not skipped: - if not html_statistics.get(ftype): - html_statistics[ftype] = {} - failure_type_count += 1 - if not html_statistics[ftype].get(failure): - html_statistics[ftype][failure] = [] - failure_group_count += 1 - for test in tests: - html_statistics[ftype][failure].append(test) - ctests.append(test.get('test')) - for bug in test.get('bugs', {}): - cbugs.append(bug.get('id')) - return {'html_statistics': html_statistics, - 'failure_type_count': failure_type_count, - 'failure_group_count': failure_group_count, - 'test_count': len(set(ctests)), - 'bug_count': len(set(cbugs))} - -
-def dump_statistics(statistics, build_number, job_name, - format_output=None, file_output=None): - """ Save statistics info to file according to requested format - Note: Please, follow tablib python lib supported formats - http://docs.python-tablib.org/en/latest/ - - non-html format is going to be flat - html format shall use rowspan for tests under one failure group - - :param statistics: list - :param build_number: int - Jenkins build number - :param job_name: str - Jenkins job name - :param format_output: html, json, xls, xlsx, csv, yaml - :param file_output: output file path - :return: None - """ -
- filename = None - html_statistics = statistics.get('html_statistics') - data = tablib.Dataset() - html_top = "<html><body>"
- html_total_count = "<table border=1><tr>" \ - "<th>Build</th>" \ - "<th>Job</th>" \ - "<th>FailureTypeCount</th>" \ - "<th>FailureGroupCount</th>" \ - "<th>TestCount</th>" \ - "<th>BugCount</th>" \ - "</tr>"\ - "<tr><td>{}</td>" \ - "<td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr></table>".\ - format(build_number, - job_name, - statistics.get('failure_type_count'), - statistics.get('failure_group_count'), - statistics.get('test_count'), - statistics.get('bug_count')) -
- html_failurestat_header = "<table border=1><tr>" \ - "<th>FailureType</th><th>FailureGroup</th>" \ - "<th>Test</th><th>Bug</th></tr>" - html_buttom = "</table></body></html>" - html = "" - if format_output and file_output: - filename = ".".join([file_output, format_output]) - if format_output != 'html': - data.json = json.dumps(html_statistics) - else: - html_body = "" - for failure_type in html_statistics.keys(): - rowspan_failure_type = len([j for i in html_statistics. - get(failure_type).keys() - for j in html_statistics. - get(failure_type).get(i)]) - failure_groups = sorted(html_statistics.get(failure_type).keys()) - rowspan_failure_group = len([j for j in html_statistics. - get(failure_type). - get(failure_groups[0])]) - tests = html_statistics.get(failure_type).get(failure_groups[0]) - failure_message = ": ".join(failure_groups[0]. - split('___type___')[1]. - split('___message___')) - failure_message = re.sub('\t', '&nbsp;&nbsp;&nbsp;&nbsp;', - failure_message) - failure_message = '<br>'.join(failure_message.splitlines()) -
- html_bugs = "<br>". \ - join(['<a href={}>#{}</a>: {}'. - format(bug.get('url'), - bug.get('id'), - bug.get('title')) - for bug in tests[0].get('bugs')]) - html_tr = '<tr>' \ - '<td rowspan={}>count groups:{} / ' \ - 'count tests:{}<br>{}</td>' \ - '<td rowspan={}>count tests: {}<br>{}</td>' \ - '<td><font color={}>{}</font>' \ - '<br><a href={}>{}</a>' \ - '<br><a href={}>[job]</a></td>' \ - '<td>{}</td>'\ - '</tr>'.format(rowspan_failure_type, - len(failure_groups), - rowspan_failure_type, - failure_type, - rowspan_failure_group, - rowspan_failure_group, - failure_message, - tests[0].get('testresult_status_color'), - tests[0].get('testresult_status'), - tests[0].get('testresult_url'), - tests[0].get('test'), - tests[0].get('test_fail_url'), - html_bugs) - html_body += html_tr - if len(tests) > 1: - for i in tests[1:]: - html_bugs = "<br>". \ - join(['<a href={}>#{}</a>: {}'. - format(bug.get('url'), - bug.get('id'), - bug.get('title')) - for bug in i.get('bugs')]) - html_tr = "".join(["<tr>", - "<td><font color={}>{}</font>" - "<br><a href={}>{}</a>" - "<br><a href={}>[job]</a></td>\ - <td>{}</td>". - format(i.get('testresult_status_color'), - i.get('testresult_status'), - i.get('testresult_url'), - i.get('test'), - i.get('test_fail_url'), - html_bugs), - "</tr>"]) - html_body += html_tr - for fgroup in failure_groups[1:]: - tstat = html_statistics.get(failure_type).get(fgroup) - rowspan_fg = len(tstat) - failure_message = ": ".join(fgroup. - split('___type___')[1]. - split('___message___')) - failure_message = re.sub('\t', '&nbsp;&nbsp;&nbsp;&nbsp;', - failure_message) - failure_message = '<br>'.join(failure_message.splitlines()) - html_bugs = "<br>". \ - join(['<a href={}>#{}</a>: {}'. - format(bug.get('url'), - bug.get('id'), - bug.get('title')) - for bug in tstat[0].get('bugs')]) - html_tr = '<tr>' \ - '<td rowspan={}>{}<br>{}</td>' \ - '<td><font color={}>{}</font>' \ - '<br><a href={}>{}</a>' \ - '<br><a href={}>[job]</a></td>' \ - '<td>{}</td>' \ - '</tr>'.format(rowspan_fg, rowspan_fg, - failure_message, - tstat[0]. - get('testresult_status_color'), - tstat[0].get('testresult_status'), - tstat[0].get('testresult_url'), - tstat[0].get('test'), - tstat[0].get('test_fail_url'), - html_bugs) - html_body += html_tr - if len(tstat) > 1: - for i in tstat[1:]: - html_bugs = "<br>". \ - join(['<a href={}>#{}</a>: {}'. - format(bug.get('url'), - bug.get('id'), - bug.get('title')) - for bug in i.get('bugs')]) - color = i.get('testresult_status_color') - html_tr = "".join(["<tr>", - "<td><font color={}>{}</font>" - "<br><a href={}>{}</a>" - "<br><a href={}>[job]</a></td>\ - <td>{}</td>". - format(color, - i.get('testresult_status'), - i.get('testresult_url'), - i.get('test'), - i.get('test_fail_url'), - html_bugs), - "</tr>"]) - html_body += html_tr - html += html_top - html += html_total_count - html += html_failurestat_header - html += html_body - html += html_buttom - if filename: - with open(filename, 'w') as fileoutput: - if format_output not in ['html']: - mdata = getattr(data, format_output) - fileoutput.write(mdata) - else: - fileoutput.write(html) - -
-def publish_statistics(stat, build_number, job_name): - """ Publish statistics info to TestRail - Note: Please, follow tablib python lib supported formats - - :param stat: list. - Each item contains test specific info and failure reason group - :return: True/False - """ -
- dump_statistics(stat, build_number, job_name, - format_output='html', - file_output='/tmp/failure_groups_statistics') - # We've got the file and it shall be uploaded to a TestRail custom field, - # but TestRail shall be extended at first. Waiting... - return True - -
-def main(): - """ - :param argv: command line arguments - :return: None - """ -
- parser = argparse.ArgumentParser(description='Get downstream build info' - ' for Jenkins swarm.runner build.' - ' Generate matrix statistics:' - ' (failure group -> builds & tests).' - ' Publish matrix to Testrail' - ' if necessary.') - parser.add_argument('-n', '--build-number', type=int, required=False, - dest='build_number', help='Jenkins job build number') - parser.add_argument('-j', '--job-name', type=str, - dest='job_name', default='9.0.swarm.runner', - help='Name of Jenkins job which runs tests (runner)') - parser.add_argument('-f', '--format', type=str, dest='formatfile', - default='html', - help='statistics format: html, json, xls, xlsx, yaml, csv') - parser.add_argument('-o', '--out', type=str, dest="fileoutput", - default='failure_groups_statistics', - help='Save statistics to file') - parser.add_argument('-t', '--track', action="store_true", - help='Publish statistics to TestPlan description') - parser.add_argument('-q', '--quiet', action="store_true", - help='Be quiet (disable logging except critical) ' - 'Overrides "--verbose" option.') - parser.add_argument("-v", "--verbose", action="store_true", - help="Enable debug logging.") - args = parser.parse_args() -
- if args.verbose: - logger.setLevel(DEBUG) - if args.quiet: - logger.setLevel(CRITICAL) - if args.formatfile and\ - args.formatfile not in ['json', 'html', 'xls', 'xlsx', 'yaml', 'csv']: - logger.info('Unsupported output format. Exit') - return 2 - if not args.build_number: - runner_build = Build(args.job_name, 'latest') - logger.info('Latest build number is {}. Job is {}'. - format(runner_build.number, args.job_name)) - args.build_number = runner_build.number -
- logger.info('Getting subbuilds for {} {}'.format(args.job_name, - args.build_number)) - subbuilds, swarm_jenkins_info = get_sub_builds(args.build_number) - if not subbuilds or not swarm_jenkins_info: - logger.error('Necessary subbuild info is absent. Exit') - return 3 - logger.info('{} Subbuilds have been found'.format(len(subbuilds))) -
- logger.info('Calculating failure groups') - failure_gd = get_global_failure_group_list(subbuilds)[0] - if not failure_gd: - logger.error('Necessary failure group info is absent.
Exit') - return 4 - logger.info('{} Failure groups have been found'.format(len(failure_gd))) -
- logger.info('Getting TestRail data') - testrail_testdata = get_testrail_testdata(args.job_name, - args.build_number) - if not testrail_testdata: - logger.error('Necessary TestRail info is absent. Exit') - return 5 - logger.info('TestRail data have been downloaded') -
- logger.info('Getting TestRail bugs') - testrail_bugs = get_bugs(subbuilds, testrail_testdata) - if not testrail_bugs: - logger.error('Necessary TestRail bug info is absent. Exit') - return 6 - logger.info('TestRail bugs have been fetched') -
- logger.info('Updating subbuilds data') - update_subbuilds_failuregroup(subbuilds, failure_gd, - testrail_testdata, - testrail_bugs) - logger.info('Subbuilds data have been updated') -
- logger.info('Generating statistics across all failure groups') - statistics = get_statistics(failure_gd, format_out=args.formatfile) - if not statistics: - logger.error('Necessary statistics info is absent. Exit') - return 7 - logger.info('Statistics have been generated') -
- if args.fileoutput and args.formatfile: - logger.info('Saving statistics') - dump_statistics(statistics, args.build_number, args.job_name, - args.formatfile, args.fileoutput) - logger.info('Statistics have been saved') - if args.track: - logger.info('Publishing statistics to TestRail') - if publish_statistics(statistics, args.build_number, args.job_name): - logger.info('Statistics have been published') - else: - logger.info('Statistics have not been published ' - 'due to an internal issue') - -
-if __name__ == '__main__': - sys.exit(main()) diff --git a/fuelweb_test/testrail/generate_statistics.py b/fuelweb_test/testrail/generate_statistics.py deleted file mode 100644 index a14a460be..000000000 --- a/fuelweb_test/testrail/generate_statistics.py +++ /dev/null @@ -1,533 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -
-from __future__ import unicode_literals -
-import json -import os -import re -import sys -import time -
-import argparse -from collections import OrderedDict -from logging import CRITICAL -from logging import DEBUG -
-from fuelweb_test.testrail.builds import Build -from fuelweb_test.testrail.launchpad_client import LaunchpadBug -from fuelweb_test.testrail.report import get_version -from fuelweb_test.testrail.settings import GROUPS_TO_EXPAND -from fuelweb_test.testrail.settings import LaunchpadSettings -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.settings import TestRailSettings -from fuelweb_test.testrail.testrail_client import TestRailProject - -
-def inspect_bug(bug): - # Return the target that matches the project/milestone defined in - # settings and has 'open' status. If there is no such target, just - # return the first available one.
- for target in bug.targets: - if target['project'] == LaunchpadSettings.project and \ - LaunchpadSettings.milestone in target['milestone'] and\ - target['status'] not in LaunchpadSettings.closed_statuses: - return target - return bug.targets[0] - - -def generate_test_plan_name(job_name, build_number): - # Generate name of TestPlan basing on iso image name - # taken from Jenkins job build parameters - runner_build = Build(job_name, build_number) - milestone, iso_number, prefix = get_version(runner_build.build_data) - if 'snapshot' not in prefix: - return ' '.join(filter(lambda x: bool(x), (milestone, - prefix, 'iso', - '#' + str(iso_number)))) - else: - return ' '.join(filter(lambda x: bool(x), (milestone, - prefix))) - - -def get_testrail(): - logger.info('Initializing TestRail Project configuration...') - return TestRailProject(url=TestRailSettings.url, - user=TestRailSettings.user, - password=TestRailSettings.password, - project=TestRailSettings.project) - - -class TestRunStatistics(object): - """Statistics for attached bugs in TestRun - """ - - def __init__(self, project, run_id, check_blocked=False): - self.project = project - self.run = self.project.get_run(run_id) - self.tests = self.project.get_tests(run_id) - self.results = self.get_results() - logger.info('Found TestRun "{0}" on "{1}" with {2} tests and {3} ' - 'results'.format(self.run['name'], - self.run['config'] or 'default config', - len(self.tests), len(self.results))) - self.blocked_statuses = [self.project.get_status(s)['id'] - for s in TestRailSettings.stauses['blocked']] - self.failed_statuses = [self.project.get_status(s)['id'] - for s in TestRailSettings.stauses['failed']] - self.check_blocked = check_blocked - self._bugs_statistics = {} - - def __getitem__(self, item): - return self.run.__getitem__(item) - - def get_results(self): - results = [] - stop = 0 - offset = 0 - while not stop: - new_results = self.project.get_results_for_run( - self.run['id'], - limit=TestRailSettings.max_results_per_request, - offset=offset) - results += new_results - offset += len(new_results) - stop = TestRailSettings.max_results_per_request - len(new_results) - return results - - def get_test_by_group(self, group, version): - if group in GROUPS_TO_EXPAND: - m = re.search(r'^\d+_(\S+)_on_[\d\.]+', version) - if m: - tests_thread = m.group(1) - group = '{0}_{1}'.format(group, tests_thread) - elif TestRailSettings.extra_factor_of_tc_definition: - group = '{}_{}'.format( - group, - TestRailSettings.extra_factor_of_tc_definition - ) - for test in self.tests: - if test['custom_test_group'] == group: - return test - logger.error('Test with group "{0}" not found!'.format(group)) - - def handle_blocked(self, test, result): - if result['custom_launchpad_bug']: - return False - m = re.search(r'Blocked by "(\S+)" test.', result['comment'] or '') - if m: - blocked_test_group = m.group(1) - else: - logger.warning('Blocked result #{0} for test {1} does ' - 'not have upstream test name in its ' - 'comments!'.format(result['id'], - test['custom_test_group'])) - return False - - if not result['version']: - logger.debug('Blocked result #{0} for test {1} does ' - 'not have version, can\'t find upstream ' - 'test case!'.format(result['id'], - test['custom_test_group'])) - return False - - bug_link = None - blocked_test = self.get_test_by_group(blocked_test_group, - result['version']) - if not blocked_test: - return False - logger.debug('Test {0} was blocked by failed test {1}'.format( - test['custom_test_group'], blocked_test_group)) - - blocked_results = 
self.project.get_results_for_test( - blocked_test['id']) - - # Since we manually add results to failed tests with statuses - # ProdFailed, TestFailed, etc. and attach bugs links to them, - # we could skip original version copying. So look for test - # results with target version, but allow to copy links to bugs - # from other results of the same test (newer are checked first) - if not any(br['version'] == result['version'] and - br['status_id'] in self.failed_statuses - for br in blocked_results): - logger.debug('Did not find result for test {0} with version ' - '{1}!'.format(blocked_test_group, result['version'])) - return False - - for blocked_result in sorted(blocked_results, - key=lambda x: x['id'], - reverse=True): - if blocked_result['status_id'] not in self.failed_statuses: - continue - - if blocked_result['custom_launchpad_bug']: - bug_link = blocked_result['custom_launchpad_bug'] - break - - if bug_link is not None: - result['custom_launchpad_bug'] = bug_link - self.project.add_raw_results_for_test(test['id'], result) - logger.info('Added bug {0} to blocked result of {1} test.'.format( - bug_link, test['custom_test_group'])) - return bug_link - return False - - @property - def bugs_statistics(self): - if self._bugs_statistics != {}: - return self._bugs_statistics - logger.info('Collecting stats for TestRun "{0}" on "{1}"...'.format( - self.run['name'], self.run['config'] or 'default config')) - - for test in self.tests: - logger.debug('Checking "{0}" test...'.format(test['title'])) - test_results = sorted( - self.project.get_results_for_test(test['id'], self.results), - key=lambda x: x['id'], reverse=True) - - linked_bugs = [] - is_blocked = False - - for result in test_results: - if result['status_id'] in self.blocked_statuses: - if self.check_blocked: - new_bug_link = self.handle_blocked(test, result) - if new_bug_link: - linked_bugs.append(new_bug_link) - is_blocked = True - break - if result['custom_launchpad_bug']: - linked_bugs.append(result['custom_launchpad_bug']) - is_blocked = True - break - if result['status_id'] in self.failed_statuses \ - and result['custom_launchpad_bug']: - linked_bugs.append(result['custom_launchpad_bug']) - - bug_ids = set([re.search(r'.*bugs?/(\d+)/?', link).group(1) - for link in linked_bugs - if re.search(r'.*bugs?/(\d+)/?', link)]) - - for bug_id in bug_ids: - if bug_id in self._bugs_statistics: - self._bugs_statistics[bug_id][test['id']] = { - 'group': test['custom_test_group'] or 'manual', - 'config': self.run['config'] or 'default', - 'blocked': is_blocked - } - - else: - self._bugs_statistics[bug_id] = { - test['id']: { - 'group': test['custom_test_group'] or 'manual', - 'config': self.run['config'] or 'default', - 'blocked': is_blocked - } - } - return self._bugs_statistics - - -class StatisticsGenerator(object): - """Generate statistics for bugs attached to TestRuns in TestPlan - """ - - def __init__(self, project, plan_id, run_ids=(), handle_blocked=False): - self.project = project - self.test_plan = self.project.get_plan(plan_id) - logger.info('Found TestPlan "{0}"'.format(self.test_plan['name'])) - - self.test_runs_stats = [ - TestRunStatistics(self.project, r['id'], handle_blocked) - for e in self.test_plan['entries'] for r in e['runs'] - if r['id'] in run_ids or len(run_ids) == 0 - ] - - self.bugs_statistics = {} - - def generate(self): - for test_run in self.test_runs_stats: - test_run_stats = test_run.bugs_statistics - self.bugs_statistics[test_run['id']] = dict() - for bug, tests in test_run_stats.items(): - if bug in 
self.bugs_statistics[test_run['id']]:
-                    self.bugs_statistics[test_run['id']][bug].update(tests)
-                else:
-                    self.bugs_statistics[test_run['id']][bug] = tests
-            logger.info('Found {0} linked bug(s)'.format(
-                len(self.bugs_statistics[test_run['id']])))
-
-    def update_desription(self, stats):
-        old_description = self.test_plan['description']
-        new_description = ''
-        for line in old_description.split('\n'):
-            if not re.match(r'^Bugs Statistics \(generated on .*\)$', line):
-                new_description += line + '\n'
-            else:
-                break
-        new_description += '\n' + stats
-        return self.project.update_plan(plan_id=self.test_plan['id'],
-                                        description=new_description)
-
-    def dump(self, run_id=None):
-        stats = dict()
-
-        if not run_id:
-            joint_bugs_statistics = dict()
-            for run in self.bugs_statistics:
-                for bug, tests in self.bugs_statistics[run].items():
-                    if bug in joint_bugs_statistics:
-                        joint_bugs_statistics[bug].update(tests)
-                    else:
-                        joint_bugs_statistics[bug] = tests
-        else:
-            for _run_id, _stats in self.bugs_statistics.items():
-                if _run_id == run_id:
-                    joint_bugs_statistics = _stats
-
-        for bug_id in joint_bugs_statistics:
-            try:
-                lp_bug = LaunchpadBug(bug_id).get_duplicate_of()
-            except KeyError:
-                logger.warning("Bug with ID {0} not found! Most probably "
-                               "it's private or a private security "
-                               "bug.".format(bug_id))
-                continue
-            bug_target = inspect_bug(lp_bug)
-
-            if lp_bug.bug.id in stats:
-                stats[lp_bug.bug.id]['tests'].update(
-                    joint_bugs_statistics[bug_id])
-            else:
-                stats[lp_bug.bug.id] = {
-                    'title': bug_target['title'],
-                    'importance': bug_target['importance'],
-                    'status': bug_target['status'],
-                    'project': bug_target['project'],
-                    'link': lp_bug.bug.web_link,
-                    'tests': joint_bugs_statistics[bug_id]
-                }
-            stats[lp_bug.bug.id]['failed_num'] = len(
-                [t for t, v in stats[lp_bug.bug.id]['tests'].items()
-                 if not v['blocked']])
-            stats[lp_bug.bug.id]['blocked_num'] = len(
-                [t for t, v in stats[lp_bug.bug.id]['tests'].items()
-                 if v['blocked']])
-
-        return OrderedDict(sorted(stats.items(),
-                                  key=lambda x: (x[1]['failed_num'] +
-                                                 x[1]['blocked_num']),
-                                  reverse=True))
-
-    def dump_html(self, stats=None, run_id=None):
-        if stats is None:
-            stats = self.dump()
-
-        html = '<html>\n'
-        html += '<h2>Bugs Statistics (generated on {0})</h2>\n'.format(
-            time.strftime("%c"))
-        html += '<h3>TestPlan: "{0}"</h3>\n'.format(self.test_plan['name'])
-        if run_id:
-            test_run = [r for r in self.test_runs_stats if r['id'] == run_id]
-            if test_run:
-                html += '<h3>TestRun: "{0}"</h3>\n'.format(
-                    test_run[0]['name'])
-
-        for values in stats.values():
-            if values['status'].lower() in ('invalid',):
-                color = 'gray'
-            elif values['status'].lower() in ('new', 'confirmed', 'triaged'):
-                color = 'red'
-            elif values['status'].lower() in ('in progress',):
-                color = 'blue'
-            elif values['status'].lower() in ('fix committed',):
-                color = 'goldenrod'
-            elif values['status'].lower() in ('fix released',):
-                color = 'green'
-            else:
-                color = 'orange'
-
-            title = re.sub(r'(Bug\s+#\d+\s+)(in\s+[^:]+:\s+)', '\g<1>',
-                           values['title'])
-            title = re.sub(r'(.{100}).*', '\g<1>...', title)
-            html += '[{0:<3} failed TC(s)]'.format(values['failed_num'])
-            html += '[{0:<3} blocked TC(s)]'.format(values['blocked_num'])
-            html += ('<font color={3}>[{0:^4}][{1:^9}]'
-                     '[{2:^13}]</font>').format(
-                values['project'], values['importance'], values['status'],
-                color)
-            html += '<a href="{0}">[{1}]</a>'.format(values['link'], title)
-            index = 1
-            for tid, params in values['tests'].items():
-                if index > 1:
-                    link_text = '{}'.format(index)
-                else:
-                    link_text = '{0} on {1}'.format(params['group'],
-                                                    params['config'])
-                html += ('<a href="{0}/index.php?/tests/view/{1}">'
-                         '[{2}]</a>').format(TestRailSettings.url, tid,
-                                             link_text)
-                index += 1
-            html += '<br>\n'
-        html += '</html>\n'
-        return html
-
-    def publish(self, stats=None):
-        if stats is None:
-            stats = self.dump()
-
-        header = 'Bugs Statistics (generated on {0})\n'.format(
-            time.strftime("%c"))
-        header += '==================================\n'
-
-        bugs_table = ('|||:Failed|:Blocked|:Project|:Priority'
-                      '|:Status|:Bug link|:Tests\n')
-
-        for values in stats.values():
-            title = re.sub(r'(Bug\s+#\d+\s+)(in\s+[^:]+:\s+)', '\g<1>',
-                           values['title'])
-            title = re.sub(r'(.{100}).*', '\g<1>...', title)
-            title = title.replace('[', '{')
-            title = title.replace(']', '}')
-            bugs_table += (
-                '||{failed}|{blocked}|{project}|{priority}|{status}|').format(
-                failed=values['failed_num'], blocked=values['blocked_num'],
-                project=values['project'].upper(),
-                priority=values['importance'], status=values['status'])
-            bugs_table += '[{0}]({1})|'.format(title, values['link'])
-            index = 1
-            for tid, params in values['tests'].items():
-                if index > 1:
-                    link_text = '{}'.format(index)
-                else:
-                    link_text = '{0} on {1}'.format(params['group'],
-                                                    params['config'])
-                bugs_table += '[{{{0}}}]({1}/index.php?/tests/view/{2}) '.\
-                    format(link_text, TestRailSettings.url, tid)
-                index += 1
-            bugs_table += '\n'
-
-        return self.update_desription(header + bugs_table)
-
-
-def save_stats_to_file(stats, file_name, html=''):
-    def warn_file_exists(file_path):
-        if os.path.exists(file_path):
-            logger.warning('File {0} exists and will be '
-                           'overwritten!'.format(file_path))
-
-    json_file_path = '{}.json'.format(file_name)
-    warn_file_exists(json_file_path)
-
-    with open(json_file_path, 'w+') as f:
-        json.dump(stats, f)
-
-    if html:
-        html_file_path = '{}.html'.format(file_name)
-        warn_file_exists(html_file_path)
-        with open(html_file_path, 'w+') as f:
-            f.write(html)
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Generate statistics for bugs linked to TestRun. Publish "
-                    "statistics to TestRail if necessary."
-    )
-    parser.add_argument('plan_id', type=int, nargs='?', default=None,
-                        help='Test plan ID in TestRail')
-    parser.add_argument('-j', '--job-name',
-                        dest='job_name', type=str, default=None,
-                        help='Name of Jenkins job which runs tests (runner). '
-                             'It will be used for TestPlan search instead '
-                             'of an ID')
-    parser.add_argument('-n', '--build-number', dest='build_number',
-                        default='latest', help='Jenkins job build number')
-    parser.add_argument('-r', '--run-id',
-                        dest='run_ids', type=str, default=None,
-                        help='(optional) IDs of TestRun to check '
-                             '(skip others)')
-    parser.add_argument('-b', '--handle-blocked', action="store_true",
-                        dest='handle_blocked', default=False,
-                        help='Copy bugs links to downstream blocked results')
-    parser.add_argument('-s', '--separate-runs', action="store_true",
-                        dest='separate_runs', default=False,
-                        help='Create separate statistics for each test run')
-    parser.add_argument('-p', '--publish', action="store_true",
-                        help='Publish statistics to TestPlan description')
-    parser.add_argument('-o', '--out-file', dest='output_file',
-                        default=None, type=str,
-                        help='Path to file to save statistics as JSON and/or '
-                             'HTML. 
Filename extension is added automatically') - parser.add_argument('-H', '--html', action="store_true", - help='Save statistics in HTML format to file ' - '(used with --out-file option)') - parser.add_argument('-q', '--quiet', action="store_true", - help='Be quiet (disable logging except critical) ' - 'Overrides "--verbose" option.') - parser.add_argument("-v", "--verbose", action="store_true", - help="Enable debug logging.") - - args = parser.parse_args() - - if args.verbose: - logger.setLevel(DEBUG) - - if args.quiet: - logger.setLevel(CRITICAL) - - testrail_project = get_testrail() - - if args.job_name: - logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan ' - 'details...'.format(args.build_number, args.job_name)) - test_plan_name = generate_test_plan_name(args.job_name, - args.build_number) - test_plan = testrail_project.get_plan_by_name(test_plan_name) - if test_plan: - args.plan_id = test_plan['id'] - else: - logger.warning('TestPlan "{0}" not found!'.format(test_plan_name)) - - if not args.plan_id: - logger.error('There is no TestPlan to process, exiting...') - return 1 - - run_ids = () if not args.run_ids else tuple( - int(arg) for arg in args.run_ids.split(',')) - - generator = StatisticsGenerator(testrail_project, - args.plan_id, - run_ids, - args.handle_blocked) - generator.generate() - stats = generator.dump() - - if args.publish: - logger.debug('Publishing bugs statistics to TestRail..') - generator.publish(stats) - - if args.output_file: - html = generator.dump_html(stats) if args.html else args.html - save_stats_to_file(stats, args.output_file, html) - - if args.separate_runs: - for run in generator.test_runs_stats: - file_name = '{0}_{1}'.format(args.output_file, run['id']) - stats = generator.dump(run_id=run['id']) - html = (generator.dump_html(stats, run['id']) if args.html - else args.html) - save_stats_to_file(stats, file_name, html) - - logger.info('Statistics generation complete!') - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/fuelweb_test/testrail/launchpad_client.py b/fuelweb_test/testrail/launchpad_client.py deleted file mode 100644 index 0082ef7b7..000000000 --- a/fuelweb_test/testrail/launchpad_client.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
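For reference, the retired statistics generator above could also be driven as a
library rather than through its CLI. A minimal sketch, assuming the module as
it existed before this removal (the plan ID below is hypothetical):

    from fuelweb_test.testrail.generate_statistics import (
        StatisticsGenerator, get_testrail, save_stats_to_file)

    project = get_testrail()                  # reads TestRailSettings from env
    generator = StatisticsGenerator(project, 12345, run_ids=(),
                                    handle_blocked=True)
    generator.generate()                      # collect linked bugs per test run
    stats = generator.dump()                  # OrderedDict, worst bugs first
    save_stats_to_file(stats, 'bugs_statistics',
                       html=generator.dump_html(stats))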
- -from __future__ import unicode_literals - -from launchpadlib.launchpad import Launchpad - - -class LaunchpadBug(object): - """LaunchpadBug.""" # TODO documentation - - def __init__(self, bug_id): - self.launchpad = Launchpad.login_anonymously('just testing', - 'production', - '.cache') - self.bug = self.launchpad.bugs[int(bug_id)] - - @property - def targets(self): - return [ - { - 'project': task.bug_target_name.split('/')[0], - 'milestone': str(task.milestone).split('/')[-1], - 'status': task.status, - 'importance': task.importance, - 'title': task.title, - } for task in self.bug_tasks] - - def get_duplicate_of(self): - bug = self.bug - duplicates = [] - while bug.duplicate_of and bug.id not in duplicates: - duplicates.append(bug.id) - bug = self.launchpad.load(str(bug.duplicate_of)) - return LaunchpadBug(bug.id) - - def __getattr__(self, item): - return self.bug.__getattr__(item) diff --git a/fuelweb_test/testrail/report.py b/fuelweb_test/testrail/report.py deleted file mode 100755 index 042ad79a3..000000000 --- a/fuelweb_test/testrail/report.py +++ /dev/null @@ -1,717 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division -from __future__ import unicode_literals - -import functools -import re -import time - -from logging import DEBUG -from optparse import OptionParser -from fuelweb_test.testrail.builds import Build -from fuelweb_test.testrail.builds import get_build_artifact -from fuelweb_test.testrail.builds import get_downstream_builds_from_html -from fuelweb_test.testrail.builds import get_jobs_for_view -from fuelweb_test.testrail.launchpad_client import LaunchpadBug -from fuelweb_test.testrail.settings import JENKINS -from fuelweb_test.testrail.settings import GROUPS_TO_EXPAND -from fuelweb_test.testrail.settings import LaunchpadSettings -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.settings import TestRailSettings -from fuelweb_test.testrail.testrail_client import TestRailProject - - -class TestResult(object): - """TestResult.""" # TODO documentation - - def __init__(self, name, group, status, duration, url=None, - version=None, description=None, comments=None, - launchpad_bug=None, steps=None): - self.name = name - self.group = group - self._status = status - self.duration = duration - self.url = url - self._version = version - self.description = description - self.comments = comments - self.launchpad_bug = launchpad_bug - self.available_statuses = { - 'passed': ['passed', 'fixed'], - 'failed': ['failed', 'regression'], - 'skipped': ['skipped'], - 'blocked': ['blocked'], - 'custom_status2': ['in_progress'] - } - self._steps = steps - - @property - def version(self): - # Version string length is limited by 250 symbols because field in - # TestRail has type 'String'. 
This limitation can be removed by - # changing field type to 'Text' - return (self._version or '')[:250] - - @version.setter - def version(self, value): - self._version = value[:250] - - @property - def status(self): - for s in self.available_statuses: - if self._status in self.available_statuses[s]: - return s - logger.error('Unsupported result status: "{0}"!'.format(self._status)) - return self._status - - @status.setter - def status(self, value): - self._status = value - - @property - def steps(self): - return self._steps - - def __str__(self): - result_dict = { - 'name': self.name, - 'group': self.group, - 'status': self.status, - 'duration': self.duration, - 'url': self.url, - 'version': self.version, - 'description': self.description, - 'comments': self.comments - } - return str(result_dict) - - -def retry(count=3): - def wrapped(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - i = 0 - while True: - try: - return func(*args, **kwargs) - except: - i += 1 - if i >= count: - raise - return wrapper - return wrapped - - -def get_downstream_builds(jenkins_build_data, status=None): - if 'subBuilds' not in jenkins_build_data.keys(): - return get_downstream_builds_from_html(jenkins_build_data['url']) - - return [{'name': b['jobName'], 'number': b['buildNumber'], - 'result': b['result']} for b in jenkins_build_data['subBuilds']] - - -def get_version(jenkins_build_data): - version = get_version_from_parameters(jenkins_build_data) - if not version: - version = get_version_from_artifacts(jenkins_build_data) - if not version: - version = get_version_from_upstream_job(jenkins_build_data) - if not version: - raise Exception('Failed to get iso version from Jenkins jobs ' - 'parameters/artifacts!') - return version - - -def get_version_from_upstream_job(jenkins_build_data): - upstream_job = get_job_parameter(jenkins_build_data, 'UPSTREAM_JOB_URL') - if not upstream_job: - return - causes = [a['causes'] for a in jenkins_build_data['actions'] - if 'causes' in a.keys()][0] - if len(causes) > 0: - upstream_job_name = causes[0]['upstreamProject'] - upstream_build_number = causes[0]['upstreamBuild'] - upstream_build = Build(upstream_job_name, upstream_build_number) - return (get_version_from_artifacts(upstream_build.build_data) or - get_version_from_parameters(upstream_build.build_data)) - - -def get_job_parameter(jenkins_build_data, parameter): - parameters_arr = [a['parameters'] for a in jenkins_build_data['actions'] - if 'parameters' in a.keys()] - # NOTE(akostrikov) LP #1603088 The root job is a snapshot job without - # parameters. It has fullDisplayName, which is parse-able. 
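-    # Example (illustrative): a parameterless snapshot build therefore falls
-    # back to its fullDisplayName, e.g. '9.x.snapshot #250', which
-    # get_version() can still parse for a milestone and prefix.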
- if len(parameters_arr) == 0: - return jenkins_build_data['fullDisplayName'] - parameters = parameters_arr[0] - target_params = [p['value'] for p in parameters - if p['name'].lower() == str(parameter).lower()] - if len(target_params) > 0: - return target_params[0] - - -def get_version_from_parameters(jenkins_build_data): - custom_version = get_job_parameter(jenkins_build_data, 'CUSTOM_VERSION') - if custom_version: - swarm_timestamp = jenkins_build_data['timestamp'] // 1000 \ - if 'timestamp' in jenkins_build_data else None - return (TestRailSettings.milestone, - time.strftime("%D %H:%M", time.localtime(swarm_timestamp)), - custom_version) - - iso_link = get_job_parameter(jenkins_build_data, 'magnet_link') - if iso_link: - return get_version_from_iso_name(iso_link) - - -def get_version_from_artifacts(jenkins_build_data): - if not any([artifact for artifact in jenkins_build_data['artifacts'] - if artifact['fileName'] == JENKINS['magnet_link_artifact']]): - return - iso_link = (get_build_artifact(url=jenkins_build_data['url'], - artifact=JENKINS['magnet_link_artifact'])) - if iso_link: - return get_version_from_iso_name(iso_link) - - -def get_version_from_iso_name(iso_link): - match = re.search(r'.*\bfuel-(?P[a-zA-Z]*)-?(?P\d+' - r'(?P\.\d+)+)-(?P[a-zA-Z]*)-?' - r'(?P\d+)-.*', iso_link) - if match: - return (match.group('version'), - int(match.group('buildnum')), - match.group('prefix1') or match.group('prefix2')) - - -def expand_test_group(group, systest_build_name, os): - """Expand specified test names with the group name of the job - which is taken from the build name, for example: - group: 'setup_master' - systest_build_name: '7.0.system_test.ubuntu.bonding_ha_one_controller' - os: str, release name in lower case, for example: 'ubuntu' - return: 'setup_master_bonding_ha_one_controller' - """ - if group in GROUPS_TO_EXPAND: - if os in systest_build_name: - sep = '.' + os + '.' - else: - sep = '.' 
systest_group_name = systest_build_name.split(sep)[-1]
-
-        if systest_group_name:
-            group = '_'.join([group, systest_group_name])
-    elif TestRailSettings.extra_factor_of_tc_definition:
-        group = '{}_{}'.format(
-            group,
-            TestRailSettings.extra_factor_of_tc_definition
-        )
-    return group
-
-
-def check_blocked(test):
-    """Change test result status to 'blocked' if it was
-    skipped due to failure of another dependent test
-    :param test: dict, test result info
-    :return: None
-    """
-    if test['status'].lower() != 'skipped':
-        return
-    match = re.match(r'^Failure in <function (\w+) at 0x\w+>$',
-                     test['skippedMessage'])
-    if match:
-        failed_func_name = match.group(1)
-        if test['name'] != failed_func_name:
-            test['status'] = 'blocked'
-            test['skippedMessage'] = 'Blocked by "{0}" test.'.format(
-                failed_func_name)
-
-
-def check_untested(test):
-    """Check if test result is fake
-    :param test: dict
-    :return: bool
-    """
-    if test['name'] == 'jenkins' and 'skippedMessage' not in test:
-        return True
-    return False
-
-
-def get_test_build(build_name, build_number, check_rebuild=False,
-                   force_rebuild_search=False):
-    """Get test data from Jenkins job build
-    :param build_name: string
-    :param build_number: string
-    :param check_rebuild: bool, if True then look for newer job rebuild(s)
-    :param force_rebuild_search: bool, if True then force rebuild(s) search
-    :return: dict
-    """
-    test_build = Build(build_name, build_number)
-    first_case = test_build.test_data()['suites'][0]['cases'].pop()['name']
-
-    if (force_rebuild_search or first_case == 'jenkins') and check_rebuild:
-        iso_magnet = get_job_parameter(test_build.build_data, 'MAGNET_LINK')
-        if not iso_magnet:
-            return test_build
-
-        latest_build_number = Build(build_name, 'latest').number
-        builds_to_check = [i for i in
-                           range(build_number + 1, latest_build_number + 1)]
-        if force_rebuild_search:
-            builds_to_check.reverse()
-
-        for n in builds_to_check:
-            test_rebuild = Build(build_name, n)
-            if get_job_parameter(test_rebuild.build_data, 'MAGNET_LINK') \
-                    == iso_magnet:
-                logger.debug("Found test job rebuild: "
-                             "{0}".format(test_rebuild.url))
-                return test_rebuild
-    return test_build
-
-
-@retry(count=3)
-def get_tests_results(systest_build, os, force_rebuild_search=False):
-    tests_results = []
-    test_build = get_test_build(systest_build['name'],
-                                systest_build['number'],
-                                check_rebuild=True,
-                                force_rebuild_search=force_rebuild_search)
-    run_test_data = test_build.test_data()
-    test_classes = {}
-    for one in run_test_data['suites'][0]['cases']:
-        class_name = one['className']
-        if class_name not in test_classes:
-            test_classes[class_name] = {}
-            test_classes[class_name]['child'] = []
-            test_classes[class_name]['duration'] = 0
-            test_classes[class_name]["failCount"] = 0
-            test_classes[class_name]["passCount"] = 0
-            test_classes[class_name]["skipCount"] = 0
-        else:
-            if one['className'] == one['name']:
-                logger.warning("Found duplicate test in run - {}".format(
-                    one['className']))
-                continue
-
-        test_class = test_classes[class_name]
-        test_class['child'].append(one)
-        test_class['duration'] += float(one['duration'])
-        if one['status'].lower() in ('failed', 'error'):
-            test_class["failCount"] += 1
-        if one['status'].lower() == 'passed':
-            test_class["passCount"] += 1
-        if one['status'].lower() == 'skipped':
-            test_class["skipCount"] += 1
-
-    for klass in test_classes:
-        klass_result = test_classes[klass]
-        fuel_tests_results = []
-        if klass.startswith('fuel_tests.'):
-            for one in klass_result['child']:
-                test_name = one['name']
-                test_package, _, test_class = 
one['className'].rpartition('.') - test_result = TestResult( - name=test_name, - group=expand_test_group(one['name'], - systest_build['name'], - os), - status=one['status'].lower(), - duration='{0}s'.format(int(one['duration']) + 1), - url='{0}testReport/{1}/{2}/{3}'.format( - test_build.url, - test_package, - test_class, - test_name), - version='_'.join( - [test_build.build_data["id"]] + ( - test_build.build_data["description"] or - test_name).split()), - description=(test_build.build_data["description"] or - test_name), - comments=one['skippedMessage'], - ) - fuel_tests_results.append(test_result) - elif len(klass_result['child']) == 1: - test = klass_result['child'][0] - if check_untested(test): - continue - check_blocked(test) - test_result = TestResult( - name=test['name'], - group=expand_test_group(test['className'], - systest_build['name'], - os), - status=test['status'].lower(), - duration='{0}s'.format(int(test['duration']) + 1), - url='{0}testReport/(root)/{1}/'.format(test_build.url, - test['name']), - version='_'.join( - [test_build.build_data["id"]] + ( - test_build.build_data["description"] or - test['name']).split()), - description=test_build.build_data["description"] or - test['name'], - comments=test['skippedMessage'] - ) - else: - case_steps = [] - test_duration = sum( - [float(c['duration']) for c in klass_result['child']]) - steps = [c for c in klass_result['child'] - if c['name'].startswith('Step')] - steps = sorted(steps, key=lambda k: k['name']) - test_name = steps[0]['className'] - test_group = steps[0]['className'] - test_comments = None - is_test_failed = any([s['status'].lower() in ('failed', 'error') - for s in steps]) - - for step in steps: - if step['status'].lower() in ('failed', 'error'): - case_steps.append({ - "content": step['name'], - "actual": step['errorStackTrace'] or - step['errorDetails'], - "status": step['status'].lower()}) - test_comments = "{err}\n\n\n{stack}".format( - err=step['errorDetails'], - stack=step['errorStackTrace']) - else: - case_steps.append({ - "content": step['name'], - "actual": "pass", - "status": step['status'].lower() - }) - test_result = TestResult( - name=test_name, - group=expand_test_group(test_group, - systest_build['name'], - os), - status='failed' if is_test_failed else 'passed', - duration='{0}s'.format(int(test_duration) + 1), - url='{0}testReport/(root)/{1}/'.format(test_build.url, - test_name), - version='_'.join( - [test_build.build_data["id"]] + ( - test_build.build_data["description"] or - test_name).split()), - description=test_build.build_data["description"] or - test_name, - comments=test_comments, - steps=case_steps, - ) - if fuel_tests_results: - tests_results.extend(fuel_tests_results) - else: - tests_results.append(test_result) - return tests_results - - -def publish_results(project, milestone_id, test_plan, - suite_id, config_id, results): - test_run_ids = [run['id'] for entry in test_plan['entries'] - for run in entry['runs'] if suite_id == run['suite_id'] and - config_id in run['config_ids']] - logger.debug('Looking for previous tests runs on "{0}" using tests suite ' - '"{1}"...'.format(project.get_config(config_id)['name'], - project.get_suite(suite_id)['name'])) - previous_tests_runs = project.get_previous_runs( - milestone_id=milestone_id, - suite_id=suite_id, - config_id=config_id, - limit=TestRailSettings.previous_results_depth, - days_to_analyze=TestRailSettings.previous_results_days_to_analyze) - logger.debug('Found next test runs: {0}'.format( - [test_run['description'] for test_run in 
previous_tests_runs])) - cases = project.get_cases(suite_id=suite_id) - tests = project.get_tests(run_id=test_run_ids[0]) - results_to_publish = [] - - for result in results: - test = project.get_test_by_group(run_id=test_run_ids[0], - group=result.group, - tests=tests) - if not test: - logger.error("Test for '{0}' group not found: {1}".format( - result.group, result.url)) - continue - existing_results_versions = [r['version'] for r in - project.get_results_for_test(test['id'])] - if result.version in existing_results_versions: - continue - if result.status not in ('passed', 'blocked'): - case_id = project.get_case_by_group(suite_id=suite_id, - group=result.group, - cases=cases)['id'] - run_ids = [run['id'] for run in previous_tests_runs[0: - int(TestRailSettings.previous_results_depth)]] - previous_results = project.get_all_results_for_case( - run_ids=run_ids, - case_id=case_id) - lp_bug = get_existing_bug_link(previous_results) - if lp_bug: - result.launchpad_bug = lp_bug['bug_link'] - results_to_publish.append(result) - - try: - if len(results_to_publish) > 0: - project.add_results_for_cases(run_id=test_run_ids[0], - suite_id=suite_id, - tests_results=results_to_publish) - except: - logger.error('Failed to add new results for tests: {0}'.format( - [r.group for r in results_to_publish] - )) - raise - return results_to_publish - - -@retry(count=3) -def get_existing_bug_link(previous_results): - results_with_bug = [result for result in previous_results if - result["custom_launchpad_bug"] is not None] - if not results_with_bug: - return - for result in sorted(results_with_bug, - key=lambda k: k['created_on'], - reverse=True): - try: - bug_id = int(result["custom_launchpad_bug"].strip('/').split( - '/')[-1]) - except ValueError: - logger.warning('Link "{0}" doesn\'t contain bug id.'.format( - result["custom_launchpad_bug"])) - continue - try: - bug = LaunchpadBug(bug_id).get_duplicate_of() - except KeyError: - logger.warning("Bug with id '{bug_id}' is private or \ - doesn't exist.".format(bug_id=bug_id)) - continue - except Exception: - logger.exception("Strange situation with '{bug_id}' \ - issue".format(bug_id=bug_id)) - continue - - for target in bug.targets: - if target['project'] == LaunchpadSettings.project and\ - target['milestone'] == LaunchpadSettings.milestone and\ - target['status'] not in LaunchpadSettings.closed_statuses: - target['bug_link'] = result["custom_launchpad_bug"] - return target - - -def main(): - - parser = OptionParser( - description="Publish results of system tests from Jenkins build to " - "TestRail. See settings.py for configuration." 
- ) - parser.add_option('-j', '--job-name', dest='job_name', default=None, - help='Jenkins swarm runner job name') - parser.add_option('-N', '--build-number', dest='build_number', - default='latest', - help='Jenkins swarm runner build number') - parser.add_option('-o', '--one-job', dest='one_job_name', - default=None, - help=('Process only one job name from the specified ' - 'parent job or view')) - parser.add_option("-w", "--view", dest="jenkins_view", default=False, - help="Get system tests jobs from Jenkins view") - parser.add_option("-l", "--live", dest="live_report", action="store_true", - help="Get tests results from running swarm") - parser.add_option("-m", "--manual", dest="manual_run", action="store_true", - help="Manually add tests cases to TestRun (tested only)") - parser.add_option('-c', '--create-plan-only', action="store_true", - dest="create_plan_only", default=False, - help='Jenkins swarm runner job name') - parser.add_option('-f', '--force-rebuild', action="store_true", - dest="force_rebuild_search", default=False, - help='Force manual job rebuild search ') - parser.add_option("-v", "--verbose", - action="store_true", dest="verbose", default=False, - help="Enable debug output") - - (options, _) = parser.parse_args() - - if options.verbose: - logger.setLevel(DEBUG) - - if options.live_report and options.build_number == 'latest': - options.build_number = 'latest_started' - - # STEP #1 - # Initialize TestRail Project and define configuration - logger.info('Initializing TestRail Project configuration...') - project = TestRailProject(url=TestRailSettings.url, - user=TestRailSettings.user, - password=TestRailSettings.password, - project=TestRailSettings.project) - - tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite) - operation_systems = [{'name': config['name'], 'id': config['id'], - 'distro': config['name'].split()[0].lower()} - for config in project.get_config_by_name( - 'Operation System')['configs'] if - config['name'] in TestRailSettings.operation_systems] - tests_results = {os['distro']: [] for os in operation_systems} - - # STEP #2 - # Get tests results from Jenkins - logger.info('Getting tests results from Jenkins...') - if options.jenkins_view: - jobs = get_jobs_for_view(options.jenkins_view) - tests_jobs = [{'name': j, 'number': 'latest'} - for j in jobs if 'system_test' in j] if \ - not options.create_plan_only else [] - runner_job = [j for j in jobs if 'runner' in j][0] - runner_build = Build(runner_job, 'latest') - elif options.job_name: - runner_build = Build(options.job_name, options.build_number) - tests_jobs = get_downstream_builds(runner_build.build_data) if \ - not options.create_plan_only else [] - else: - logger.error("Please specify either Jenkins swarm runner job name (-j)" - " or Jenkins view with system tests jobs (-w). 
Exiting...")
-        return
-
-    for systest_build in tests_jobs:
-        if (options.one_job_name and
-                options.one_job_name != systest_build['name']):
-            logger.debug("Skipping '{0}' because --one-job is specified"
-                         .format(systest_build['name']))
-            continue
-        if options.job_name:
-            if 'result' not in systest_build.keys():
-                logger.debug("Skipping '{0}' job because it doesn't run "
-                             "tests (build #{1} contains no results)".format(
-                                 systest_build['name'],
-                                 systest_build['number']))
-                continue
-            if systest_build['result'] is None:
-                logger.debug("Skipping '{0}' job (build #{1}) because it's "
-                             "still running...".format(
-                                 systest_build['name'],
-                                 systest_build['number']))
-                continue
-        for os in tests_results.keys():
-            if os in systest_build['name'].lower():
-                tests_results[os].extend(
-                    get_tests_results(systest_build, os,
-                                      options.force_rebuild_search))
-
-    # STEP #3
-    # Create new TestPlan in TestRail (or get existing) and add TestRuns
-    milestone, iso_number, prefix = get_version(runner_build.build_data)
-    milestone = project.get_milestone_by_name(name=milestone)
-
-    # NOTE(akostrikov) LP #1603088 When there is a snapshot word in prefix,
-    # we can skip the timestamp part of a test plan name.
-    if 'snapshot' in prefix:
-        test_plan_name = ' '.join(
-            filter(lambda x: bool(x),
-                   (milestone['name'], prefix.replace('9.x.', ''))))
-    else:
-        test_plan_name = ' '.join(
-            filter(lambda x: bool(x),
-                   (milestone['name'], prefix, 'iso', '#' + str(iso_number))))
-
-    test_plan = project.get_plan_by_name(test_plan_name)
-
-    iso_job_name = '{0}{1}.all'.format(milestone['name'],
-                                       '-{0}'.format(prefix) if prefix
-                                       else '')
-    iso_link = '/'.join([JENKINS['url'], 'job', iso_job_name,
-                         str(iso_number)])
-    test_run = TestRailSettings.tests_description
-    description = test_run if test_run else iso_link
-    if not test_plan:
-        test_plan = project.add_plan(test_plan_name,
-                                     description=description,
-                                     milestone_id=milestone['id'],
-                                     entries=[]
-                                     )
-        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
-    else:
-        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))
-        test_plan_description = test_plan.get('description')
-        if description not in test_plan_description:
-            new_description = test_plan_description + '\n' + description
-            logger.info('Update description for existing TestPlan "{0}" '
-                        'from "{1}" to "{2}".'.format(test_plan_name,
-                                                      test_plan_description,
-                                                      new_description))
-            project.update_plan(test_plan.get('id'),
-                                description=new_description)
-
-    if options.create_plan_only:
-        return
-
-    plan_entries = []
-    all_cases = project.get_cases(suite_id=tests_suite['id'])
-    for os in operation_systems:
-        cases_ids = []
-        if options.manual_run:
-            all_results_groups = [r.group for r in tests_results[os['distro']]]
-            for case in all_cases:
-                if case['custom_test_group'] in all_results_groups:
-                    cases_ids.append(case['id'])
-        plan_entries.append(
-            project.test_run_struct(
-                name='{suite_name}'.format(suite_name=tests_suite['name']),
-                suite_id=tests_suite['id'],
-                milestone_id=milestone['id'],
-                description='Results of system tests ({tests_suite}) on '
-                            'iso #"{iso_number}"'.format(
-                                tests_suite=tests_suite['name'],
-                                iso_number=iso_number),
-                config_ids=[os['id']],
-                include_all=True,
-                case_ids=cases_ids
-            )
-        )
-
-    if not any(entry['suite_id'] == tests_suite['id']
-               for entry in test_plan['entries']):
-        if project.add_plan_entry(plan_id=test_plan['id'],
-                                  suite_id=tests_suite['id'],
-                                  config_ids=[os['id'] for os
-                                              in operation_systems],
-                                  runs=plan_entries):
-            test_plan = 
project.get_plan(test_plan['id']) - - # STEP #4 - # Upload tests results to TestRail - logger.info('Uploading tests results to TestRail...') - for os in operation_systems: - logger.info('Checking tests results for "{0}"...'.format(os['name'])) - results_to_publish = publish_results( - project=project, - milestone_id=milestone['id'], - test_plan=test_plan, - suite_id=tests_suite['id'], - config_id=os['id'], - results=tests_results[os['distro']] - ) - logger.debug('Added new results for tests ({os}): {tests}'.format( - os=os['name'], tests=[r.group for r in results_to_publish] - )) - - logger.info('Report URL: {0}'.format(test_plan['url'])) - - -if __name__ == "__main__": - main() diff --git a/fuelweb_test/testrail/report_pi.py b/fuelweb_test/testrail/report_pi.py deleted file mode 100644 index ef7d44ca1..000000000 --- a/fuelweb_test/testrail/report_pi.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -from logging import DEBUG -from optparse import OptionParser - -import requests - -from fuelweb_test.testrail.builds import Build -from fuelweb_test.testrail.report import get_tests_results -from fuelweb_test.testrail.report import publish_results -from fuelweb_test.testrail.settings import JENKINS -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.settings import TestRailSettings -from fuelweb_test.testrail.testrail_client import TestRailProject - - -def find_run_by_name(test_plan, run_name): - """This function finds the test run by its name - """ - for entry in test_plan['entries']: - for run in entry['runs']: - if run['name'] == run_name: - return run - - -def get_job_info(url): - job_url = "/".join([url, 'api/json']) - logger.debug("Request job info from %s", job_url) - return requests.get(job_url).json() - - -def main(): - parser = OptionParser( - description="Publish results of system tests from Jenkins build to " - "TestRail. See settings.py for configuration." 
- ) - parser.add_option('-j', '--job-name', dest='job_name', default=None, - help='Jenkins swarm runner job name') - parser.add_option('-N', '--build-number', dest='build_number', - default='latest', - help='Jenkins swarm runner build number') - parser.add_option("-l", "--live", dest="live_report", action="store_true", - help="Get tests results from running swarm") - parser.add_option("-v", "--verbose", - action="store_true", dest="verbose", default=False, - help="Enable debug output") - - (options, _) = parser.parse_args() - - if options.verbose: - logger.setLevel(DEBUG) - - if options.live_report and options.build_number == 'latest': - build_number = 'latest_started' - else: - build_number = options.build_number - - # STEP #1 - # Initialize TestRail Project and define configuration - logger.info('Initializing TestRail Project configuration...') - project = TestRailProject(url=TestRailSettings.url, - user=TestRailSettings.user, - password=TestRailSettings.password, - project=TestRailSettings.project) - logger.info('Initializing TestRail Project configuration... done') - - operation_systems = [{'name': config['name'], 'id': config['id'], - 'distro': config['name'].split()[0].lower()} - for config in project.get_config_by_name( - 'Operation System')['configs']] - os_mile = {'6.1': ['Centos 6.5', 'Ubuntu 14.04'], - '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']} - - tests_results = {} - - # STEP #2 - # Get tests results from Jenkins - runner_build = Build(options.job_name, build_number) - runs = runner_build.build_data['runs'] - - # Analyze each test individually - for run_one in runs: - if '5.1' in run_one['url']: - continue # Release 5.1 to skip - tests_result = get_job_info(run_one['url']) - if not tests_result['description']: - continue # Not completed results to skip - if 'skipping' in tests_result['description']: - continue # Not performed tests to skip - tests_job = {'result': tests_result['result'], - 'name': (options.job_name + '/' + - tests_result['url'].split('/')[-3]), - 'number': int(tests_result['url'].split('/')[-2]), - 'mile': (tests_result['description']. - split()[0].split('-')[0]), - 'iso': (int(tests_result['description']. 
- split()[0].split('-')[1]))} - if tests_job['mile'] not in tests_results: - tests_results[tests_job['mile']] = {} - test_mile = tests_results[tests_job['mile']] - if tests_job['iso'] not in test_mile: - test_mile[tests_job['iso']] = {} - test_iso = test_mile[tests_job['iso']] - for os in operation_systems: - if os['distro'] in tests_job['name'].lower() and\ - os['name'] in os_mile[tests_job['mile']]: - if os['id'] not in test_iso: - test_iso[os['id']] = [] - test_os_id = test_iso[os['id']] - test_os_id.extend(get_tests_results(tests_job, os['distro'])) - - # STEP #3 - # Create new TestPlan in TestRail (or get existing) and add TestRuns - for mile in tests_results: - mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile) - logger.info(mile_tests_suite) - tests_suite = project.get_suite_by_name(mile_tests_suite) - milestone = project.get_milestone_by_name(name=mile) - for iso_number in tests_results.get(mile, {}): - # Create new TestPlan name check the same name in testrail - test_plan_name = '{milestone} iso #{iso_number}'.format( - milestone=milestone['name'], - iso_number=iso_number) - test_plan = project.get_plan_by_name(test_plan_name) - if not test_plan: - test_plan = project.add_plan( - test_plan_name, - description='/'.join([JENKINS['url'], - 'job', - '{0}.all'.format(milestone['name']), - str(iso_number)]), - milestone_id=milestone['id'], - entries=[]) - logger.info('Created new TestPlan "{0}".' - .format(test_plan_name)) - else: - logger.info('Found existing TestPlan "{0}".' - .format(test_plan_name)) - plan_entries = [] - # Create a test plan entry - config_ids = [] - for os in operation_systems: - if os['name'] in os_mile[mile]: - config_ids.append(os['id']) - cases_ids = [] - plan_entries.append( - project.test_run_struct( - name=tests_suite['name'], - suite_id=tests_suite['id'], - milestone_id=milestone['id'], - description=('Results of system tests ({t_suite})' - ' on iso #"{iso_number}"' - .format(t_suite=tests_suite['name'], - iso_number=iso_number)), - config_ids=[os['id']], - include_all=True, - case_ids=cases_ids)) - # Create a test plan entry with the test run - run = find_run_by_name(test_plan, tests_suite['name']) - if not run: - logger.info('Adding a test plan entry with test run %s ...', - tests_suite['name']) - entry = project.add_plan_entry(plan_id=test_plan['id'], - suite_id=tests_suite['id'], - config_ids=config_ids, - runs=plan_entries) - logger.info('The test plan entry has been added.') - run = entry['runs'][0] - test_plan = project.get_plan(test_plan['id']) - - # STEP #4 - # Upload tests results to TestRail - logger.info('Uploading tests results to TestRail...') - for os_id in tests_results.get(mile, {})\ - .get(iso_number, {}): - logger.info('Checking tests results for %s...', - project.get_config(os_id)['name']) - tests_added = publish_results( - project=project, - milestone_id=milestone['id'], - test_plan=test_plan, - suite_id=tests_suite['id'], - config_id=os_id, - results=tests_results[mile][iso_number][os_id]) - logger.debug('Added new results for tests (%s): %s', - project.get_config(os_id)['name'], - [r.group for r in tests_added]) - - logger.info('Report URL: %s', test_plan['url']) - - -if __name__ == "__main__": - main() diff --git a/fuelweb_test/testrail/report_tempest_results.py b/fuelweb_test/testrail/report_tempest_results.py deleted file mode 100644 index d4fa47bc9..000000000 --- a/fuelweb_test/testrail/report_tempest_results.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2015 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -import optparse -from xml.etree import ElementTree - -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import urllib -# pylint: enable=import-error - -from fuelweb_test.testrail import report -from fuelweb_test.testrail.settings import JENKINS -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.settings import TestRailSettings -from fuelweb_test.testrail.testrail_client import TestRailProject - - -LOG = logger - - -def parse_xml_report(path_to_report): - """This function parses the Tempest XML report and returns the list with - TestResult objects. Each TestResult object corresponds to one of the tests - and contains all the result information for the respective test. - """ - - tree = ElementTree.parse(path_to_report) - test_results = [] - for elem in tree.findall('testcase'): - status = 'passed' - description = None - child_elem = elem.getchildren() - if child_elem: - status = child_elem[0].tag - description = child_elem[0].text - - test_result = report.TestResult(name=elem.get('name'), - group=elem.get('classname'), - status='failed' - if status == 'failure' else status, - description=description, - duration=1) - test_results.append(test_result) - - return test_results - - -def mark_all_tests_as_blocked(client, tests_suite): - """This function marks all Tempest tests as blocked and returns the list - with TestResult objects. Each TestResult object corresponds to one of - the tests and contains the information that the test is blocked. - """ - - test_results = [] - for case in client.get_cases(tests_suite['id']): - test_result = report.TestResult(name=case['title'], - group=case['custom_test_group'], - status='blocked', - description=None, - duration=1) - test_results.append(test_result) - - return test_results - - -def mark_all_tests_as_in_progress(client, tests_suite): - """This function marks all Tempest tests as "in progress" and returns - the list with TestResult objects. Each TestResult object corresponds - to one of the tests and contains the information that the test is - "in progress" status. - """ - - test_results = [] - for case in client.get_cases(tests_suite['id']): - test_result = report.TestResult(name=case['title'], - group=case['custom_test_group'], - status='in_progress', - description=None, - duration=1) - test_results.append(test_result) - - return test_results - - -def find_run_by_name_and_config_in_test_plan(test_plan, run_name, config): - """This function finds the test run by its name and the specified - configuration (for example, Centos 6.5) in the specified test plan. - """ - - for entry in test_plan['entries']: - for run in entry['runs']: - if run['name'] == run_name and run['config'] == config: - return run - - -def find_run_by_config_in_test_plan_entry(test_plan_entry, config): - """This function finds the test run by the specified configuration - (for example, Ubuntu 14.04) in the specified test plan entry. 
- """ - - for run in test_plan_entry['runs']: - if run['config'] == config: - return run - - -def upload_test_results(client, test_run, suite_id, test_results): - """ This function allows to upload large number of test results - with the minimum number of APi requests to TestRail. - """ - - test_cases = client.get_cases(suite_id) - results = [] - statuses = {} - - for test_result in test_results: - if test_result.status in statuses: - status_id = statuses[test_result.status] - else: - status_id = client.get_status(test_result.status)['id'] - statuses[test_result.status] = status_id - - if 'setUpClass' in test_result.name: - i = test_result.name.find('tempest') - group = test_result.name[i:-1] - for test in test_cases: - if group in test.get("custom_test_group"): - results.append({"case_id": test['id'], - "status_id": status_id}) - else: - for test in test_cases: - if test_result.name in test.get("title"): - results.append({"case_id": test['id'], - "status_id": status_id}) - - client.add_results_for_tempest_cases(test_run['id'], results) - - -def main(): - parser = optparse.OptionParser( - description='Publish the results of Tempest tests in TestRail') - parser.add_option('-r', '--run-name', dest='run_name', - help='The name of a test run. ' - 'The name should describe the configuration ' - 'of the environment where Tempest tests were run') - parser.add_option('-i', '--iso', dest='iso_number', help='ISO number') - parser.add_option('-p', '--path-to-report', dest='path', - help='The path to the Tempest XML report') - parser.add_option('-c', '--conf', dest='config', default='Ubuntu 14.04', - help='The name of one of the configurations') - parser.add_option('-m', '--multithreading', dest='threads_count', - default=100, help='The count of threads ' - 'for uploading the test results') - parser.add_option('-b', '--block-all-tests', - dest='all_tests_blocked', action='store_true', - help='Mark all Tempest tests as "blocked"') - parser.add_option('-t', '--tests-in-progress', - dest='tests_in_progress', action='store_true', - help='Mark all Tempest tests as "in progress"') - parser.add_option('--prefix', - dest='prefix', action='store_true', default='', - help='Add some prefix to test run') - - (options, _) = parser.parse_args() - - if options.run_name is None: - raise optparse.OptionValueError('No run name was specified!') - if options.iso_number is None: - raise optparse.OptionValueError('No ISO number was specified!') - if (options.path is None and - not options.all_tests_blocked and not options.tests_in_progress): - raise optparse.OptionValueError('No path to the Tempest ' - 'XML report was specified!') - - # STEP #1 - # Initialize TestRail project client - LOG.info('Initializing TestRail project client...') - client = TestRailProject(url=TestRailSettings.url, - user=TestRailSettings.user, - password=TestRailSettings.password, - project=TestRailSettings.project) - LOG.info('TestRail project client has been initialized.') - - tests_suite = client.get_suite_by_name(TestRailSettings.tests_suite) - LOG.info('Tests suite is "{0}".'.format(tests_suite['name'])) - - # STEP #2 - # Parse the test results - if options.all_tests_blocked: - test_results = mark_all_tests_as_blocked(client, tests_suite) - elif options.tests_in_progress: - test_results = mark_all_tests_as_in_progress(client, tests_suite) - else: - LOG.info('Parsing the test results...') - test_results = parse_xml_report(options.path) - LOG.info('The test results have been parsed.') - - # STEP #3 - # Create new test plan (or find existing) - 
name = '{0} {1}iso #{2}'
-    if options.prefix != '':
-        options.prefix += ' '
-
-    milestone = client.get_milestone_by_name(TestRailSettings.milestone)
-    test_plan_name = name.format(milestone['name'], options.prefix,
-                                 options.iso_number)
-    LOG.info('Test plan name is "{0}".'.format(test_plan_name))
-
-    LOG.info('Trying to find test plan "{0}"...'.format(test_plan_name))
-    test_plan = client.get_plan_by_name(test_plan_name)
-    if not test_plan:
-        LOG.info('The test plan was not found. Creating one...')
-        url = '/job/{0}.all/{1}'.format(milestone['name'], options.iso_number)
-        description = urllib.parse.urljoin(JENKINS['url'], url)
-        test_plan = client.add_plan(test_plan_name,
-                                    description=description,
-                                    milestone_id=milestone['id'],
-                                    entries=[])
-        LOG.info('The test plan has been created.')
-    else:
-        LOG.info('The test plan was found.')
-
-    # Get ID of each OS from list "TestRailSettings.operation_systems"
-    config_ids = []
-    for os_name in TestRailSettings.operation_systems:
-        for conf in client.get_config_by_name('Operation System')['configs']:
-            if conf['name'] == os_name:
-                config_ids.append(conf['id'])
-                break
-
-    # Define test runs for CentOS and Ubuntu
-    run_name = 'Tempest - ' + options.run_name
-    runs = []
-    for conf_id in config_ids:
-        run = client.test_run_struct(name=run_name,
-                                     suite_id=tests_suite['id'],
-                                     milestone_id=milestone['id'],
-                                     description='Tempest results',
-                                     config_ids=[conf_id])
-        runs.append(run)
-
-    # Create a test plan entry with the test runs
-    run = find_run_by_name_and_config_in_test_plan(test_plan,
-                                                   run_name, options.config)
-    if not run:
-        LOG.info('Adding a test plan entry with test run '
-                 '"{0} ({1})" ...'.format(run_name, options.config))
-        entry = client.add_plan_entry(plan_id=test_plan['id'],
-                                      suite_id=tests_suite['id'],
-                                      config_ids=config_ids,
-                                      runs=runs,
-                                      name=run_name)
-        LOG.info('The test plan entry has been added.')
-        run = find_run_by_config_in_test_plan_entry(entry, options.config)
-
-    # STEP #4
-    # Upload the test results to TestRail for the specified test run
-    LOG.info('Uploading the test results to TestRail...')
-
-    upload_test_results(client, run, tests_suite['id'], test_results)
-
-    LOG.info('The results of Tempest tests have been uploaded.')
-    LOG.info('Report URL: {0}'.format(test_plan['url']))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/fuelweb_test/testrail/settings.py b/fuelweb_test/testrail/settings.py
deleted file mode 100644
index 1675e614b..000000000
--- a/fuelweb_test/testrail/settings.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
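For reference, a minimal sketch of the JUnit-style report that
parse_xml_report() in report_tempest_results.py above consumes; the XML
content is hypothetical, while the status handling mirrors the parser:

    from xml.etree import ElementTree

    sample = ElementTree.fromstring(
        '<testsuite>'
        '<testcase classname="tempest.api.compute" name="test_list_servers"/>'
        '<testcase classname="tempest.api.network" name="test_create_port">'
        '<failure>Traceback (most recent call last): ...</failure>'
        '</testcase>'
        '</testsuite>')

    for case in sample.findall('testcase'):
        children = list(case)              # no child element means a pass
        status = children[0].tag if children else 'passed'
        print('{0}: {1}'.format(case.get('name'),
                                'failed' if status == 'failure' else status))
    # test_list_servers: passed
    # test_create_port: failed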
- -from __future__ import unicode_literals - -import logging -import os - -logger = logging.getLogger(__package__) -ch = logging.StreamHandler() -formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') -ch.setFormatter(formatter) -logger.addHandler(ch) -logger.setLevel(logging.INFO) - -LOGS_DIR = os.environ.get('LOGS_DIR', os.getcwd()) - -os.environ["ENV_NAME"] = "some_environment" -os.environ["ISO_PATH"] = "./fuel.iso" -os.environ["CENTOS_CLOUD_IMAGE_PATH"] = "./centos-cloud-image.img" - -JENKINS = { - 'url': os.environ.get('JENKINS_URL', 'http://localhost/'), - 'magnet_link_artifact': os.environ.get('JENKINS_MAGNET_LINK_ARTIFACT', - 'magnet_link.txt'), - 'username': os.environ.get('JENKINS_USER', None), - 'password': os.environ.get('JENKINS_PASS', None), - 'job_name': os.environ.get('TEST_RUNNER_JOB_NAME', '9.0.swarm.runner'), - 'xml_testresult_file_name': os.environ.get('TEST_XML_RESULTS', - 'nosetests.xml') -} - -GROUPS_TO_EXPAND = [ - 'setup_master', 'prepare_release', 'prepare_slaves_1', 'prepare_slaves_3', - 'prepare_slaves_5', 'prepare_slaves_9'] - -FAILURE_GROUPING = {'threshold': 0.04, 'max_len_diff': 0.1} - - -class LaunchpadSettings(object): - """LaunchpadSettings.""" # TODO documentation - - project = os.environ.get('LAUNCHPAD_PROJECT', 'fuel') - milestone = os.environ.get('LAUNCHPAD_MILESTONE', '9.0') - closed_statuses = [ - os.environ.get('LAUNCHPAD_RELEASED_STATUS', 'Fix Released'), - os.environ.get('LAUNCHPAD_INVALID_STATUS', 'Invalid') - ] - - -class TestRailSettings(object): - """TestRailSettings.""" # TODO documentation - - url = os.environ.get('TESTRAIL_URL') - user = os.environ.get('TESTRAIL_USER', 'user@example.com') - password = os.environ.get('TESTRAIL_PASSWORD', 'password') - project = os.environ.get('TESTRAIL_PROJECT', 'Fuel') - milestone = os.environ.get('TESTRAIL_MILESTONE', '9.0') - tests_description = os.environ.get('TESTRAIL_DESCRIPTION', None) - tests_suite = os.environ.get('TESTRAIL_TEST_SUITE', - '[{0}] Swarm'.format(milestone)) - tests_section = os.environ.get('TESTRAIL_TEST_SECTION', 'All') - tests_include = os.environ.get('TESTRAIL_TEST_INCLUDE', None) - tests_exclude = os.environ.get('TESTRAIL_TEST_EXCLUDE', None) - previous_results_depth = os.environ.get('TESTRAIL_TESTS_DEPTH', 5) - previous_results_days_to_analyze = os.environ.get( - 'TESTRAIL_DAYS_TO_ANALYZE', 14) - operation_systems = [] - centos_enabled = os.environ.get('USE_CENTOS', 'false') == 'true' - ubuntu_enabled = os.environ.get('USE_UBUNTU', 'true') == 'true' - if centos_enabled: - operation_systems.append(os.environ.get( - 'TESTRAIL_CENTOS_RELEASE', 'Centos 6.5')) - if ubuntu_enabled: - operation_systems.append(os.environ.get( - 'TESTRAIL_UBUNTU_RELEASE', 'Ubuntu 16.04')) - stauses = { - 'passed': ['passed'], - 'failed': ['failed', 'product_failed', 'test_failed', 'infra_failed'], - 'blocked': ['blocked'] - } - max_results_per_request = 250 - - extra_factor_of_tc_definition = os.environ.get( - 'EXTRA_FACTOR_OF_TC_DEFINITION', None) diff --git a/fuelweb_test/testrail/testrail.py b/fuelweb_test/testrail/testrail.py deleted file mode 100644 index 80608e17f..000000000 --- a/fuelweb_test/testrail/testrail.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# TestRail API binding for Python 2.x (API v2, available since -# TestRail 3.0) -# -# Learn more: -# -# http://docs.gurock.com/testrail-api2/start -# http://docs.gurock.com/testrail-api2/accessing -# -# Copyright Gurock Software GmbH. See license.md for details. -# - -from __future__ import unicode_literals - -import base64 -import time - -import requests -from requests.exceptions import HTTPError -from requests.packages.urllib3 import disable_warnings - -from fuelweb_test.testrail.settings import logger - - -disable_warnings() - - -def request_retry(codes): - log_msg = "Got {0} Error! Waiting {1} seconds and trying again..." - - def retry_request(func): - def wrapper(*args, **kwargs): - iter_number = 0 - while True: - try: - response = func(*args, **kwargs) - response.raise_for_status() - except HTTPError as e: - error_code = e.response.status_code - if error_code in codes: - if iter_number < codes[error_code]: - wait = 60 - if 'Retry-After' in e.response.headers: - wait = int(e.response.headers['Retry-after']) - logger.debug(log_msg.format(error_code, wait)) - time.sleep(wait) - iter_number += 1 - continue - raise - else: - return response.json() - return wrapper - return retry_request - - -class APIClient(object): - """APIClient.""" # TODO documentation - - def __init__(self, base_url): - self.user = '' - self.password = '' - if not base_url.endswith('/'): - base_url += '/' - self.__url = base_url + 'index.php?/api/v2/' - - def send_get(self, uri): - return self.__send_request('GET', uri, None) - - def send_post(self, uri, data): - return self.__send_request('POST', uri, data) - - def __send_request(self, method, uri, data): - retry_codes = {429: 3, - 503: 10} - - @request_retry(codes=retry_codes) - def __get_response(_url, _headers, _data): - if method == 'POST': - return requests.post(_url, json=_data, headers=_headers) - return requests.get(_url, headers=_headers) - - url = self.__url + uri - - auth = base64.encodestring( - '{0}:{1}'.format(self.user, self.password)).strip() - - headers = {'Authorization': 'Basic {}'.format(auth), - 'Content-Type': 'application/json'} - - try: - return __get_response(url, headers, data) - except HTTPError as e: - if e.message: - error = e.message - else: - error = 'No additional error message received' - raise APIError('TestRail API returned HTTP {0}: "{1}"'.format( - e.response.status_code, error)) - - -class APIError(Exception): - """APIError.""" # TODO documentation - pass diff --git a/fuelweb_test/testrail/testrail_client.py b/fuelweb_test/testrail/testrail_client.py deleted file mode 100644 index 5dff0abfe..000000000 --- a/fuelweb_test/testrail/testrail_client.py +++ /dev/null @@ -1,489 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -import time - -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.testrail import APIClient -from fuelweb_test.testrail.testrail import APIError - - -class TestRailProject(object): - """TestRailProject.""" # TODO documentation - - def __init__(self, url, user, password, project): - self.client = APIClient(base_url=url) - self.client.user = user - self.client.password = password - self.project = self._get_project(project) - - def _get_project(self, project_name): - projects_uri = 'get_projects' - projects = self.client.send_get(uri=projects_uri) - for project in projects: - if project['name'] == project_name: - return project - return None - - def test_run_struct(self, name, suite_id, milestone_id, description, - config_ids, include_all=True, assignedto=None, - case_ids=None): - struct = { - 'name': name, - 'suite_id': suite_id, - 'milestone_id': milestone_id, - 'description': description, - 'include_all': include_all, - 'config_ids': config_ids - } - if case_ids: - struct['include_all'] = False - struct['case_ids'] = case_ids - if assignedto: - struct['assignedto_id'] = self.get_user(assignedto)['id'] - return struct - - def get_users(self): - users_uri = 'get_users' - return self.client.send_get(uri=users_uri) - - def get_user(self, user_id): - user_uri = 'get_user/{user_id}'.format(user_id=user_id) - return self.client.send_get(uri=user_uri) - - def get_user_by_name(self, name): - for user in self.get_users(): - if user['name'] == name: - return self.get_user(user_id=user['id']) - - def get_configs(self): - configs_uri = 'get_configs/{project_id}'.format( - project_id=self.project['id']) - return self.client.send_get(configs_uri) - - def get_config(self, config_id): - for configs in self.get_configs(): - for config in configs['configs']: - if config['id'] == int(config_id): - return config - - def get_config_by_name(self, name): - for config in self.get_configs(): - if config['name'] == name: - return config - - def get_priorities(self): - priorities_uri = 'get_priorities' - return self.client.send_get(uri=priorities_uri) - - def get_milestones(self): - milestones_uri = 'get_milestones/{project_id}'.format( - project_id=self.project['id']) - return self.client.send_get(uri=milestones_uri) - - def get_milestone(self, milestone_id): - milestone_uri = 'get_milestone/{milestone_id}'.format( - milestone_id=milestone_id) - return self.client.send_get(uri=milestone_uri) - - def get_milestone_by_name(self, name): - for milestone in self.get_milestones(): - if milestone['name'] == name: - return self.get_milestone(milestone_id=milestone['id']) - - def get_suites(self): - suites_uri = 'get_suites/{project_id}'.format( - project_id=self.project['id']) - return self.client.send_get(uri=suites_uri) - - def get_suite(self, suite_id): - suite_uri = 'get_suite/{suite_id}'.format(suite_id=suite_id) - return self.client.send_get(uri=suite_uri) - - def get_suite_by_name(self, name): - for suite in self.get_suites(): - if suite['name'] == name: - return self.get_suite(suite_id=suite['id']) - - def get_sections(self, suite_id): 
- sections_uri = 'get_sections/{project_id}&suite_id={suite_id}'.format( - project_id=self.project['id'], - suite_id=suite_id - ) - return self.client.send_get(sections_uri) - - def get_section(self, section_id): - section_uri = 'get_section/{section_id}'.format(section_id=section_id) - return self.client.send_get(section_uri) - - def get_section_by_name(self, suite_id, section_name): - for section in self.get_sections(suite_id=suite_id): - if section['name'] == section_name: - return self.get_section(section_id=section['id']) - - def create_section(self, suite_id, name, parent_id=None): - return self.client.send_post('add_section/' + str(self.project['id']), - dict(suite_id=suite_id, name=name, - parent_id=parent_id)) - - def delete_section(self, section_id): - return self.client.send_post('delete_section/' + str(section_id), {}) - - def create_suite(self, name, description=None): - return self.client.send_post('add_suite/' + str(self.project['id']), - dict(name=name, description=description)) - - def get_cases(self, suite_id, section_id=None): - cases_uri = 'get_cases/{project_id}&suite_id={suite_id}'.format( - project_id=self.project['id'], - suite_id=suite_id - ) - if section_id: - cases_uri = '{0}&section_id={section_id}'.format( - cases_uri, section_id=section_id - ) - return self.client.send_get(cases_uri) - - def get_case(self, case_id): - case_uri = 'get_case/{case_id}'.format(case_id=case_id) - return self.client.send_get(case_uri) - - def get_case_by_name(self, suite_id, name, cases=None): - for case in cases or self.get_cases(suite_id): - if case['title'] == name: - return self.get_case(case_id=case['id']) - - def get_case_by_group(self, suite_id, group, cases=None): - for case in cases or self.get_cases(suite_id): - if case['custom_test_group'] == group: - return self.get_case(case_id=case['id']) - - def add_case(self, section_id, case): - add_case_uri = 'add_case/{section_id}'.format(section_id=section_id) - return self.client.send_post(add_case_uri, case) - - def update_case(self, case_id, fields): - return self.client.send_post('update_case/{0}'.format(case_id), fields) - - def delete_case(self, case_id): - return self.client.send_post('delete_case/' + str(case_id), None) - - def get_case_fields(self): - return self.client.send_get('get_case_fields') - - def get_plans(self, milestone_ids=None, limit=None, offset=None, - created_after=None): - plans_uri = 'get_plans/{project_id}'.format( - project_id=self.project['id']) - if milestone_ids: - plans_uri += '&milestone_id=' + ','.join([str(m) - for m in milestone_ids]) - if limit: - plans_uri += '&limit={0}'.format(limit) - if offset: - plans_uri += '&offset={0}'.format(offset) - if created_after: - plans_uri += '&created_after={0}'.format(created_after) - - return self.client.send_get(plans_uri) - - def get_plan(self, plan_id): - plan_uri = 'get_plan/{plan_id}'.format(plan_id=plan_id) - return self.client.send_get(plan_uri) - - def get_plan_by_name(self, name): - for plan in self.get_plans(): - if plan['name'] == name: - return self.get_plan(plan['id']) - - def add_plan(self, name, description, milestone_id, entries): - add_plan_uri = 'add_plan/{project_id}'.format( - project_id=self.project['id']) - new_plan = { - 'name': name, - 'description': description, - 'milestone_id': milestone_id, - 'entries': entries - } - return self.client.send_post(add_plan_uri, new_plan) - - def update_plan(self, plan_id, name='', description='', - milestone_id=None, entries=None): - if entries is None: - entries = [] - update_plan_uri =
'update_plan/{plan_id}'.format( - plan_id=plan_id) - updated_plan = {} - if name: - updated_plan['name'] = name - if description: - updated_plan['description'] = description - if milestone_id: - updated_plan['milestone_id'] = milestone_id - if entries: - updated_plan['entries'] = entries - return self.client.send_post(update_plan_uri, updated_plan) - - def add_plan_entry(self, plan_id, suite_id, config_ids, runs, name=None): - add_plan_entry_uri = 'add_plan_entry/{plan_id}'.format(plan_id=plan_id) - new_entry = { - 'suite_id': suite_id, - 'config_ids': config_ids, - 'runs': runs - } - if name: - new_entry['name'] = name - return self.client.send_post(add_plan_entry_uri, new_entry) - - def delete_plan(self, plan_id): - delete_plan_uri = 'delete_plan/{plan_id}'.format(plan_id=plan_id) - self.client.send_post(delete_plan_uri, {}) - - def get_runs(self): - runs_uri = 'get_runs/{project_id}'.format( - project_id=self.project['id']) - return self.client.send_get(uri=runs_uri) - - def get_run(self, run_id): - run_uri = 'get_run/{run_id}'.format(run_id=run_id) - return self.client.send_get(uri=run_uri) - - def get_run_by_name(self, name): - for run in self.get_runs(): - if run['name'] == name: - return self.get_run(run_id=run['id']) - - def get_previous_runs(self, milestone_id, suite_id, config_id, limit=None, - days_to_analyze=None): - previous_runs = [] - offset = 0 - current_time = int(time.time()) - day_in_seconds = 24 * 60 * 60 - created_after = current_time - (day_in_seconds * days_to_analyze) - while len(previous_runs) < limit: - existing_plans = self.get_plans(milestone_ids=[milestone_id], - limit=limit, - offset=offset, - created_after=created_after) - if not existing_plans: - break - - for plan in existing_plans: - for entry in self.get_plan(plan['id'])['entries']: - if entry['suite_id'] == suite_id: - run_ids = [run for run in entry['runs'] if - config_id in run['config_ids']] - previous_runs.extend(run_ids) - - offset += limit - - return previous_runs - - def add_run(self, new_run): - add_run_uri = 'add_run/{project_id}'.format( - project_id=self.project['id']) - return self.client.send_post(add_run_uri, new_run) - - def update_run(self, name, milestone_id=None, description=None, - config_ids=None, include_all=None, case_ids=None): - tests_run = self.get_run_by_name(name) - update_run_uri = 'update_run/{run_id}'.format(run_id=tests_run['id']) - update_run = {} - if milestone_id: - update_run['milestone_id'] = milestone_id - if description: - update_run['description'] = description - if include_all is not None: - update_run['include_all'] = include_all is True - if case_ids: - update_run['case_ids'] = case_ids - if config_ids: - update_run['config_ids'] = config_ids - return self.client.send_post(update_run_uri, update_run) - - def create_or_update_run(self, name, suite, milestone_id, description, - config_ids, include_all=True, assignedto=None, - case_ids=None): - if self.get_run_by_name(name): - self.update_run(name=name, - milestone_id=milestone_id, - description=description, - config_ids=config_ids, - include_all=include_all, - case_ids=case_ids) - else: - self.add_run(self.test_run_struct(name, suite, milestone_id, - description, config_ids, - include_all=include_all, - assignedto=assignedto, - case_ids=case_ids)) - - def get_statuses(self): - statuses_uri = 'get_statuses' - return self.client.send_get(statuses_uri) - - def get_status(self, name): - for status in self.get_statuses(): - if status['name'] == name: - return status - - def get_tests(self, run_id, status_id=None): - tests_uri =
'get_tests/{run_id}'.format(run_id=run_id) - if status_id: - tests_uri = '{0}&status_id={1}'.format(tests_uri, - ','.join(status_id)) - return self.client.send_get(tests_uri) - - def get_test(self, test_id): - test_uri = 'get_test/{test_id}'.format(test_id=test_id) - return self.client.send_get(test_uri) - - def get_test_by_name(self, run_id, name): - for test in self.get_tests(run_id): - if test['title'] == name: - return self.get_test(test_id=test['id']) - - def get_test_by_group(self, run_id, group, tests=None): - for test in tests or self.get_tests(run_id): - if test['custom_test_group'] == group: - return self.get_test(test_id=test['id']) - - def get_test_by_name_and_group(self, run_id, name, group): - for test in self.get_tests(run_id): - if test['title'] == name and test['custom_test_group'] == group: - return self.get_test(test_id=test['id']) - - def get_tests_by_group(self, run_id, group, tests=None): - test_list = [] - for test in tests or self.get_tests(run_id): - if test['custom_test_group'] == group: - test_list.append(self.get_test(test_id=test['id'])) - return test_list - - def get_results_for_test(self, test_id, run_results=None): - if run_results: - test_results = [] - for result in run_results: - if result['test_id'] == test_id: - test_results.append(result) - return test_results - results_uri = 'get_results/{test_id}'.format(test_id=test_id) - return self.client.send_get(results_uri) - - def get_results_for_run(self, run_id, created_after=None, - created_before=None, created_by=None, limit=None, - offset=None, status_id=None): - results_run_uri = 'get_results_for_run/{run_id}'.format(run_id=run_id) - if created_after: - results_run_uri += '&created_after={}'.format(created_after) - if created_before: - results_run_uri += '&created_before={}'.format(created_before) - if created_by: - results_run_uri += '&created_by={}'.format(created_by) - if limit: - results_run_uri += '&limit={}'.format(limit) - if offset: - results_run_uri += '&offset={}'.format(offset) - if status_id: - results_run_uri += '&status_id={}'.format(status_id) - return self.client.send_get(results_run_uri) - - def get_results_for_case(self, run_id, case_id): - results_case_uri = 'get_results_for_case/{run_id}/{case_id}'.format( - run_id=run_id, case_id=case_id) - return self.client.send_get(results_case_uri) - - def get_all_results_for_case(self, run_ids, case_id): - all_results = [] - for run_id in run_ids: - try: - results = self.get_results_for_case(run_id=run_id, - case_id=case_id) - except APIError as e: - logger.error("[{0}], run_id={1}, case_id={2}" - .format(e, run_id, case_id)) - continue - all_results.extend(results) - return all_results - - def add_results_for_test(self, test_id, test_results): - new_results = { - 'status_id': self.get_status(test_results.status)['id'], - 'comment': '\n'.join(filter(lambda x: x is not None, - [test_results.description, - test_results.url, - test_results.comments])), - 'elapsed': test_results.duration, - 'version': test_results.version - } - if test_results.steps: - new_results['custom_step_results'] = test_results.steps - return self.add_raw_results_for_test(test_id, new_results) - - def add_raw_results_for_test(self, test_id, test_raw_results): - add_results_test_uri = 'add_result/{test_id}'.format(test_id=test_id) - return self.client.send_post(add_results_test_uri, test_raw_results) - - def add_results_for_cases(self, run_id, suite_id, tests_results): - add_results_test_uri = 'add_results_for_cases/{run_id}'.format( - run_id=run_id) - new_results = 
{'results': []} - tests_cases = self.get_cases(suite_id) - for results in tests_results: - case = self.get_case_by_group(suite_id=suite_id, - group=results.group, - cases=tests_cases) - case_id = case['id'] - new_result = { - 'case_id': case_id, - 'status_id': self.get_status(results.status)['id'], - 'comment': '\n'.join(filter(lambda x: x is not None, - [results.description, - results.url, - results.comments])), - 'elapsed': results.duration, - 'version': results.version, - 'custom_launchpad_bug': results.launchpad_bug - } - if results.steps: - custom_step_results = [] - steps = case.get('custom_test_case_steps', None) - if steps and len(steps) == len(results.steps): - for s in zip(steps, results.steps): - custom_step_results.append({ - "content": s[0]["content"], - "expected": s[0]["expected"], - "actual": s[1]['actual'], - "status_id": self.get_status(s[1]['status'])['id'] - }) - else: - for s in results.steps: - custom_step_results.append({ - "content": s['name'], - "expected": 'pass', - "actual": s['actual'], - "status_id": self.get_status(s['status'])['id'] - }) - new_result['custom_test_case_steps_results'] = \ - custom_step_results - new_results['results'].append(new_result) - return self.client.send_post(add_results_test_uri, new_results) - - def add_results_for_tempest_cases(self, run_id, tests_results): - add_results_test_uri = 'add_results_for_cases/{run_id}'.format( - run_id=run_id) - new_results = {'results': tests_results} - return self.client.send_post(add_results_test_uri, new_results) diff --git a/fuelweb_test/testrail/upload_cases_description.py b/fuelweb_test/testrail/upload_cases_description.py deleted file mode 100644 index b6115a1ec..000000000 --- a/fuelweb_test/testrail/upload_cases_description.py +++ /dev/null @@ -1,524 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
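The class above is the whole client surface the reporting scripts relied on. As a quick reference, here is a hypothetical snippet posting one result through it; the URL, credentials, run name, test group, and status values are placeholders:

    from fuelweb_test.testrail.testrail_client import TestRailProject

    project = TestRailProject(url='https://example.testrail.io',  # placeholder
                              user='user@example.com',
                              password='password',
                              project='Fuel')
    run = project.get_run_by_name('Tempest - smoke')
    test = project.get_test_by_group(run_id=run['id'], group='setup_master')
    # add_raw_results_for_test posts this payload to add_result/{test_id}.
    project.add_raw_results_for_test(test['id'], {
        'status_id': project.get_status('passed')['id'],
        'comment': 'uploaded by an external reporting script',
        'elapsed': '3m',
    })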
- -from __future__ import unicode_literals - -import re -import string - -from logging import DEBUG -from optparse import OptionParser - -from proboscis import TestPlan -from proboscis.decorators import DEFAULT_REGISTRY -import pytest - -from fuelweb_test.testrail.builds import Build -from fuelweb_test.testrail.settings import GROUPS_TO_EXPAND -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.settings import TestRailSettings -from fuelweb_test.testrail.testrail_client import TestRailProject -from fuelweb_test.testrail import datetime_util -from system_test import define_custom_groups -from system_test import discover_import_tests -from system_test import register_system_test_cases -from system_test import tests_directory -from system_test import get_basepath -from system_test.tests.base import ActionTest - -# pylint: disable=no-name-in-module - -GROUP_FIELD = 'custom_test_group' - -STEP_NUM_PATTERN = re.compile(r'^(\d{1,3})[.].+') -DURATION_PATTERN = re.compile(r'Duration:?\s+(\d+(?:[sm]|\s?m))(?:in)?\b') -TEST_GROUP_PATTERN = re.compile(r'run_system_test.py\s+.*--group=(\S+)\b') - -# Grab groups from pytest on import -pytest.main(['--collect-only', 'fuel_tests', ]) - -# pylint: disable=wrong-import-position -# noinspection PyPep8 -from fuel_tests.tests.conftest import test_groups # noqa -# noinspection PyPep8 -from fuel_tests.tests.conftest import test_names # noqa -# pylint: enable=wrong-import-position - - -def get_cases_from_pytest(group): - return [ - obj for grp in test_groups - for groups, obj in grp.items() - if group in groups] - - -def group_in_pytest(group): - return group in test_names - - -def get_tests_descriptions(milestone_id, tests_include, tests_exclude, groups, - default_test_priority): - plan = _create_test_plan_from_registry(groups=groups) - all_plan_tests = plan.tests[:] - - tests = [] - - for jenkins_suffix in groups: - group = groups[jenkins_suffix]['group'] - if group_in_pytest(group): - for case in get_cases_from_pytest(group): - docstring = case.obj.__doc__ or '' - - title, steps, duration = _parse_docstring(docstring, case) - - test_group = case.obj.__name__ - - if TestRailSettings.extra_factor_of_tc_definition: - title = '{} - {}'.format( - title, - TestRailSettings.extra_factor_of_tc_definition - ) - test_group = '{}_{}'.format( - test_group, - TestRailSettings.extra_factor_of_tc_definition - ) - - test_case = { - "title": title, - "type_id": 1, - "milestone_id": milestone_id, - "priority_id": default_test_priority, - "estimate": duration, - "refs": "", - "custom_test_group": test_group, - "custom_test_case_description": docstring or " ", - "custom_test_case_steps": steps, - "custom_job_settings": str( - groups[jenkins_suffix]['env_vars']) - } - tests.append(test_case) - else: - plan.filter(group_names=[group]) - for case in plan.tests: - if not _is_case_processable(case=case, tests=tests): - continue - - case_name = test_group = _get_test_case_name(case) - - if _is_not_included(case_name, tests_include) or \ - _is_excluded(case_name, tests_exclude): - continue - - docstring = _get_docstring(parent_home=case.entry.parent.home, - case_state=case.state, - home=case.entry.home) - - title, steps, duration = _parse_docstring(docstring, case) - - if case.entry.home.func_name in GROUPS_TO_EXPAND: - """ - Expand specified test names with the group names that are - used in jenkins jobs where this test is started. 
- """ - title = ' - '.join([title, jenkins_suffix]) - test_group = '_'.join([case.entry.home.func_name, - jenkins_suffix]) - elif TestRailSettings.extra_factor_of_tc_definition: - title = '{} - {}'.format( - title, - TestRailSettings.extra_factor_of_tc_definition - ) - test_group = '{}_{}'.format( - test_group, - TestRailSettings.extra_factor_of_tc_definition - ) - - test_case = { - "title": title, - "type_id": 1, - "milestone_id": milestone_id, - "priority_id": default_test_priority, - "estimate": duration, - "refs": "", - "custom_test_group": test_group, - "custom_test_case_description": docstring or " ", - "custom_test_case_steps": steps, - "custom_job_settings": str( - groups[jenkins_suffix]['env_vars']) - } - - if not any([x[GROUP_FIELD] == test_group for x in tests]): - tests.append(test_case) - else: - logger.warning("Testcase '{0}' run in multiple " - "Jenkins jobs!".format(test_group)) - - plan.tests = all_plan_tests[:] - - return tests - - -def upload_tests_descriptions(testrail_project, section_id, - tests, check_all_sections): - tests_suite = testrail_project.get_suite_by_name( - TestRailSettings.tests_suite) - check_section = None if check_all_sections else section_id - cases = testrail_project.get_cases(suite_id=tests_suite['id'], - section_id=check_section) - existing_cases = [case[GROUP_FIELD] for case in cases] - custom_cases_fields = _get_custom_cases_fields( - case_fields=testrail_project.get_case_fields(), - project_id=testrail_project.project['id']) - - for test_case in tests: - if test_case[GROUP_FIELD] in existing_cases: - testrail_case = _get_testrail_case(testrail_cases=cases, - test_case=test_case, - group_field=GROUP_FIELD) - fields_to_update = _get_fields_to_update(test_case, testrail_case) - - if fields_to_update: - logger.debug('Updating test "{0}" in TestRail project "{1}", ' - 'suite "{2}", section "{3}". 
Updated fields: {4}' - .format( - test_case[GROUP_FIELD], - TestRailSettings.project, - TestRailSettings.tests_suite, - TestRailSettings.tests_section, - ', '.join(fields_to_update.keys()))) - testrail_project.update_case(case_id=testrail_case['id'], - fields=fields_to_update) - else: - logger.debug('Skipping "{0}" test case uploading because ' - 'it is up-to-date in "{1}" suite' - .format(test_case[GROUP_FIELD], - TestRailSettings.tests_suite)) - - else: - for case_field, default_value in custom_cases_fields.items(): - if case_field not in test_case: - test_case[case_field] = default_value - - logger.debug('Uploading test "{0}" to TestRail project "{1}", ' - 'suite "{2}", section "{3}"'.format( - test_case[GROUP_FIELD], - TestRailSettings.project, - TestRailSettings.tests_suite, - TestRailSettings.tests_section)) - testrail_project.add_case(section_id=section_id, case=test_case) - - -def get_tests_groups_from_jenkins(runner_name, build_number, distros): - runner_build = Build(runner_name, build_number) - res = {} - sub_builds = \ - runner_build.build_data.get('subBuilds', [runner_build.build_data]) - for b in sub_builds: - if b['result'] is None: - logger.debug("Skipping '{0}' job (build #{1}) because it's still " - "running...".format(b['jobName'], b['buildNumber'],)) - continue - - # Get the test group from the console of the job - # Get the job suffix - if b.get('jobName'): - z = Build(b['jobName'], b['buildNumber']) - console = z.get_job_console() - job_name = b['jobName'] - job_info = z.job_info - env_vars = z.injected_vars - else: - console = runner_build.get_job_console() - job_name = runner_build.name - job_info = runner_build.job_info - env_vars = runner_build.injected_vars - - groups = re.findall(TEST_GROUP_PATTERN, console) - - if not groups: - # Maybe it's a failed baremetal job? - # By design, baremetal tests run a pre-setup job first; when it - # fails, the console contains no test groups in the usual sense, - # i.e. nothing that TEST_GROUP_PATTERN could parse. - baremetal_pattern = re.compile(r'Jenkins Build.*jenkins-(.*)-\d+') - baremetal_groups = re.findall(baremetal_pattern, console) - if not baremetal_groups: - logger.error( - "No test group found in console of the job {0}/{1}".format - (b['jobName'], b['buildNumber'])) - continue - # Get the group via jobName instead: the test group name inside - # the log may be truncated, with some symbols replaced by '*'. - groups = b['jobName'].split('.') - # Use the last group (there can be several groups in upgrade jobs) - test_group = groups[-1] - - for distro in distros: - if distro in job_name: - sep = '.' + distro + '.'
- job_suffix = job_name.split(sep)[-1] - break - else: - job_suffix = job_name.split('.')[-1] - res[job_suffix] = \ - {'group': test_group, 'job_info': job_info, 'env_vars': env_vars} - return res - - -def _create_test_plan_from_registry(groups): - discover_import_tests(get_basepath(), tests_directory) - define_custom_groups() - for one in groups: - register_system_test_cases(one) - return TestPlan.create_from_registry(DEFAULT_REGISTRY) - - -def _is_case_processable(case, tests): - if not case.entry.info.enabled or not hasattr(case.entry, 'parent'): - return False - - parent_home = case.entry.parent.home - if issubclass(parent_home, ActionTest) and \ - any([test[GROUP_FIELD] == parent_home.__name__ for test in tests]): - return False - - # Skip @before_class methods without doc strings: - # they are just pre-checks, not separate tests cases - if case.entry.info.before_class: - if case.entry.home.func_doc is None: - logger.debug('Skipping method "{0}", because it is not a ' - 'test case'.format(case.entry.home.func_name)) - return False - - return True - - -def _get_test_case_name(case): - """Returns test case name - """ - parent_home = case.entry.parent.home - return parent_home.__name__ if issubclass(parent_home, ActionTest) \ - else case.entry.home.func_name - - -def _is_not_included(case_name, include): - if include and include not in case_name: - logger.debug("Skipping '{0}' test because it doesn't " - "contain '{1}' in method name".format(case_name, include)) - return True - else: - return False - - -def _is_excluded(case_name, exclude): - if exclude and exclude in case_name: - logger.debug("Skipping '{0}' test because it contains" - " '{1}' in method name".format(case_name, exclude)) - return True - else: - return False - - -def _get_docstring(parent_home, case_state, home): - if issubclass(parent_home, ActionTest): - docstring = parent_home.__doc__.split('\n') - case_state.instance._load_config() - configuration = case_state.instance.config_name - docstring[0] = '{0} on {1}'.format(docstring[0], configuration) - docstring = '\n'.join(docstring) - else: - docstring = home.func_doc or '' - return docstring - - -def _parse_docstring(s, case): - split_s = s.strip().split('\n\n') - title_r, steps_r, duration_r = _unpack_docstring(split_s) - title = _parse_title(title_r, case) if title_r else '' - steps = _parse_steps(steps_r) if steps_r else '' - duration = _parse_duration(duration_r) - return title, steps, duration - - -def _unpack_docstring(items): - count = len(items) - title = steps = duration = '' - if count > 3: - title, steps, duration, _ = _unpack_list(*items) - elif count == 3: - title, steps, duration = items - elif count == 2: - title, steps = items - elif count == 1: - title = items[0] - return title, steps, duration - - -def _unpack_list(title, steps, duration, *other): - return title, steps, duration, other - - -def _parse_title(s, case): - title = ' '.join(map(string.strip, s.split('\n'))) - return title if title else case.entry.home.func_name - - -def _parse_steps(strings): - steps = [] - index = -1 - for s_raw in strings.strip().split('\n'): - s = s_raw.strip() - _match = STEP_NUM_PATTERN.search(s) - if _match: - steps.append({'content': _match.group(), 'expected': 'pass'}) - index += 1 - else: - if index > -1: - steps[index]['content'] = ' '.join([steps[index]['content'], - s]) - return steps - - -def _parse_duration(s): - match = DURATION_PATTERN.search(s) - return match.group(1).replace(' ', '') if match else '3m' - - -def _get_custom_cases_fields(case_fields, 
project_id): - custom_cases_fields = {} - for field in case_fields: - for config in field['configs']: - if ((project_id in - config['context']['project_ids'] or - not config['context']['project_ids']) and - config['options']['is_required']): - try: - custom_cases_fields[field['system_name']] = \ - int(config['options']['items'].split(',')[0]) - except: - logger.error("Couldn't find default value for required " - "field '{0}', setting '1' (index)!".format( - field['system_name'])) - custom_cases_fields[field['system_name']] = 1 - return custom_cases_fields - - -def _get_fields_to_update(test_case, testrail_case): - """Produces dictionary with fields to be updated - """ - fields_to_update = {} - for field in ('title', 'estimate', 'custom_test_case_description', - 'custom_test_case_steps', 'custom_job_settings'): - if test_case[field] and \ - test_case[field] != testrail_case[field]: - if field == 'estimate': - testcase_estimate_raw = int(test_case[field][:-1]) - testcase_estimate = \ - datetime_util.duration_to_testrail_estimate( - testcase_estimate_raw) - if testrail_case[field] == testcase_estimate: - continue - elif field == 'custom_test_case_description' and \ - test_case[field] == testrail_case[field].replace('\r', ''): - continue - fields_to_update[field] = test_case[field] - return fields_to_update - - -def _get_testrail_case(testrail_cases, test_case, group_field): - """Returns testrail case that corresponds to test case from repo - """ - return next((case for case in testrail_cases - if case[group_field] == test_case[group_field])) - - -def main(): - parser = OptionParser( - description="Upload tests cases to TestRail. " - "See settings.py for configuration." - ) - parser.add_option("-v", "--verbose", - action="store_true", dest="verbose", default=False, - help="Enable debug output") - parser.add_option('-j', '--job-name', dest='job_name', default=None, - help='Jenkins swarm runner job name') - parser.add_option('-N', '--build-number', dest='build_number', - default='latest', - help='Jenkins swarm runner build number') - parser.add_option('-o', '--check_one_section', action="store_true", - dest='check_one_section', default=False, - help='Look for existing test case only in specified ' - 'section of test suite.') - parser.add_option("-l", "--live", dest="live_upload", action="store_true", - help="Get tests results from running swarm") - - (options, _) = parser.parse_args() - - if options.verbose: - logger.setLevel(DEBUG) - - if options.live_upload and options.build_number == 'latest': - options.build_number = 'latest_started' - - project = TestRailProject( - url=TestRailSettings.url, - user=TestRailSettings.user, - password=TestRailSettings.password, - project=TestRailSettings.project - ) - - testrail_section = project.get_section_by_name( - suite_id=project.get_suite_by_name(TestRailSettings.tests_suite)['id'], - section_name=TestRailSettings.tests_section - ) - - testrail_milestone = project.get_milestone_by_name( - name=TestRailSettings.milestone) - - testrail_default_test_priority = [priority['id'] for priority in - project.get_priorities() if - priority['is_default'] is True][0] - - distros = [config['name'].split()[0].lower() - for config in project.get_config_by_name( - 'Operation System')['configs'] - if config['name'] in TestRailSettings.operation_systems] - - tests_groups = get_tests_groups_from_jenkins( - options.job_name, - options.build_number, - distros) if options.job_name else [] - - # If Jenkins job build is specified, but it doesn't have downstream builds - # with 
tests groups in jobs names, then skip tests cases uploading because - # ALL existing tests cases will be uploaded - if options.job_name and not tests_groups: - return - - tests_descriptions = get_tests_descriptions( - milestone_id=testrail_milestone['id'], - tests_include=TestRailSettings.tests_include, - tests_exclude=TestRailSettings.tests_exclude, - groups=tests_groups, - default_test_priority=testrail_default_test_priority - ) - - upload_tests_descriptions(testrail_project=project, - section_id=testrail_section['id'], - tests=tests_descriptions, - check_all_sections=not options.check_one_section) - - -if __name__ == '__main__': - main() diff --git a/fuelweb_test/testrail/upload_tempest_test_suite.py b/fuelweb_test/testrail/upload_tempest_test_suite.py deleted file mode 100644 index d4458b486..000000000 --- a/fuelweb_test/testrail/upload_tempest_test_suite.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -from logging import DEBUG -import subprocess -from optparse import OptionParser -from joblib import Parallel, delayed - -from fuelweb_test.helpers.decorators import retry -from fuelweb_test.testrail.settings import TestRailSettings -from fuelweb_test.testrail.testrail_client import TestRailProject -from fuelweb_test.testrail.settings import logger -from fuelweb_test.testrail.upload_cases_description \ - import _get_custom_cases_fields - - -TEST_GROUPS = ["API", "CLI", "Scenario", "ThirdParty"] -TEST_SECTIONS = ["Ceilometer", "Cinder", "Glance", "Heat", "Ironic", - "Keystone", "Network", "Nova", "Sahara", "Swift", "Other"] - - -def generate_groups(line): - section = "Other" - - for group in [{"names": [".telemetry.", ], "tag": "Ceilometer"}, - {"names": [".volume.", ], "tag": "Cinder"}, - {"names": [".image.", ], "tag": "Glance"}, - {"names": [".orchestration.", ], "tag": "Heat"}, - {"names": [".baremetal.", ], "tag": "Ironic"}, - {"names": [".identity.", ], "tag": "Keystone"}, - {"names": [".network.", ], "tag": "Network"}, - {"names": [".compute.", ], "tag": "Nova"}, - {"names": [".data_processing.", ], "tag": "Sahara"}, - {"names": [".object_storage.", ], "tag": "Swift"}]: - for name in group["names"]: - if name in line: - section = group["tag"] - - for group in TEST_SECTIONS: - if group.lower() in line and section == "Other": - section = group - - return section - - -def get_tests_descriptions(milestone_id, testrail_default_test_priority, - testrail_project): - # To get the Tempest tests list, need to execute the following commands: - # git clone https://github.com/openstack/tempest & cd tempest & tox -evenv - # .tox/venv/bin/pip install nose - get_tempest_tests = ("cd tempest && .tox/venv/bin/nosetests " - "--collect-only tempest/{0} -v 2>&1 | grep 'id-.*'") - get_commit = "cd tempest && git rev-parse HEAD" - commit = subprocess.Popen(get_commit, shell=True, stdout=subprocess.PIPE) - logger.info("Generate test suite for tempest" - " 
commit:{}".format(commit.stdout.readline())) - custom_cases_fields = _get_custom_cases_fields( - case_fields=testrail_project.get_case_fields(), - project_id=testrail_project.project['id']) - tests = {} - - for group in TEST_GROUPS: - p = subprocess.Popen(get_tempest_tests.format(group.lower()), - shell=True, stdout=subprocess.PIPE) - - for line in iter(p.stdout.readline, b''): - section = generate_groups(line) if group == "API" else group - - test_class = [] - for r in line.split("."): - if "id-" in r: - title = r.strip() - break - else: - test_class.append(r) - - steps = [{"run this tempest test": "passed"}, ] - - test_case = { - "title": title, - "type_id": 1, - "milestone_id": milestone_id, - "priority_id": testrail_default_test_priority, - "estimate": "1m", - "refs": "", - "custom_report_label": title.split('id-')[1][:-1], - "custom_test_group": ".".join(test_class), - "custom_test_case_description": title, - "custom_test_case_steps": steps, - "section": section - } - for case_field, default_value in custom_cases_fields.items(): - if case_field not in test_case: - test_case[case_field] = default_value - if section not in tests: - tests[section] = [] - tests[section].append(test_case) - logger.debug(tests) - logger.info("total test cases: " - "{}".format(sum(map(lambda x: len(x), tests.values())))) - return tests - - -def delete_case(testrail_project, test_id): - testrail_project.delete_case(test_id) - - -@retry(3, 60) -def add_case(testrail_project, suite, test_case, section, do_check=False): - try_msg = "try to add test: {} to section: {}" - miss_msg = "test: {} is already on section: {}" - if do_check and testrail_project.get_case_by_name(suite['id'], - test_case['title']): - logger.debug(miss_msg.format(test_case["title"], test_case["section"])) - return - logger.debug(try_msg.format(test_case["title"], test_case["section"])) - testrail_project.add_case(section_id=section["id"], case=test_case) - - -def rewrite_tests_descriptions(testrail_project, tests): - test_suite = TestRailSettings.tests_suite - suite = testrail_project.get_suite_by_name(test_suite) - - # remove old sections and test cases - old_sections = testrail_project.get_sections(suite_id=suite['id']) - for section in old_sections: - if section["parent_id"] is None: - testrail_project.delete_section(section["id"]) - - # create new groups - for group in TEST_GROUPS: - testrail_project.create_section(suite["id"], group) - - api_group = testrail_project.get_section_by_name(suite["id"], "API") - for section in TEST_SECTIONS: - testrail_project.create_section(suite["id"], section, api_group["id"]) - - # add test cases to test suite in 100 parallel threads - logger.info("Add cases") - for test_section, test_list in tests.iteritems(): - section = testrail_project.get_section_by_name( - suite_id=suite['id'], section_name=test_section) - Parallel(n_jobs=100)(delayed(add_case) - (testrail_project, suite, test_case, section) - for test_case in test_list) - - -def add_new_tests_description(testrail_project, tests): - test_suite = TestRailSettings.tests_suite - suite = testrail_project.get_suite_by_name(test_suite) - logger.info("Update suite sections") - for group in TEST_GROUPS: - if not testrail_project.get_section_by_name(suite["id"], group): - testrail_project.create_section(suite["id"], group) - api_group = testrail_project.get_section_by_name(suite["id"], "API") - for section in TEST_SECTIONS: - if not testrail_project.get_section_by_name(suite["id"], section): - testrail_project.create_section(suite["id"], section, - 
api_group["id"]) - - # add test cases to test suite in 100 parallel threads - logger.info("Add cases") - for test_section, test_list in tests.iteritems(): - section = testrail_project.get_section_by_name( - suite_id=suite['id'], section_name=test_section) - Parallel(n_jobs=100)(delayed(add_case) - (testrail_project, suite, test_case, - section, True) - for test_case in test_list) - - -def main(): - - parser = OptionParser( - description="Upload tests cases to TestRail. " - "See settings.py for configuration." - ) - parser.add_option("-v", "--verbose", - action="store_true", dest="verbose", default=False, - help="Enable debug output") - parser.add_option("-r", "--rewrite", action="store_true", - dest="rewrite", default=False, help="Rewrite all suite") - parser.add_option("-u", "--update", action="store_true", - dest="update", default=False, - help="Update new cases to suite") - - (options, _) = parser.parse_args() - - if options.verbose: - logger.setLevel(DEBUG) - - testrail_project = TestRailProject( - url=TestRailSettings.url, - user=TestRailSettings.user, - password=TestRailSettings.password, - project=TestRailSettings.project - ) - - testrail_milestone = testrail_project.get_milestone_by_name( - name=TestRailSettings.milestone) - - testrail_default_test_priority = [priority['id'] for priority in - testrail_project.get_priorities() if - priority['is_default'] is True][0] - - tests_descriptions = get_tests_descriptions(testrail_milestone['id'], - testrail_default_test_priority, - testrail_project) - - if options.rewrite: - rewrite_tests_descriptions(testrail_project=testrail_project, - tests=tests_descriptions) - if options.update: - add_new_tests_description(testrail_project=testrail_project, - tests=tests_descriptions) - - -if __name__ == '__main__': - main() diff --git a/fuelweb_test/tests/__init__.py b/fuelweb_test/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/base_test_case.py b/fuelweb_test/tests/base_test_case.py deleted file mode 100644 index 0ba13444c..000000000 --- a/fuelweb_test/tests/base_test_case.py +++ /dev/null @@ -1,510 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import time - -from devops.helpers.helpers import wait -from devops.helpers.templates import get_devops_config - -from proboscis import TestProgram -from proboscis import SkipTest -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.fuel_release_hacks import install_mos_repos -from fuelweb_test.helpers.utils import erase_data_from_hdd -from fuelweb_test.helpers.utils import get_test_method_name -from fuelweb_test.helpers.utils import TimeStat -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.models.environment import EnvironmentModel -from fuelweb_test.settings import CUSTOM_FUEL_SETTING_YAML -from fuelweb_test.settings import EXTERNAL_HAPROXY_TEMPLATE -from fuelweb_test.settings import MULTIPLE_NETWORKS -from fuelweb_test.settings import MULTIPLE_NETWORKS_TEMPLATE -from fuelweb_test.settings import ENABLE_DMZ -from fuelweb_test.settings import ENABLE_DMZ_TEMPLATE -from fuelweb_test.settings import REPLACE_DEFAULT_REPOS -from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE -from fuelweb_test.settings import SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH -from fuelweb_test.settings import USE_HAPROXY_TEMPLATE - - -class TestBasic(object): - """Basic test case class for all system tests. - - Initializes EnvironmentModel and FuelWebModel. - - """ - def __init__(self): - self._devops_config = None - self.__env = None - self.__current_log_step = 0 - self.__test_program = None - self.__fuel_constants = { - 'rabbit_pcs_name': 'p_rabbitmq-server' - } - - @property - def fuel_constants(self): - return self.__fuel_constants - - @property - def ssh_manager(self): - return SSHManager() - - @property - def current_log_step(self): - return self.__current_log_step - - @current_log_step.setter - def current_log_step(self, new_val): - self.__current_log_step = new_val - - @property - def next_step(self): - return self.current_log_step + 1 - - @property - def test_program(self): - if self.__test_program is None: - self.__test_program = TestProgram() - return self.__test_program - - @property - def env(self): - if self.__env is None: - # hack before we find proper solution for loading devops templates - external_config = os.environ.get("DEVOPS_SETTINGS_TEMPLATE", None) - if external_config: - config = get_devops_config(external_config) - else: - config = self._devops_config - self.__env = EnvironmentModel(config) - return self.__env - - @property - def fuel_web(self): - return self.env.fuel_web - - def check_run(self, snapshot_name): - """Checks if run of current test is required. 
- - :param snapshot_name: Name of the snapshot the function should make - :type snapshot_name: str - :raises: SkipTest - - """ - if snapshot_name: - if self.env.d_env.has_snapshot(snapshot_name): - raise SkipTest('Snapshot {} already exist'.format( - snapshot_name)) - - def show_step(self, step, details='', initialize=False): - """Show a description of the step taken from docstring - :param int/str step: step number to show - :param str details: additional info for a step - """ - test_func_name = get_test_method_name() - - if initialize or step == 1: - self.current_log_step = step - else: - self.current_log_step += 1 - if self.current_log_step != step: - error_message = 'The step {} should be {} at {}' - error_message = error_message.format( - step, - self.current_log_step, - test_func_name - ) - logger.error(error_message) - - test_func = getattr(self.__class__, test_func_name) - docstring = test_func.__doc__ - docstring = '\n'.join([s.strip() for s in docstring.split('\n')]) - steps = {s.split('. ')[0]: s for s in - docstring.split('\n') if s and s[0].isdigit()} - if details: - details_msg = ': {0} '.format(details) - else: - details_msg = '' - if str(step) in steps: - logger.info("\n" + " " * 55 + "<<< {0} {1}>>>" - .format(steps[str(step)], details_msg)) - else: - logger.info("\n" + " " * 55 + "<<< {0}. (no step description " - "in scenario) {1}>>>".format(str(step), details_msg)) - - def is_make_snapshot(self): - """Check if the test 'test_name' is a dependency for other planned - tests (snapshot is required). If yes return True, if no - False. - - :rtype: bool - """ - test_name = get_test_method_name() - tests = self.test_program.plan.tests - test_cases = [t for t in tests if t.entry.method.__name__ == test_name] - if len(test_cases) != 1: - logger.warning("Method 'is_make_snapshot' is called from function " - "which is not a test case: {0}".format(test_name)) - return False - test_groups = set(test_cases[0].entry.info.groups) - dependent_tests = set() - dependent_groups = set() - for t in tests: - for func in t.entry.info.depends_on: - dependent_tests.add(func.__name__) - for group in t.entry.info.depends_on_groups: - dependent_groups.add(group) - if test_name in dependent_tests or \ - test_groups & dependent_groups: - return True - return False - - def fuel_post_install_actions(self, - force_ssl=settings.FORCE_HTTPS_MASTER_NODE - ): - if settings.UPDATE_FUEL: - # Update Ubuntu packages - self.env.admin_actions.upload_packages( - local_packages_dir=settings.UPDATE_FUEL_PATH, - centos_repo_path=None, - ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU) - time.sleep(10) - self.env.set_admin_keystone_password() - self.env.sync_time(['admin']) - if settings.UPDATE_MASTER: - if settings.UPDATE_FUEL_MIRROR: - for i, url in enumerate(settings.UPDATE_FUEL_MIRROR): - conf_file = '/etc/yum.repos.d/temporary-{}.repo'.format(i) - cmd = ("echo -e" - " '[temporary-{0}]\nname=" - "temporary-{0}\nbaseurl={1}/" - "\ngpgcheck=0\npriority=" - "1' > {2}").format(i, url, conf_file) - - self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - self.env.admin_install_updates() - if settings.MULTIPLE_NETWORKS: - self.env.describe_other_admin_interfaces( - self.env.d_env.nodes().admin) - if settings.FUEL_STATS_HOST: - self.env.nailgun_actions.set_collector_address( - settings.FUEL_STATS_HOST, - settings.FUEL_STATS_PORT, - settings.FUEL_STATS_SSL) - # Restart statsenderd to apply settings(Collector address) - self.env.nailgun_actions.force_fuel_stats_sending() - if settings.FUEL_STATS_ENABLED and 
settings.FUEL_STATS_HOST: - self.fuel_web.client.send_fuel_stats(enabled=True) - logger.info('Enabled sending of statistics to {0}:{1}'.format( - settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT - )) - if settings.PATCHING_DISABLE_UPDATES: - cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \ - " -regex '.*/mos+\-(updates|security).repo' | " \ - "xargs -n1 -i sed -i 's/enabled=1/enabled=0/' -i {}" - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - if force_ssl: - self.env.enable_force_https(self.ssh_manager.admin_ip) - - def reinstall_master_node(self): - """Erase boot sector and run setup_environment""" - admin = self.env.d_env.get_node(name="admin") - with self.env.d_env.get_admin_remote() as remote: - erase_data_from_hdd(remote, mount_point='/boot') - admin.destroy() - admin.close_tray() - if settings.ADMIN_BOOT_DEVICE == 'usb': - volume = admin.disk_devices.get(device='disk', bus='usb').volume - else: # cdrom is default - volume = admin.disk_devices.get(device='cdrom').volume - volume.upload(settings.ISO_PATH) - self.env.setup_environment() - self.fuel_post_install_actions() - - def centos_setup_fuel(self, hostname): - with TimeStat("bootstrap_centos_node", is_uniq=True): - admin = list(self.env.d_env.get_nodes(role__contains='master'))[0] - self.env.d_env.start([admin]) - logger.info("Waiting for Centos node to start up") - wait(lambda: admin.driver.node_active(admin), 60, - timeout_msg='Centos node failed to start') - logger.info("Waiting for Centos node ssh ready") - self.env.wait_for_provisioning() - - ssh = SSHManager() - logger.debug("Update host information") - cmd = "echo HOSTNAME={} >> /etc/sysconfig/network".format(hostname) - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - - cmd = "echo {0} {1} {2} >> /etc/hosts".format( - ssh.admin_ip, - hostname, - settings.FUEL_MASTER_HOSTNAME) - - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - - cmd = "hostname {}".format(hostname) - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - - cmd = "yum install -y screen" - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - - install_mos_repos() - - logger.info("Install Fuel services") - - cmd = "screen -dm bash -c 'showmenu=no wait_for_external_config=yes " \ - "bootstrap_admin_node.sh'" - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - - self.env.wait_for_external_config() - self.env.admin_actions.modify_configs(self.env.d_env.get_default_gw()) - if CUSTOM_FUEL_SETTING_YAML: - self.env.admin_actions.update_fuel_setting_yaml( - CUSTOM_FUEL_SETTING_YAML) - self.env.kill_wait_for_external_config() - - self.env.wait_bootstrap() - - logger.debug("Check Fuel services") - self.env.admin_actions.wait_for_fuel_ready() - - logger.debug("post-installation configuration of Fuel services") - self.fuel_post_install_actions() - - -@test -class SetupEnvironment(TestBasic): - @test(groups=["setup"]) - @log_snapshot_after_test - def setup_master(self): - """Create environment and set up master node - - Snapshot: empty - - """ - # TODO: remove this code when fuel-devops will be ready to - # describe all required network parameters (gateway, CIDR, IP range) - # inside 'address_pool', so we can use 'network_pools' section - # for L3 configuration in tests for multi racks - if MULTIPLE_NETWORKS: - from system_test.core.discover import load_yaml - self._devops_config = load_yaml(MULTIPLE_NETWORKS_TEMPLATE) - if USE_HAPROXY_TEMPLATE and SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH: - from system_test.core.discover import load_yaml - self._devops_config = 
load_yaml(EXTERNAL_HAPROXY_TEMPLATE) - if ENABLE_DMZ: - from system_test.core.discover import load_yaml - self._devops_config = load_yaml(ENABLE_DMZ_TEMPLATE) - - self.check_run("empty") - - with TimeStat("setup_environment", is_uniq=True): - - if list(self.env.d_env.get_nodes(role='fuel_master')): - self.env.setup_environment() - self.fuel_post_install_actions() - - elif list(self.env.d_env.get_nodes(role='centos_master')): - # need to use centos_master.yaml devops template - hostname = ''.join((settings.FUEL_MASTER_HOSTNAME, - settings.DNS_SUFFIX)) - self.centos_setup_fuel(hostname) - - else: - raise SkipTest( - "No Fuel master nodes found!") - - self.env.make_snapshot("empty", is_make=True) - self.current_log_step = 0 - - @test(groups=["setup_master_custom_manifests"]) - @log_snapshot_after_test - def setup_with_custom_manifests(self): - """Setup master node with custom manifests - Scenario: - 1. Start installation of master - 2. Enable option 'wait_for_external_config' - 3. Upload custom manifests - 4. Kill 'wait_for_external_config' countdown - Snapshot: empty_custom_manifests - - Duration 20m - """ - self.check_run("empty_custom_manifests") - self.show_step(1, initialize=True) - self.show_step(2) - self.env.setup_environment(custom=True, build_images=True) - self.show_step(3) - if REPLACE_DEFAULT_REPOS and REPLACE_DEFAULT_REPOS_ONLY_ONCE: - self.fuel_web.replace_default_repos() - self.fuel_post_install_actions() - self.env.make_snapshot("empty_custom_manifests", is_make=True) - self.current_log_step = 0 - - @test(depends_on=[setup_master], groups=["prepare_release"]) - @log_snapshot_after_test - def prepare_release(self): - """Prepare master node - - Scenario: - 1. Revert snapshot "empty" - 2. Download the release if needed. Uploads custom manifest. - - Snapshot: ready - - """ - self.check_run("ready") - self.show_step(1, initialize=True) - self.env.revert_snapshot("empty", skip_timesync=True, - skip_slaves_check=True) - - self.fuel_web.get_nailgun_version() - self.fuel_web.change_default_network_settings() - self.show_step(2) - if REPLACE_DEFAULT_REPOS and REPLACE_DEFAULT_REPOS_ONLY_ONCE: - self.fuel_web.replace_default_repos() - self.env.make_snapshot("ready", is_make=True) - self.current_log_step = 0 - - @test(depends_on=[prepare_release], - groups=["prepare_slaves_1"]) - @log_snapshot_after_test - def prepare_slaves_1(self): - """Bootstrap 1 slave nodes - - Scenario: - 1. Revert snapshot "ready" - 2. Start 1 slave nodes - - Snapshot: ready_with_1_slaves - - """ - self.check_run("ready_with_1_slaves") - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready", skip_timesync=True) - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:1], - skip_timesync=True) - self.env.make_snapshot("ready_with_1_slaves", is_make=True) - self.current_log_step = 0 - - @test(depends_on=[prepare_release], - groups=["prepare_slaves_3"]) - @log_snapshot_after_test - def prepare_slaves_3(self): - """Bootstrap 3 slave nodes - - Scenario: - 1. Revert snapshot "ready" - 2. 
Start 3 slave nodes - - Snapshot: ready_with_3_slaves - - """ - self.check_run("ready_with_3_slaves") - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready", skip_timesync=True) - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3], - skip_timesync=True) - self.env.make_snapshot("ready_with_3_slaves", is_make=True) - self.current_log_step = 0 - - @test(depends_on=[prepare_release], - groups=["prepare_slaves_5"]) - @log_snapshot_after_test - def prepare_slaves_5(self): - """Bootstrap 5 slave nodes - - Scenario: - 1. Revert snapshot "ready" - 2. Start 5 slave nodes - - Snapshot: ready_with_5_slaves - - """ - self.check_run("ready_with_5_slaves") - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready", skip_timesync=True) - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:5], - skip_timesync=True) - self.env.make_snapshot("ready_with_5_slaves", is_make=True) - self.current_log_step = 0 - - @test(depends_on=[prepare_release], - groups=["prepare_slaves_9"]) - @log_snapshot_after_test - def prepare_slaves_9(self): - """Bootstrap 9 slave nodes - - Scenario: - 1. Revert snapshot "ready" - 2. Start 9 slave nodes - - Snapshot: ready_with_9_slaves - - """ - self.check_run("ready_with_9_slaves") - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready", skip_timesync=True) - # Bootstrap 9 slaves in two stages to get lower load on the host - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:5], - skip_timesync=True) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:9], - skip_timesync=True) - self.env.make_snapshot("ready_with_9_slaves", is_make=True) - self.current_log_step = 0 - - @test(depends_on=[prepare_release], - groups=["prepare_slaves_all"]) - @log_snapshot_after_test - def prepare_slaves_all(self): - """Bootstrap all slave nodes - - Scenario: - 1. Revert snapshot "ready" - 2. 
Start all available slave nodes - - Snapshot: ready_with_all_slaves - - """ - self.check_run("ready_with_all_slaves") - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready", skip_timesync=True) - - # Bootstrap by 5 slaves at the same time - self.show_step(2) - slaves = self.env.d_env.nodes().slaves[:] - - while slaves: - slaves_to_bootstrap = slaves[:5] - self.env.bootstrap_nodes(slaves_to_bootstrap, skip_timesync=True) - del slaves[:5] - - self.env.make_snapshot("ready_with_all_slaves", is_make=True) - self.current_log_step = 0 diff --git a/fuelweb_test/tests/cluster_configs.yaml b/fuelweb_test/tests/cluster_configs.yaml deleted file mode 100644 index caf0e6663..000000000 --- a/fuelweb_test/tests/cluster_configs.yaml +++ /dev/null @@ -1,177 +0,0 @@ ---- - - - name: "1Controller_2Computes_vlan" - nodes: - slave-01: - - "controller" - slave-02: - - "compute" - slave-03: - - "compute" - network: - net_provider: "neutron" - net_segment_type: "vlan" - - - name: "1Controller_2Computes_gre" - nodes: - slave-01: - - "controller" - slave-02: - - "compute" - slave-03: - - "compute" - network: - net_provider: "neutron" - net_segment_type: "gre" - - - name: "1Controller_2Computes_tun" - nodes: - slave-01: - - "controller" - slave-02: - - "compute" - slave-03: - - "compute" - network: - net_provider: "neutron" - net_segment_type: "tun" - - - name: "1Controller_ceph_2Computes_ceph_vlan" - nodes: - slave-01: - - "controller" - - "ceph-osd" - slave-02: - - "compute" - - "ceph-osd" - slave-03: - - "compute" - - "ceph-osd" - network: - net_provider: "neutron" - net_segment_type: "vlan" - settings: - volumes_ceph: true - images_ceph: true - volumes_lvm: false - - - name: "1Controller_1Compute_1cndr_mongo_3ceph" - nodes: - slave-01: - - "controller" - slave-02: - - "compute" - slave-03: - - "cinder" - - "mongo" - slave-04: - - "ceph-osd" - slave-05: - - "ceph-osd" - slave-06: - - "ceph-osd" - network: - net_provider: "neutron" - net_segment_type: "vlan" - settings: - ceilometer: true - images_ceph: true - - - name: "3Controller_3Computes_ceph" - nodes: - slave-01: - - "controller" - slave-02: - - "controller" - slave-03: - - "controller" - slave-04: - - "compute" - - "ceph-osd" - slave-05: - - "compute" - - "ceph-osd" - slave-06: - - "compute" - - "ceph-osd" - network: - net_provider: "neutron" - net_segment_type: "vlan" - settings: - ephemeral_ceph: true - - - name: "3Controller_mongo_3Computes_ceph" - nodes: - slave-01: - - "controller" - - "mongo" - slave-02: - - "controller" - - "mongo" - slave-03: - - "controller" - - "mongo" - slave-04: - - "compute" - - "ceph-osd" - slave-05: - - "compute" - - "ceph-osd" - slave-06: - - "compute" - - "ceph-osd" - network: - net_provider: "neutron" - net_segment_type: "vlan" - settings: - ceilometer: true - images_ceph: true - - - name: "1Controller_1Compute_1Cinder_3Ceph_Rados_vlan" - nodes: - slave-01: - - "controller" - slave-02: - - "compute" - slave-03: - - "cinder" - slave-04: - - "ceph-osd" - slave-05: - - "ceph-osd" - slave-06: - - "ceph-osd" - network: - net_provider: "neutron" - net_segment_type: "vlan" - settings: - images_ceph: true - objects_ceph: true - osd_pool_size: '2' - - - name: "1Controller_2Computes_1Cinder_3Ceph_1Mongo_Ceph_Image_Ceilometer_vlan" - nodes: - slave-01: - - "controller" - slave-02: - - "compute" - slave-03: - - "compute" - slave-04: - - "cinder" - slave-05: - - "ceph-osd" - slave-06: - - "ceph-osd" - slave-07: - - "ceph-osd" - slave-08: - - "mongo" - network: - net_provider: "neutron" - net_segment_type: "vlan" - 
settings: - ceilometer: true - images_ceph: true - osd_pool_size: '2' - diff --git a/fuelweb_test/tests/plugins/__init__.py b/fuelweb_test/tests/plugins/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_contrail/__init__.py b/fuelweb_test/tests/plugins/plugin_contrail/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_contrail/test_fuel_plugin_contrail.py b/fuelweb_test/tests/plugins/plugin_contrail/test_fuel_plugin_contrail.py deleted file mode 100644 index 8d9f0565c..000000000 --- a/fuelweb_test/tests/plugins/plugin_contrail/test_fuel_plugin_contrail.py +++ /dev/null @@ -1,795 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -import os -import os.path -import time - -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.common import Common -from fuelweb_test import logger -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import CONTRAIL_PLUGIN_PATH -from fuelweb_test.settings import CONTRAIL_PLUGIN_PACK_UB_PATH -from fuelweb_test.settings import CONTRAIL_PLUGIN_PACK_CEN_PATH -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import iface_alias -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["plugins"]) -class ContrailPlugin(TestBasic): - """ContrailPlugin.""" # TODO documentation - - _pack_copy_path = '/var/www/nailgun/plugins/contrail-1.0' - _add_ub_packag = \ - '/var/www/nailgun/plugins/contrail-1.0/' \ - 'repositories/ubuntu/contrail-setup*' - _add_cen_packeg = \ - '/var/www/nailgun/plugins/contrail-1.0/' \ - 'repositories/centos/Packages/contrail-setup*' - _ostf_msg = 'OSTF tests passed successfully.' 
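Each entry in cluster_configs.yaml above follows one shape: a name, a nodes mapping from slave id to role list, a network section, and optional settings overrides. That mapping lines up directly with the client API used throughout these tests; the following is a minimal sketch of a driver for it, assuming PyYAML, the DEPLOYMENT_MODE constant from fuelweb_test.settings, and the fuel_web client that the TestBasic base class provides (deploy_from_config itself is a hypothetical helper, not part of the original code):

import yaml

def deploy_from_config(fuel_web, config, mode):
    # Network options and the optional settings overrides merge into
    # the single settings dict that create_cluster() expects.
    settings = dict(config['network'])
    settings.update(config.get('settings', {}))
    cluster_id = fuel_web.create_cluster(
        name=config['name'], mode=mode, settings=settings)
    # The YAML nodes mapping already has the update_nodes() shape:
    # {'slave-01': ['controller'], 'slave-02': ['compute'], ...}
    fuel_web.update_nodes(cluster_id, config['nodes'])
    fuel_web.deploy_cluster_wait(cluster_id)
    return cluster_id

with open('fuelweb_test/tests/cluster_configs.yaml') as f:
    for config in yaml.safe_load(f):
        # fuel_web and DEPLOYMENT_MODE come from the test harness
        deploy_from_config(fuel_web, config, DEPLOYMENT_MODE)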
-
-    cluster_id = ''
-
-    _pack_path = [CONTRAIL_PLUGIN_PACK_UB_PATH, CONTRAIL_PLUGIN_PACK_CEN_PATH]
-
-    def __init__(self):
-        super(ContrailPlugin, self).__init__()
-        check_plugin_path_env(
-            var_name='CONTRAIL_PLUGIN_PATH',
-            plugin_path=CONTRAIL_PLUGIN_PATH
-        )
-        check_plugin_path_env(
-            var_name='CONTRAIL_PLUGIN_PACK_UB_PATH',
-            plugin_path=CONTRAIL_PLUGIN_PACK_UB_PATH
-        )
-        check_plugin_path_env(
-            var_name='CONTRAIL_PLUGIN_PACK_CEN_PATH',
-            plugin_path=CONTRAIL_PLUGIN_PACK_CEN_PATH
-        )
-
-    def _upload_contrail_packages(self, remote):
-        for pack in self._pack_path:
-            if os.path.splitext(pack)[1] in [".deb", ".rpm"]:
-                pkg_name = os.path.basename(pack)
-                logger.debug("Uploading package {0} "
-                             "to master node".format(pkg_name))
-                remote.upload(pack, self._pack_copy_path)
-            else:
-                logger.error('Skipping {0}: only .deb and .rpm packages '
-                             'can be uploaded'.format(pack))
-
-    def _install_packages(self, remote):
-        command = "cd " + self._pack_copy_path + " && ./install.sh"
-        logger.info('The command is %s', command)
-        remote.execute_async(command)
-        # install.sh is started asynchronously; give it time to unpack
-        # the contrail-setup packages before checking for them
-        time.sleep(50)
-        # the unpacked packages live on the master node, so check for
-        # them over SSH rather than on the local filesystem
-        result = remote.execute('ls {0} {1}'.format(
-            self._add_ub_packag, self._add_cen_packeg))
-        assert_true(result['stdout'],
-                    'contrail-setup packages were not found on master node')
-
-    def _assign_net_provider(self, pub_all_nodes=False):
-        """Assign neutron with vlan segmentation"""
-        segment_type = NEUTRON_SEGMENT['vlan']
-        self.cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": segment_type,
-                'assign_to_all_nodes': pub_all_nodes
-            }
-        )
-        return self.cluster_id
-
-    def _prepare_contrail_plugin(self, slaves=None, pub_net=False):
-        """Copy necessary packages to the master node and install them"""
-
-        self.env.revert_snapshot("ready_with_{:d}_slaves".format(slaves))
-
-        with self.env.d_env.get_admin_remote() as remote:
-
-            # copy plugin to the master node
-            utils.upload_tarball(
-                ip=self.ssh_manager.admin_ip,
-                tar_path=CONTRAIL_PLUGIN_PATH,
-                tar_target='/var'
-            )
-
-            # install plugin
-            utils.install_plugin_check_code(
-                ip=self.ssh_manager.admin_ip,
-                plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))
-
-            # copy additional packages to the master node
-            self._upload_contrail_packages(remote)
-
-            # install packages
-            self._install_packages(remote)
-
-        # prepare fuel
-        self._assign_net_provider(pub_net)
-
-    def _activate_plugin(self):
-        """Enable plugin in contrail settings"""
-        plugin_name = 'contrail'
-        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
-        assert_true(
-            self.fuel_web.check_plugin_exists(self.cluster_id, plugin_name),
-            msg)
-        logger.debug('Contrail plugin found in cluster attributes')
-        option = {'metadata/enabled': True, }
-        self.fuel_web.update_plugin_data(self.cluster_id, plugin_name, option)
-
-    def _create_net_subnet(self, cluster):
-        """Create net and subnet"""
-        contrail_ip = self.fuel_web.get_public_vip(cluster)
-        logger.info('The ip is %s', contrail_ip)
-        net = Common(
-            controller_ip=contrail_ip, user='admin',
-            password='admin', tenant='admin'
-        )
-
-        net.neutron.create_network(body={
-            'network': {
-                'name': 'net04',
-                'admin_state_up': True,
-            }
-        })
-
-        network_id = ''
-        network_dic = net.neutron.list_networks()
-        for dd in network_dic['networks']:
-            if dd.get("name") == "net04":
-                network_id = dd.get("id")
-
-        if network_id == "":
-            raise Exception('Network net04 was not found, '
-                            'cannot create subnet04')
-
-        logger.debug("Found network net04 with id {0}".format(network_id))
-
-        net.neutron.create_subnet(body={
-            'subnet': {
-                'network_id': network_id,
-                'ip_version': 4,
-                'cidr': '10.100.0.0/24',
-                'name': 'subnet04',
-            }
-        })
-
-    def change_disk_size(self):
-        """
-        Configure disks on base-os nodes
-        """
-        nailgun_nodes = \
-            self.fuel_web.client.list_cluster_nodes(self.cluster_id)
-        base_os_disk = 40960
-        # pylint: disable=round-builtin
-        base_os_disk_gb = ("{0}G".format(round(base_os_disk / 1024, 1)))
-        # pylint: enable=round-builtin
-        logger.info('disk size is {0}'.format(base_os_disk_gb))
-        disk_part = {
-            "vda": {
-                "os": base_os_disk, }
-        }
-
-        for node in nailgun_nodes:
-            if node.get('pending_roles') == ['base-os']:
-                self.fuel_web.update_node_disk(node.get('id'), disk_part)
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
-          groups=["install_contrail"])
-    @log_snapshot_after_test
-    def install_contrail(self):
-        """Install Contrail Plugin and create cluster
-
-        Scenario:
-            1. Revert snapshot "ready_with_5_slaves"
-            2. Upload contrail plugin to the master node
-            3. Install plugin and additional packages
-            4. Enable Neutron with VLAN segmentation
-            5. Create cluster
-
-        Duration 20 min
-
-        """
-        self._prepare_contrail_plugin(slaves=5)
-
-        self.env.make_snapshot("install_contrail", is_make=True)
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
-          groups=["deploy_contrail"])
-    @log_snapshot_after_test
-    def deploy_contrail(self):
-        """Deploy a cluster with Contrail Plugin
-
-        Scenario:
-            1. Revert snapshot "ready_with_5_slaves"
-            2. Create cluster
-            3. Add 3 nodes with Operating system role
-               and 1 node with controller role
-            4. Enable Contrail plugin
-            5. Deploy cluster with plugin
-
-        Duration 90 min
-
-        """
-        self._prepare_contrail_plugin(slaves=5)
-
-        self.fuel_web.update_nodes(
-            self.cluster_id,
-            {
-                'slave-01': ['base-os'],
-                'slave-02': ['base-os'],
-                'slave-03': ['base-os'],
-                'slave-04': ['controller'],
-            },
-            custom_names={
-                'slave-01': 'contrail-1',
-                'slave-02': 'contrail-2',
-                'slave-03': 'contrail-3'
-            }
-        )
-
-        # configure disks on base-os nodes
-        self.change_disk_size()
-
-        # enable plugin in contrail settings
-        self._activate_plugin()
-
-        self.fuel_web.deploy_cluster_wait(self.cluster_id)
-
-        self.env.make_snapshot("deploy_contrail", is_make=True)
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
-          groups=["deploy_controller_compute_contrail"])
-    @log_snapshot_after_test
-    def deploy_controller_compute_contrail(self):
-        """Deploy cluster with 1 controller, 1 compute,
-        3 base-os and install contrail plugin
-
-        Scenario:
-            1. Revert snapshot "ready_with_5_slaves"
-            2. Create cluster
-            3.
Add 3 nodes with Operating system role, 1 node with controller - role and 1 node with compute + cinder role - 4. Enable Contrail plugin - 5. Deploy cluster with plugin - 6. Create net and subnet - 7. Run OSTF tests - - Duration 110 min - - """ - self._prepare_contrail_plugin(slaves=5) - - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['base-os'], - 'slave-02': ['base-os'], - 'slave-03': ['base-os'], - 'slave-04': ['controller'], - 'slave-05': ['compute', 'cinder'] - }, - custom_names={ - 'slave-01': 'contrail-1', - 'slave-02': 'contrail-2', - 'slave-03': 'contrail-3' - } - ) - - # configure disks on base-os nodes - self.change_disk_size() - - # enable plugin in contrail settings - self._activate_plugin() - - # deploy cluster - self.fuel_web.deploy_cluster_wait(self.cluster_id) - - # create net and subnet - self._create_net_subnet(self.cluster_id) - - # TODO - # Tests using north-south connectivity are expected to fail because - # they require additional gateway nodes, and specific contrail - # settings. This mark is a workaround until it's verified - # and tested manually. - # When it will be done 'should_fail=2' and - # 'failed_test_name' parameter should be removed. - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=2, - failed_test_name=[('Check network connectivity ' - 'from instance via floating IP'), - 'Launch instance with file injection'] - ) - - logger.info(self._ostf_msg) - - self.env.make_snapshot("deploy_controller_compute_contrail", - is_make=True) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["contrail_plugin_add_delete_compute_node"]) - @log_snapshot_after_test - def contrail_plugin_add_delete_compute_node(self): - """Verify that Compute node can be - deleted and added after deploying - - Scenario: - 1. Revert snapshot "ready_with_9_slaves" - 2. Create cluster - 3. Add 3 nodes with Operating system role, - 1 node with controller role and 2 nodes with compute role - 4. Enable Contrail plugin - 5. Deploy cluster with plugin - 6. Remove 1 node with compute role. - 7. Deploy cluster - 8. Add 1 nodes with compute role - 9. Deploy cluster - 10. 
Run OSTF tests - - Duration 140 min - - """ - self._prepare_contrail_plugin(slaves=9) - - # create cluster: 3 nodes with Operating system role, - # 1 node with controller role and 2 nodes with compute role - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['base-os'], - 'slave-02': ['base-os'], - 'slave-03': ['base-os'], - 'slave-04': ['controller'], - 'slave-05': ['compute'], - 'slave-06': ['compute'] - }, - custom_names={ - 'slave-01': 'contrail-1', - 'slave-02': 'contrail-2', - 'slave-03': 'contrail-3' - } - ) - - # configure disks on base-os nodes - self.change_disk_size() - - # enable plugin in contrail settings - self._activate_plugin() - - # deploy cluster - self.fuel_web.deploy_cluster_wait(self.cluster_id, - check_services=False) - - # create net and subnet - self._create_net_subnet(self.cluster_id) - - # remove one node with compute role - self.fuel_web.update_nodes( - self.cluster_id, {'slave-05': ['compute']}, False, True) - - self.fuel_web.deploy_cluster_wait(self.cluster_id, - check_services=False) - - # add 1 node with compute role and redeploy cluster - self.fuel_web.update_nodes( - self.cluster_id, {'slave-07': ['compute'], }) - - self.fuel_web.deploy_cluster_wait(self.cluster_id, - check_services=False) - - # TODO - # Tests using north-south connectivity are expected to fail because - # they require additional gateway nodes, and specific contrail - # settings. This mark is a workaround until it's verified - # and tested manually. - # Also workaround according to bug 1457515 - # When it will be done 'should_fail=3' and - # 'failed_test_name' parameter should be removed. - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=2, - failed_test_name=[('Check network connectivity ' - 'from instance via floating IP'), - 'Launch instance with file injection'] - ) - - logger.info(self._ostf_msg) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["deploy_ha_contrail_plugin"]) - @log_snapshot_after_test - def deploy_ha_contrail_plugin(self): - """Deploy HA Environment with Contrail Plugin - - Scenario: - 1. Revert snapshot "ready_with_9_slaves" - 2. Create cluster - 3. Add 3 nodes with Operating system role and - 1 node with controller role - 4. Enable Contrail plugin - 5. Deploy cluster with plugin - 6. Add 1 node with compute role - 7. Deploy cluster - 8. Run OSTF tests - 9. Add 2 nodes with controller role and - 1 node with compute + cinder role - 10. Deploy cluster - 11. Run OSTF tests - - Duration 140 min - - """ - self._prepare_contrail_plugin(slaves=9) - - # create cluster: 3 nodes with Operating system role - # and 1 node with controller role - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['base-os'], - 'slave-02': ['base-os'], - 'slave-03': ['base-os'], - 'slave-04': ['controller'] - }, - custom_names={ - 'slave-01': 'contrail-1', - 'slave-02': 'contrail-2', - 'slave-03': 'contrail-3' - } - ) - - # configure disks on base-os nodes - self.change_disk_size() - - # enable plugin in contrail settings - self._activate_plugin() - - self.fuel_web.deploy_cluster_wait(self.cluster_id) - - # create net and subnet - self._create_net_subnet(self.cluster_id) - - # add 1 node with compute role and redeploy cluster - self.fuel_web.update_nodes( - self.cluster_id, {'slave-05': ['compute']},) - - self.fuel_web.deploy_cluster_wait(self.cluster_id) - - # TODO - # Tests using north-south connectivity are expected to fail because - # they require additional gateway nodes, and specific contrail - # settings. 
This mark is a workaround until it's verified - # and tested manually. - # When it will be done 'should_fail=2' and - # 'failed_test_name' parameter should be removed. - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=2, - failed_test_name=[('Check network connectivity ' - 'from instance via floating IP'), - 'Launch instance with file injection'] - ) - - logger.info(self._ostf_msg) - - # add to cluster 2 nodes with controller role and one - # with compute, cinder role and deploy cluster - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-06': ['controller'], - 'slave-07': ['controller'], - 'slave-08': ['compute', 'cinder'], - } - ) - - logger.info(self._ostf_msg) - - self.fuel_web.deploy_cluster_wait(self.cluster_id) - - # TODO: - # Tests using north-south connectivity are expected to fail because - # they require additional gateway nodes, and specific contrail - # settings. This mark is a workaround until it's verified - # and tested manually. - # When it will be done 'should_fail=2' and - # 'failed_test_name' parameter should be removed. - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=2, - failed_test_name=[('Check network connectivity ' - 'from instance via floating IP'), - 'Launch instance with file injection'] - ) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["contrail_plugin_add_delete_controller_node"]) - @log_snapshot_after_test - def contrail_plugin_add_delete_controller_node(self): - """Verify that Controller node can be - deleted and added after deploying - - Scenario: - 1. Revert snapshot "ready_with_9_slaves" - 2. Create cluster - 3. Add 3 nodes with Operating system role, - 2 nodes with controller role and 1 node with compute role - 4. Enable Contrail plugin - 5. Deploy cluster with plugin - 6. Remove 1 node with controller role. - 7. Deploy cluster - 8. Add 1 nodes with controller role - 9. Deploy cluster - 10. Run OSTF tests - - Duration 140 min - - """ - self._prepare_contrail_plugin(slaves=9) - - # create cluster: 3 nodes with Operating system role - # and 1 node with controller role - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['base-os'], - 'slave-02': ['base-os'], - 'slave-03': ['base-os'], - 'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['controller'], - 'slave-07': ['compute'] - }, - custom_names={ - 'slave-01': 'contrail-1', - 'slave-02': 'contrail-2', - 'slave-03': 'contrail-3' - } - ) - - # configure disks on base-os nodes - self.change_disk_size() - - # enable plugin in contrail settings - self._activate_plugin() - - self.fuel_web.deploy_cluster_wait(self.cluster_id, - check_services=False, - timeout=240 * 60) - - # remove one node with controller role - self.fuel_web.update_nodes( - self.cluster_id, {'slave-05': ['controller']}, False, True) - - self.fuel_web.deploy_cluster_wait(self.cluster_id, - check_services=False, - timeout=240 * 60) - - # add 1 node with controller role and redeploy cluster - self.fuel_web.update_nodes( - self.cluster_id, {'slave-08': ['controller']}) - - self.fuel_web.deploy_cluster_wait(self.cluster_id, - check_services=False, - timeout=240 * 60) - - # TODO - # Tests using north-south connectivity are expected to fail because - # they require additional gateway nodes, and specific contrail - # settings. This mark is a workaround until it's verified - # and tested manually. - # Also workaround according to bug 1457515 - # When it will be done 'should_fail=3' and - # 'failed_test_name' parameter should be removed. 
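Every run_ostf call in this class repeats the workaround spelled out in the TODO comments above: the two north-south connectivity tests are declared as expected failures until gateway nodes and the related contrail settings are in place. A small sketch of how that could be factored out, reusing only the run_ostf signature already used in this file (run_ostf_with_known_failures is a hypothetical helper, not part of the original code):

# OSTF tests expected to fail until gateway nodes and the related
# contrail settings are configured (see the TODO comments above).
OSTF_KNOWN_FAILURES = [
    'Check network connectivity from instance via floating IP',
    'Launch instance with file injection',
]

def run_ostf_with_known_failures(fuel_web, cluster_id,
                                 known=OSTF_KNOWN_FAILURES):
    # should_fail has to match the number of declared failures, so that
    # only these tests are tolerated and any other failure still fails.
    fuel_web.run_ostf(cluster_id=cluster_id,
                      should_fail=len(known),
                      failed_test_name=known)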
- - # create net and subnet to pass ostf - self._create_net_subnet(self.cluster_id) - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=2, - failed_test_name=[('Check network connectivity ' - 'from instance via floating IP'), - 'Launch instance with file injection'] - ) - - logger.info(self._ostf_msg) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["deploy_ha_with_pub_net_all_nodes"]) - @log_snapshot_after_test - def deploy_ha_with_pub_net_all_nodes(self): - """Deploy HA Environment with Contrail Plugin - and assign public network to all nodes - - Scenario: - 1. Revert snapshot "ready_with_9_slaves" - 2. Create cluster and select "Assign public network to all nodes" - check box - 3. Add 3 nodes with Operating system role, - 1 node with controller role and 1 node with compute role - 4. Enable Contrail plugin - 5. Deploy cluster with plugin - 6. Add 1 node with controller node and - 1 node with compute role - 7. Deploy cluster - 8. Run OSTF tests - - Duration 140 min - - """ - self._prepare_contrail_plugin(slaves=9, pub_net=True) - - # create cluster: 3 nodes with Operating system role, - # 1 node with controller and 1 node with compute roles - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['base-os'], - 'slave-02': ['base-os'], - 'slave-03': ['base-os'], - 'slave-04': ['controller'], - 'slave-05': ['compute'], - }, - custom_names={ - 'slave-01': 'contrail-1', - 'slave-02': 'contrail-2', - 'slave-03': 'contrail-3' - } - ) - - # configure disks on base-os nodes - self.change_disk_size() - - # enable plugin in contrail settings - self._activate_plugin() - - self.fuel_web.deploy_cluster_wait(self.cluster_id) - - # create net and subnet - self._create_net_subnet(self.cluster_id) - - # add 1 node with controller and 1 node with - # compute role and redeploy cluster - self.fuel_web.update_nodes( - self.cluster_id, { - 'slave-06': ['compute'], - 'slave-07': ['compute', 'cinder']}) - - self.fuel_web.deploy_cluster_wait(self.cluster_id) - - # TODO - # Tests using north-south connectivity are expected to fail because - # they require additional gateway nodes, and specific contrail - # settings. This mark is a workaround until it's verified - # and tested manually. - # When it will be done 'should_fail=2' and - # 'failed_test_name' parameter should be removed. - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=2, - failed_test_name=[('Check network connectivity ' - 'from instance via floating IP'), - 'Launch instance with file injection']) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["check_bonding_with_contrail"]) - @log_snapshot_after_test - def check_bonding_with_contrail(self): - """Verify bonding with Contrail Plugin - - Scenario: - 1. Revert snapshot "ready_with_5_slaves" - 2. Create cluster - 3. Add 3 nodes with Operating system role, - 1 node with controller role and 1 node with compute role - 4. Enable Contrail plugin - 5. Setup bonding for management and storage interfaces - 6. Deploy cluster with plugin - 7. 
Run OSTF tests - - Duration 140 min - - """ - self._prepare_contrail_plugin(slaves=5) - - # create cluster: 3 nodes with Operating system role, - # 1 node with controller and 1 node with compute roles - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['base-os'], - 'slave-02': ['base-os'], - 'slave-03': ['base-os'], - 'slave-04': ['controller'], - 'slave-05': ['compute'] - }, - custom_names={ - 'slave-01': 'contrail-1', - 'slave-02': 'contrail-2', - 'slave-03': 'contrail-3' - } - ) - raw_data = [{ - 'mac': None, - 'mode': 'active-backup', - 'name': 'bond0', - 'slaves': [ - {'name': iface_alias('eth4')}, - {'name': iface_alias('eth2')}, - ], - 'state': None, - 'type': 'bond', - 'assigned_networks': [] - }, ] - - interfaces = { - iface_alias('eth0'): ['fuelweb_admin'], - iface_alias('eth1'): ['public'], - iface_alias('eth3'): ['private'], - 'bond0': [ - 'management', - 'storage', - ] - } - - cluster_nodes = \ - self.fuel_web.client.list_cluster_nodes(self.cluster_id) - for node in cluster_nodes: - self.fuel_web.update_node_networks( - node['id'], interfaces_dict=interfaces, - raw_data=raw_data - ) - - # enable plugin in contrail settings - self._activate_plugin() - - self.fuel_web.deploy_cluster_wait(self.cluster_id, - check_services=False) - - # create net and subnet - self._create_net_subnet(self.cluster_id) - - # TODO - # Tests using north-south connectivity are expected to fail because - # they require additional gateway nodes, and specific contrail - # settings. This mark is a workaround until it's verified - # and tested manually. - # Also workaround according to bug 1457515 - # When it will be done 'should_fail=3' and - # 'failed_test_name' parameter should be removed. - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=2, - failed_test_name=[('Check network connectivity ' - 'from instance via floating IP'), - 'Launch instance with file injection'] - ) diff --git a/fuelweb_test/tests/plugins/plugin_emc/__init__.py b/fuelweb_test/tests/plugins/plugin_emc/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_emc/test_plugin_emc.py b/fuelweb_test/tests/plugins/plugin_emc/test_plugin_emc.py deleted file mode 100644 index 52f25a2a2..000000000 --- a/fuelweb_test/tests/plugins/plugin_emc/test_plugin_emc.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import os - -from proboscis import asserts -from proboscis import test -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import configparser -# pylint: enable=import-error - -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["plugins"]) -class EMCPlugin(TestBasic): - """EMCPlugin.""" # TODO documentation - def __init__(self): - super(EMCPlugin, self).__init__() - check_plugin_path_env( - var_name='EMC_PLUGIN_PATH', - plugin_path=settings.EMC_PLUGIN_PATH - ) - - @classmethod - def check_emc_cinder_config(cls, ip, path): - with SSHManager().open_on_remote( - ip=ip, - path=path - ) as f: - cinder_conf = configparser.ConfigParser() - cinder_conf.readfp(f) - - asserts.assert_equal( - cinder_conf.get('DEFAULT', 'volume_driver'), - 'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver') - asserts.assert_equal( - cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'), - 'global') - asserts.assert_false( - cinder_conf.getboolean('DEFAULT', - 'destroy_empty_storage_group')) - asserts.assert_true( - cinder_conf.getboolean('DEFAULT', - 'initiator_auto_registration')) - asserts.assert_equal( - cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1) - asserts.assert_equal( - cinder_conf.getint('DEFAULT', 'default_timeout'), 10) - asserts.assert_equal( - cinder_conf.get('DEFAULT', 'naviseccli_path'), - '/opt/Navisphere/bin/naviseccli') - - asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip')) - asserts.assert_true(cinder_conf.has_option('DEFAULT', - 'san_secondary_ip')) - asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login')) - asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password')) - - @classmethod - def check_service(cls, remote, service): - ps_output = ''.join( - remote.execute('ps ax | grep {0} | ' - 'grep -v grep'.format(service))['stdout']) - return service in ps_output - - @classmethod - def check_emc_management_package(cls, ip): - navicli = utils.get_package_versions_from_node( - ip=ip, - name='navicli', - os_type=settings.OPENSTACK_RELEASE) - naviseccli = utils.get_package_versions_from_node( - ip=ip, - name='naviseccli', - os_type=settings.OPENSTACK_RELEASE) - return bool(navicli + naviseccli) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_emc_ha"]) - @log_snapshot_after_test - def deploy_emc_ha(self): - """Deploy cluster in ha mode with emc plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 3 nodes with controller role - 5. Add 2 nodes with compute role - 6. Deploy the cluster - 7. Run network verification - 8. Check plugin installation - 9. 
Run OSTF - - Duration 35m - Snapshot deploy_ha_emc - """ - self.env.revert_snapshot("ready_with_5_slaves") - - # copy plugin to the master node - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.EMC_PLUGIN_PATH, - tar_target='/var' - ) - - # install plugin - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(settings.EMC_PLUGIN_PATH)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - - attr = self.fuel_web.client.get_cluster_attributes(cluster_id) - - # check plugin installed and attributes have emc options - - for option in ["emc_sp_a_ip", "emc_sp_b_ip", - "emc_username", "emc_password", "emc_pool_name"]: - asserts.assert_true(option in attr["editable"]["emc_vnx"], - "{0} is not in cluster attributes: {1}". - format(option, - str(attr["editable"]["storage"]))) - - # disable LVM-based volumes - - attr["editable"]["storage"]["volumes_lvm"]["value"] = False - - # enable EMC plugin - - emc_options = attr["editable"]["emc_vnx"] - emc_options["metadata"]["enabled"] = True - emc_options["emc_sp_a_ip"]["value"] = settings.EMC_SP_A_IP - emc_options["emc_sp_b_ip"]["value"] = settings.EMC_SP_B_IP - emc_options["emc_username"]["value"] = settings.EMC_USERNAME - emc_options["emc_password"]["value"] = settings.EMC_PASSWORD - emc_options["emc_pool_name"]["value"] = settings.EMC_POOL_NAME - - self.fuel_web.client.update_cluster_attributes(cluster_id, attr) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - # get remotes for all nodes - - controller_nodes = [self.fuel_web.get_nailgun_node_by_name(node) - for node in ['slave-01', 'slave-02', 'slave-03']] - compute_nodes = [self.fuel_web.get_nailgun_node_by_name(node) - for node in ['slave-04', 'slave-05']] - - controller_remotes = [self.env.d_env.get_ssh_to_remote(node['ip']) - for node in controller_nodes] - compute_remotes = [self.env.d_env.get_ssh_to_remote(node['ip']) - for node in compute_nodes] - - # check cinder-volume settings - - for node in controller_nodes: - self.check_emc_cinder_config( - ip=node['ip'], path='/etc/cinder/cinder.conf') - self.check_emc_management_package(ip=node['ip']) - - # check cinder-volume layout on controllers - - cinder_volume_ctrls = [self.check_service(controller, "cinder-volume") - for controller in controller_remotes] - asserts.assert_equal(sum(cinder_volume_ctrls), 1, - "Cluster has more than one " - "cinder-volume on controllers") - - # check cinder-volume layout on computes - - cinder_volume_comps = [self.check_service(compute, "cinder-volume") - for compute in compute_remotes] - # closing connections - for remote in controller_remotes: - remote.clear() - for remote in compute_remotes: - remote.clear() - - asserts.assert_equal(sum(cinder_volume_comps), 0, - "Cluster has active cinder-volume on compute") - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_ha_emc") diff --git a/fuelweb_test/tests/plugins/plugin_etckeeper/__init__.py b/fuelweb_test/tests/plugins/plugin_etckeeper/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_etckeeper/test_plugin_etckeeper.py b/fuelweb_test/tests/plugins/plugin_etckeeper/test_plugin_etckeeper.py deleted file mode 
100644 index a34e64ec5..000000000 --- a/fuelweb_test/tests/plugins/plugin_etckeeper/test_plugin_etckeeper.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import utils -from fuelweb_test import logger -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import ETCKEEPER_PLUGIN_REPO -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.helpers.fuel_actions import FuelPluginBuilder -from fuelweb_test.helpers.decorators import log_snapshot_after_test - - -@test(groups=["fuel_plugins", "fuel_plugin_etckeeper"], - enabled=False) -class EtcKeeper(TestBasic): - """Test class for testing allocation of vip for plugin.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["etckeeper_plugin"]) - @log_snapshot_after_test - def etckeeper_plugin(self): - """Check tracking /etc dir by etckeeper plugin - - Scenario: - 1. Revert snapshot with 1 node - 2. Download and install fuel-plugin-builder - 3. Clone plugin repo - 4. Build plugin - 5. Install plugin to fuel - 6. Create cluster and enable plugin - 7. Deploy cluster - 8. Check plugin - - Duration 50m - """ - plugin_name = 'fuel-plugin-etckeeper' - plugin_path = '/var' - source_plugin_path = os.path.join(plugin_path, plugin_name) - - self.show_step(1) - self.env.revert_snapshot("ready_with_1_slaves") - - self.show_step(2) - fpb = FuelPluginBuilder() - fpb.fpb_install() - - ip = self.ssh_manager.admin_ip - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='git clone {0} {1}'.format( - ETCKEEPER_PLUGIN_REPO, source_plugin_path)) - - self.show_step(4) - packet_name = fpb.fpb_build_plugin(source_plugin_path) - - self.show_step(5) - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.join(source_plugin_path, packet_name)) - - self.show_step(6) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={'propagate_task_deploy': True} - ) - - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - logger.info('Cluster is {!s}'.format(cluster_id)) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller']} - ) - - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - etckeeper_status = self.ssh_manager.execute_on_remote( - ip=ip, cmd='etckeeper vcs status') - if 'branch master' not in etckeeper_status['stdout_str']: - raise Exception("The etckeeper has wrong status {0}".format( - etckeeper_status['stdout_str'])) - - new_config = 'test_config' - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='>>{0}'.format(os.path.join('/etc', new_config))) - - etckeeper_status = self.ssh_manager.execute_on_remote( - ip=ip, cmd='etckeeper vcs status') - if new_config not in etckeeper_status['stdout_str']: - raise Exception( - "The etckeeper does not tracked adding the new config: {0}, " - "actual status: {1}".format( - new_config, etckeeper_status['stdout_str'])) diff --git a/fuelweb_test/tests/plugins/plugin_example/__init__.py b/fuelweb_test/tests/plugins/plugin_example/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example.py b/fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example.py deleted file mode 100644 index 0d139e8c2..000000000 --- a/fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -from proboscis.asserts import assert_equal, assert_true -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import utils -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import EXAMPLE_PLUGIN_PATH -from fuelweb_test.settings import EXAMPLE_PLUGIN_V3_PATH -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["fuel_plugins", "fuel_plugin_example"]) -class ExamplePlugin(TestBasic): - """ExamplePlugin.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_ha_controller_neutron_example"]) - @log_snapshot_after_test - def deploy_ha_one_controller_neutron_example(self): - """Deploy cluster with one controller and example plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 1 node with controller role - 5. Add 2 nodes with compute role - 6. Deploy the cluster - 7. 
Run network verification
-            8. Check plugin health
-            9. Run OSTF
-
-        Duration 35m
-        Snapshot deploy_ha_one_controller_neutron_example
-        """
-        checkers.check_plugin_path_env(
-            var_name='EXAMPLE_PLUGIN_PATH',
-            plugin_path=EXAMPLE_PLUGIN_PATH
-        )
-
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        # copy plugin to the master node
-        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)
-
-        utils.upload_tarball(
-            ip=self.ssh_manager.admin_ip,
-            tar_path=EXAMPLE_PLUGIN_PATH,
-            tar_target='/var')
-
-        # install plugin
-
-        utils.install_plugin_check_code(
-            ip=self.ssh_manager.admin_ip,
-            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
-
-        segment_type = NEUTRON_SEGMENT['vlan']
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": segment_type,
-                "propagate_task_deploy": True
-            }
-        )
-
-        plugin_name = 'fuel_plugin_example'
-        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
-        assert_true(
-            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
-            msg)
-        options = {'metadata/enabled': True}
-        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
-
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['compute']
-            }
-        )
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.fuel_web.verify_network(cluster_id)
-
-        # check if service ran on controller
-        logger.debug("Start to check service on node {0}".format('slave-01'))
-        cmd_curl = 'curl localhost:8234'
-        cmd = 'pgrep -f fuel-simple-service'
-
-        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
-            res_pgrep = remote.execute(cmd)
-            assert_equal(0, res_pgrep['exit_code'],
-                         'Failed with error {0}'.format(res_pgrep['stderr']))
-            assert_equal(1, len(res_pgrep['stdout']),
-                         'Failed with error {0}'.format(res_pgrep['stderr']))
-            # curl to service: check the exit code of the curl call itself
-            res_curl = remote.execute(cmd_curl)
-            assert_equal(0, res_curl['exit_code'],
-                         'Failed with error {0}'.format(res_curl['stderr']))
-
-        self.fuel_web.run_ostf(
-            cluster_id=cluster_id)
-
-        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=["deploy_ha_controller_neutron_example_v3"])
-    @log_snapshot_after_test
-    def deploy_ha_one_controller_neutron_example_v3(self):
-        """Deploy cluster with one controller and example plugin v3
-
-        Scenario:
-            1. Upload plugin to the master node
-            2. Install plugin
-            3. Create cluster
-            4. Add 1 node with controller role
-            5. Add 1 node with compute role
-            6. Add 1 node with custom role
-            7. Deploy the cluster
-            8. Run network verification
-            9. Check plugin health
-            10.
Run OSTF - - Duration 35m - Snapshot deploy_ha_one_controller_neutron_example_v3 - """ - self.check_run("deploy_ha_one_controller_neutron_example_v3") - checkers.check_plugin_path_env( - var_name='EXAMPLE_PLUGIN_V3_PATH', - plugin_path=EXAMPLE_PLUGIN_V3_PATH - ) - - self.env.revert_snapshot("ready_with_3_slaves") - - # copy plugin to the master node - checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH) - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=EXAMPLE_PLUGIN_V3_PATH, - tar_target='/var' - ) - # install plugin - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={"propagate_task_deploy": True} - ) - - plugin_name = 'fuel_plugin_example_v3' - msg = "Plugin couldn't be enabled. Check plugin version. Test aborted" - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['fuel_plugin_example_v3'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.assert_os_services_ready(cluster_id) - self.fuel_web.verify_network(cluster_id) - - # check if slave-01 contain - # plugin+100.0.all - # plugin+100.all - # fuel_plugin_example_v3_sh] - slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01') - checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all') - checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all') - checkers.check_file_exists(slave1['ip'], - '/tmp/fuel_plugin_example_v3_sh') - checkers.check_file_exists(slave1['ip'], - '/tmp/fuel_plugin_example_v3_puppet') - - # check if fuel_plugin_example_v3_puppet called - # between netconfig and connectivity_tests - netconfig_str = 'MODULAR: netconfig/netconfig.pp' - plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp' - connect_str = 'MODULAR: netconfig/connectivity_tests.pp' - checkers.check_log_lines_order( - ip=slave1['ip'], - log_file_path='/var/log/puppet.log', - line_matcher=[netconfig_str, - plugin_str, - connect_str]) - - # check if slave-02 contain - # plugin+100.0.all - # plugin+100.al - slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02') - checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all') - checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all') - - # check if slave-03 contain - # plugin+100.0.all - # plugin+100.all - # fuel_plugin_example_v3_sh - # fuel_plugin_example_v3_puppet - slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03') - checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all') - checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all') - checkers.check_file_exists(slave3['ip'], - '/tmp/fuel_plugin_example_v3_sh') - checkers.check_file_exists(slave3['ip'], - '/tmp/fuel_plugin_example_v3_puppet') - - # check if service run on slave-03 - logger.debug("Checking service on node {0}".format('slave-03')) - - cmd = 'pgrep -f fuel-simple-service' - res_pgrep = self.ssh_manager.execute_on_remote( - ip=slave3['ip'], - cmd=cmd - ) - process_count = len(res_pgrep['stdout']) - assert_equal(1, process_count, - "There should be 1 process 'fuel-simple-service'," - " but {0} found {1} processes".format(cmd, process_count)) - - # curl to service - cmd_curl = 'curl localhost:8234' - 
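            # pgrep above only proves that a fuel-simple-service process
            # exists; the curl call below checks it actually answers on :8234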
self.ssh_manager.execute_on_remote( - ip=slave3['ip'], - cmd=cmd_curl - ) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3", - is_make=True) - - @test(depends_on=[deploy_ha_one_controller_neutron_example_v3], - groups=["delete_plugin_enabled_in_cluster"]) - @log_snapshot_after_test - def delete_plugin_enabled_in_cluster(self): - """Try remove plugin enabled in cluster - - Scenario: - 1. Try to remove plugin from cluster - - Duration 3m - """ - - self.env.revert_snapshot("deploy_ha_one_controller_neutron_example_v3", - skip_timesync=True) - cluster_id = self.fuel_web.get_last_created_cluster() - - enabled_plugins = self.fuel_web.\ - list_cluster_enabled_plugins(cluster_id) - for plugin in enabled_plugins: - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel plugins --remove {0}=={1}'.format(plugin['name'], - plugin['version']), - assert_ec_equal=[1] - ) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_example_ha"]) - @log_snapshot_after_test - def deploy_neutron_example_ha(self): - """Deploy cluster in ha mode with example plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 3 node with controller role - 5. Add 1 nodes with compute role - 6. Add 1 nodes with cinder role - 7. Deploy the cluster - 8. Run network verification - 9. check plugin health - 10. Run OSTF - - Duration 70m - Snapshot deploy_neutron_example_ha - - """ - checkers.check_plugin_path_env( - var_name='EXAMPLE_PLUGIN_PATH', - plugin_path=EXAMPLE_PLUGIN_PATH - ) - - self.env.revert_snapshot("ready_with_5_slaves") - - # copy plugin to the master node - checkers.check_archive_type(EXAMPLE_PLUGIN_PATH) - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=EXAMPLE_PLUGIN_PATH, - tar_target='/var' - ) - - # install plugin - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(EXAMPLE_PLUGIN_PATH)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={"propagate_task_deploy": True} - ) - - plugin_name = 'fuel_plugin_example' - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - - for node in ('slave-01', 'slave-02', 'slave-03'): - logger.debug("Start to check service on node {0}".format(node)) - cmd_curl = 'curl localhost:8234' - cmd = 'pgrep -f fuel-simple-service' - with self.fuel_web.get_ssh_for_node(node) as remote: - res_pgrep = remote.execute(cmd) - assert_equal(0, res_pgrep['exit_code'], - 'Failed with error {0} ' - 'on node {1}'.format(res_pgrep['stderr'], node)) - assert_equal(1, len(res_pgrep['stdout']), - 'Failed with error {0} on the ' - 'node {1}'.format(res_pgrep['stderr'], node)) - # curl to service - res_curl = remote.execute(cmd_curl) - assert_equal(0, res_pgrep['exit_code'], - 'Failed with error {0} ' - 'on node {1}'.format(res_curl['stderr'], node)) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_example_ha") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_example_ha_add_node"]) - @log_snapshot_after_test - def deploy_neutron_example_ha_add_node(self): - """Deploy and scale cluster in ha mode with example plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 1 node with controller role - 5. Add 1 nodes with compute role - 6. Add 1 nodes with cinder role - 7. Deploy the cluster - 8. Run network verification - 9. Check plugin health - 10. Add 2 nodes with controller role - 11. Deploy cluster - 12. Check plugin health - 13. Run OSTF - - Duration 150m - Snapshot deploy_neutron_example_ha_add_node - - """ - checkers.check_plugin_path_env( - var_name='EXAMPLE_PLUGIN_PATH', - plugin_path=EXAMPLE_PLUGIN_PATH - ) - - self.env.revert_snapshot("ready_with_5_slaves") - - # copy plugin to the master node - checkers.check_archive_type(EXAMPLE_PLUGIN_PATH) - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=EXAMPLE_PLUGIN_PATH, - tar_target='/var') - - # install plugin - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(EXAMPLE_PLUGIN_PATH)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'], - "propagate_task_deploy": True - } - ) - - plugin_name = 'fuel_plugin_example' - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - - # check if service ran on controller - logger.debug("Start to check service on node {0}".format('slave-01')) - cmd_curl = 'curl localhost:8234' - cmd = 'pgrep -f fuel-simple-service' - - with self.fuel_web.get_ssh_for_node('slave-01') as remote: - res_pgrep = remote.execute(cmd) - assert_equal(0, res_pgrep['exit_code'], - 'Failed with error {0}'.format(res_pgrep['stderr'])) - assert_equal(1, len(res_pgrep['stdout']), - 'Failed with error {0}'.format(res_pgrep['stderr'])) - # curl to service - res_curl = remote.execute(cmd_curl) - assert_equal(0, res_pgrep['exit_code'], - 'Failed with error {0}'.format(res_curl['stderr'])) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-04': ['controller'], - 'slave-05': ['controller'], - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - for node in ('slave-01', 'slave-04', 'slave-05'): - logger.debug("Start to check service on node {0}".format(node)) - cmd_curl = 'curl localhost:8234' - cmd = 'pgrep -f fuel-simple-service' - - with self.fuel_web.get_ssh_for_node(node) as remote: - res_pgrep = remote.execute(cmd) - assert_equal(0, res_pgrep['exit_code'], - 'Failed with error {0} ' - 'on node {1}'.format(res_pgrep['stderr'], node)) - assert_equal(1, len(res_pgrep['stdout']), - 'Failed with error {0} on the ' - 'node {1}'.format(res_pgrep['stderr'], node)) - # curl to service - res_curl = remote.execute(cmd_curl) - assert_equal(0, res_pgrep['exit_code'], - 'Failed with error {0} ' - 'on node {1}'.format(res_curl['stderr'], node)) - - # add verification here - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_example_ha_add_node") diff --git a/fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example_postdeploy.py b/fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example_postdeploy.py deleted file mode 100644 index 352e3ba4a..000000000 --- a/fuelweb_test/tests/plugins/plugin_example/test_fuel_plugin_example_postdeploy.py +++ /dev/null @@ -1,580 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from os.path import basename - -from proboscis.asserts import assert_true -from proboscis import test - -from core.helpers.setup_teardown import setup_teardown - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.decorators import upload_manifests -from fuelweb_test.helpers.utils import get_node_hiera_roles -from fuelweb_test.helpers import checkers -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import EXAMPLE_PLUGIN_V4_PATH -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["fuel_plugins", "install_plugin_after_cluster_create"]) -class ExamplePluginPostDeploy(TestBasic): - """ExamplePlugin.""" # TODO documentation - - def __init__(self): - super(ExamplePluginPostDeploy, self).__init__() - checkers.check_plugin_path_env( - var_name='EXAMPLE_PLUGIN_V4_PATH', - plugin_path=EXAMPLE_PLUGIN_V4_PATH - ) - - self.__primary_controller = None - self.__controllers = None - self.__plugin_nodes = None - self.__cluster_id = None - - def deploy_cluster_wait(self, check_services=True): - self.fuel_web.deploy_cluster_wait( - cluster_id=self.cluster_id, - check_services=check_services) - del self.controllers - - def create_cluster(self): - self.cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'] - } - ) - - def clean_up(self): - del self.primary_controller - del self.controllers - del self.plugin_nodes - del self.cluster_id - - @property - def cluster_id(self): - if self.__cluster_id is None: - self.__cluster_id = self.__get_cluster_id() - return self.__cluster_id - - @cluster_id.setter - def cluster_id(self, cluster_id): - del self.controllers - del self.primary_controller - del self.plugin_nodes - self.__cluster_id = cluster_id - - @cluster_id.deleter - def cluster_id(self): - self.cluster_id = None - - @property - def controllers(self): - if self.__controllers is None: - self.__controllers = self.__get_nodelist_with_role('controller') - return self.__controllers - - @controllers.deleter - def controllers(self): - self.__controllers = None - - @property - def primary_controller(self): - if self.__primary_controller is None: - self.__primary_controller = self.__get_primary_controller() - return self.__primary_controller - - @primary_controller.deleter - def primary_controller(self): - self.__primary_controller = None - - @property - def plugin_nodes(self): - if self.__plugin_nodes is None: - self.__plugin_nodes = \ - self.__get_nodelist_with_role('fuel_plugin_example_v4') - return self.__plugin_nodes - - @plugin_nodes.deleter - def plugin_nodes(self): - self.__plugin_nodes = None - - @upload_manifests - def __get_cluster_id(self): - return self.fuel_web.get_last_created_cluster() - - def install_plugin_v4(self): - # copy plugin to the master node - checkers.check_archive_type(EXAMPLE_PLUGIN_V4_PATH) - self.env.admin_actions.upload_plugin( - plugin=EXAMPLE_PLUGIN_V4_PATH) - self.env.admin_actions.install_plugin( - plugin_file_name=basename(EXAMPLE_PLUGIN_V4_PATH)) - - def check_plugin_v4_is_running(self): - for node in self.plugin_nodes: - self.__check_plugin_v4_on_node(node=node) - - def __check_plugin_v4_on_node(self, node="slave-01"): - logger.debug("Start to check service on node {0}".format(node)) - - ip = 
self.fuel_web.get_node_ip_by_devops_name(node) - self.ssh_manager.execute_on_remote(ip, 'pgrep -f fuel-simple-service') - self.ssh_manager.execute_on_remote(ip, 'curl localhost:8234') - - def check_plugin_v4_is_installed(self): - plugin_name = 'fuel_plugin_example_v4_hotpluggable' - msg = "Plugin couldn't be enabled. Check plugin version." - assert_true( - self.fuel_web.check_plugin_exists(self.cluster_id, plugin_name), - msg) - - def enable_plugin_v4(self): - plugin_name = 'fuel_plugin_example_v4_hotpluggable' - self.check_plugin_v4_is_installed() - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(self.cluster_id, plugin_name, options) - - def __get_nodelist_with_role(self, role='controller'): - devops_nodes = [ - self.fuel_web.get_devops_node_by_nailgun_node(node) for node - in self.fuel_web.client.list_cluster_nodes(self.cluster_id) - if role in node['roles'] and 'ready' in node['status']] - return [node.name for node in devops_nodes] - - def __get_primary_controller(self): - for controller_node in self.controllers: - with self.fuel_web.get_ssh_for_node(controller_node) as remote: - hiera_roles = get_node_hiera_roles(remote) - if "primary-controller" in hiera_roles: - return controller_node - - def redeploy_controller_nodes(self, nodes): - if self.primary_controller in nodes: - del self.primary_controller - - logger.info('Removing nodes {!s} from cluster'.format(nodes)) - self.fuel_web.update_nodes( - cluster_id=self.cluster_id, - nodes_dict={node: ['controller'] for node in nodes}, - pending_addition=False, pending_deletion=True - ) - self.deploy_cluster_wait(check_services=True) - - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - logger.info('Re-adding nodes {!s} from cluster'.format(nodes)) - self.fuel_web.update_nodes( - cluster_id=self.cluster_id, - nodes_dict={node: ['controller'] for node in nodes}, - ) - self.deploy_cluster_wait(check_services=True) - - @test( - depends_on=[SetupEnvironment.prepare_slaves_5], - groups=[ - "install_plugin_after_create", - "three_ctrl_install_enable_after_create"]) - @log_snapshot_after_test - @setup_teardown(setup=clean_up, teardown=clean_up) - def three_ctrl_install_enable_after_create(self): - """Install and enable plugin after cluster create - - Scenario: - 1. Create cluster - 2. Upload plugin to the master node - 3. Install plugin - 4. Enable plugin - 5. Add 3 nodes with controller role - 6. Add 1 node with compute role - 7. Add 1 node with fuel_plugin_example_v4 role - 8. Deploy the cluster - 9. Run network verification - 10. Check plugin on ALL fuel_plugin_example_v4 nodes - 11. 
Run OSTF - - Duration 100m - Snapshot three_ctrl_install_enable_after_create - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(1, initialize=True) - - self.create_cluster() - - self.show_step(2) - self.show_step(3) - self.install_plugin_v4() - - self.show_step(4) - self.enable_plugin_v4() - - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['fuel_plugin_example_v4'], - 'slave-05': ['compute'], - } - ) - - self.show_step(8) - self.deploy_cluster_wait() - - self.show_step(9) - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - self.show_step(10) - self.check_plugin_v4_is_running() - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=self.cluster_id) - - self.env.make_snapshot("three_ctrl_install_enable_after_create") - - @test( - depends_on=[SetupEnvironment.prepare_slaves_5], - groups=[ - "install_plugin_after_create", - "three_ctrl_install_after_create"]) - @log_snapshot_after_test - @setup_teardown(setup=clean_up, teardown=clean_up) - def three_ctrl_install_after_create(self): - """Install plugin after cluster create - - Scenario: - 1. Create cluster - 2. Upload plugin to the master node - 3. Install plugin - 4. Verify, that plugin is recognized - 5. Add 3 nodes with controller role - 6. Add 2 node with compute role - 7. Deploy the cluster - 8. Run network verification - 9. Run OSTF - - Duration 100m - Snapshot three_ctrl_install_after_create - """ - # self.check_run('three_ctrl_install_after_create') - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(1, initialize=True) - - self.create_cluster() - - self.show_step(2) - self.show_step(3) - self.install_plugin_v4() - - self.show_step(4) - self.check_plugin_v4_is_installed() - - self.show_step(5) - self.show_step(6) - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - } - ) - - self.show_step(7) - self.deploy_cluster_wait() - - self.show_step(8) - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=self.cluster_id) - - self.env.make_snapshot( - "three_ctrl_install_after_create", - is_make=True - ) - - @test( - depends_on=[three_ctrl_install_after_create], - groups=[ - "install_plugin_after_create", - "three_ctrl_enable_installed_after_create_redeploy"], - enabled=False) - @log_snapshot_after_test - @setup_teardown(setup=clean_up, teardown=clean_up) - def three_ctrl_enable_installed_after_create_redeploy(self): - """Enable plugin, installed after create, and re-deploy node - - Scenario: - 1. Enable plugin - 2. Re-deploy 1 controller node at cluster (Node Under Test) - 3. Run network verification - 4. Check plugin on ALL controller nodes - 5. 
Run OSTF - - Duration 35m - Snapshot three_ctrl_enable_installed_after_create_redeploy - """ - self.env.revert_snapshot("three_ctrl_install_after_create") - - self.show_step(1, initialize=True) - - self.enable_plugin_v4() - - # Select node for testing on it - node = self.primary_controller - logger.info('Node under test: {!s}'.format(node)) - - self.show_step(2) - self.redeploy_controller_nodes(nodes=[node]) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - self.show_step(4) - self.check_plugin_v4_is_running() - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id=self.cluster_id) - - self.env.make_snapshot( - "three_ctrl_enable_installed_after_create_redeploy") - - @test( - depends_on=[three_ctrl_install_after_create], - groups=[ - "install_plugin_after_create", - "five_ctrl_enable_installed_after_create_add"]) - @log_snapshot_after_test - @setup_teardown(setup=clean_up, teardown=clean_up) - def five_ctrl_enable_installed_after_create_add(self): - """Enable plugin, installed after create, and add nodes - - Scenario: - 1. Enable plugin - 2. Deploy 2 new fuel_plugin_example_v4 node at cluster - (Nodes Under Test) - 3. Run network verification - 4. Check plugin on ALL fuel_plugin_example_v4 nodes - 5. Run OSTF - - Duration 130m - Snapshot five_ctrl_enable_installed_after_create_add - """ - self.env.revert_snapshot("three_ctrl_install_after_create") - - self.show_step(1, initialize=True) - - self.enable_plugin_v4() - - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:7]) - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-06': ['fuel_plugin_example_v4'], - 'slave-07': ['fuel_plugin_example_v4'], - } - ) - self.deploy_cluster_wait() - - self.show_step(3) - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - self.show_step(4) - self.check_plugin_v4_is_running() - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id=self.cluster_id) - - self.env.make_snapshot("five_ctrl_enable_installed_after_create_add") - - @test( - depends_on=[SetupEnvironment.prepare_slaves_5], - groups=[ - "install_plugin_after_deploy", - "three_ctrl_install_after_deploy"]) - @log_snapshot_after_test - @setup_teardown(setup=clean_up, teardown=clean_up) - def three_ctrl_install_after_deploy(self): - """Install plugin after cluster deployment - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 node with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Upload plugin to the master node - 7. Install plugin - 8. Verify, that plugin is recognized - 9. Run network verification - 10. 
Run OSTF - - Duration 100m - Snapshot three_ctrl_install_after_deploy - """ - # self.check_run('three_ctrl_install_after_deploy') - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(1, initialize=True) - - self.create_cluster() - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - } - ) - - self.show_step(4) - self.deploy_cluster_wait() - - self.show_step(5) - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - self.show_step(6) - self.show_step(7) - self.install_plugin_v4() - - self.show_step(8) - self.check_plugin_v4_is_installed() - - self.show_step(9) - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=self.cluster_id) - - self.env.make_snapshot( - "three_ctrl_install_after_deploy", - is_make=True - ) - - @test( - depends_on=[three_ctrl_install_after_deploy], - groups=[ - "install_plugin_after_deploy", - "three_ctrl_enable_installed_after_deploy_redeploy"], - enabled=False) - @log_snapshot_after_test - @setup_teardown(setup=clean_up, teardown=clean_up) - def three_ctrl_enable_installed_after_deploy_redeploy(self): - """Enable plugin, installed after deploy, and re-deploy node - - Scenario: - 1. Enable plugin - 2. Re-deploy 1 controller node at cluster (Node Under Test) - 3. Run network verification - 4. Check plugin on ALL controller nodes - 5. Run OSTF - - Duration 35m - Snapshot three_ctrl_enable_installed_after_deploy_redeploy - """ - - self.env.revert_snapshot("three_ctrl_install_after_deploy") - - self.show_step(1, initialize=True) - self.enable_plugin_v4() - - # Select node for testing on it - node = self.primary_controller - logger.info('Node under test: {!s}'.format(node)) - - self.show_step(2) - self.redeploy_controller_nodes(nodes=[node]) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id=self.cluster_id) - - self.show_step(4) - self.check_plugin_v4_is_running() - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id=self.cluster_id) - - self.env.make_snapshot( - "three_ctrl_enable_installed_after_deploy_redeploy") - - @test( - depends_on=[three_ctrl_install_after_deploy], - groups=[ - "install_plugin_after_deploy", - "five_ctrl_enable_installed_after_deploy_add"]) - @log_snapshot_after_test - @setup_teardown(setup=clean_up, teardown=clean_up) - def five_ctrl_enable_installed_after_deploy_add(self): - """Enable plugin, installed after deploy, and add nodes - - Scenario: - 1. Enable plugin - 2. Deploy 2 new fuel_plugin_example_v4 node at cluster - (Nodes Under Test) - 3. Run network verification - 4. Check plugin on ALL fuel_plugin_example_v4 nodes - 5. 
Run OSTF
-
-        Duration 130m
-        Snapshot five_ctrl_enable_installed_after_deploy_add
-        """
-
-        self.env.revert_snapshot("three_ctrl_install_after_deploy")
-
-        self.show_step(1, initialize=True)
-        self.enable_plugin_v4()
-
-        self.show_step(2)
-        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:7])
-        self.fuel_web.update_nodes(
-            self.cluster_id,
-            {
-                'slave-06': ['fuel_plugin_example_v4'],
-                'slave-07': ['fuel_plugin_example_v4']
-            }
-        )
-        self.deploy_cluster_wait()
-
-        self.show_step(3)
-        self.fuel_web.verify_network(cluster_id=self.cluster_id)
-
-        self.show_step(4)
-        self.check_plugin_v4_is_running()
-
-        self.show_step(5)
-        self.fuel_web.run_ostf(cluster_id=self.cluster_id)
-
-        self.env.make_snapshot("five_ctrl_enable_installed_after_deploy_add")
diff --git a/fuelweb_test/tests/plugins/plugin_glusterfs/__init__.py b/fuelweb_test/tests/plugins/plugin_glusterfs/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/fuelweb_test/tests/plugins/plugin_glusterfs/test_plugin_glusterfs.py b/fuelweb_test/tests/plugins/plugin_glusterfs/test_plugin_glusterfs.py
deleted file mode 100644
index dbf71f650..000000000
--- a/fuelweb_test/tests/plugins/plugin_glusterfs/test_plugin_glusterfs.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright 2014 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import os
-
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_true
-from proboscis import test
-
-from fuelweb_test.helpers.checkers import check_plugin_path_env
-from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.helpers import utils
-from fuelweb_test.settings import DEPLOYMENT_MODE
-from fuelweb_test.settings import GLUSTER_CLUSTER_ENDPOINT
-from fuelweb_test.settings import GLUSTER_PLUGIN_PATH
-from fuelweb_test.tests.base_test_case import SetupEnvironment
-from fuelweb_test.tests.base_test_case import TestBasic
-
-
-@test(groups=["plugins"])
-class GlusterfsPlugin(TestBasic):
-    """GlusterfsPlugin."""  # TODO documentation
-    def __init__(self):
-        super(GlusterfsPlugin, self).__init__()
-        check_plugin_path_env(
-            var_name='GLUSTER_PLUGIN_PATH',
-            plugin_path=GLUSTER_PLUGIN_PATH
-        )
-
-    @classmethod
-    def check_glusterfs_conf(cls, remote, path, gfs_endpoint):
-        cmd = ' cat {0}'.format(path)
-        result = remote.execute(cmd)
-        assert_equal(result['exit_code'],
-                     0,
-                     'Command {0} execution failed with non-zero exit code. '
-                     'Actual result {1} stderr {2}'.format(
-                         cmd, result['exit_code'], result['stderr']))
-        assert_true(gfs_endpoint in ''.join(result['stdout']),
-                    'Cannot find gfs endpoint in gfs configs')
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=["deploy_ha_one_controller_glusterfs"])
-    @log_snapshot_after_test
-    def deploy_ha_one_controller_glusterfs_simple(self):
-        """Deploy cluster with one controller and glusterfs plugin
-
-        Scenario:
-            1. Upload plugin to the master node
-            2. Install plugin
-            3. Create cluster
-            4. Add 1 node with controller and cinder roles
-            5. Add 1 node with compute role
-            6. Add 1 node with cinder role
-            7. Deploy the cluster
-            8. Run network verification
-            9. Check plugin health
-            10. Run OSTF
-
-        Duration 35m
-        Snapshot deploy_ha_one_controller_glusterfs
-        """
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        # copy plugin to the master node
-        utils.upload_tarball(
-            ip=self.ssh_manager.admin_ip,
-            tar_path=GLUSTER_PLUGIN_PATH,
-            tar_target='/var')
-
-        # install plugin
-        utils.install_plugin_check_code(
-            ip=self.ssh_manager.admin_ip,
-            plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-        )
-
-        plugin_name = 'external_glusterfs'
-        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
-        assert_true(
-            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
-            msg)
-        options = {'metadata/enabled': True,
-                   'endpoint/value': GLUSTER_CLUSTER_ENDPOINT}
-        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
-
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller', 'cinder'],
-                'slave-02': ['compute'],
-                'slave-03': ['cinder']
-            }
-        )
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        for node in ('slave-01', 'slave-03'):
-            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
-            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-                self.check_glusterfs_conf(
-                    remote=remote,
-                    path='/etc/cinder/glusterfs',
-                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
-
-        self.fuel_web.verify_network(cluster_id)
-
-        self.fuel_web.run_ostf(
-            cluster_id=cluster_id)
-
-        self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
-          groups=["deploy_glusterfs_ha"])
-    @log_snapshot_after_test
-    def deploy_glusterfs_ha(self):
-        """Deploy cluster in ha mode with glusterfs plugin
-
-        Scenario:
-            1. Upload plugin to the master node
-            2. Install plugin
-            3. Create cluster
-            4. Add 1 node with controller role
-            5. Add 1 node with compute role
-            6. Add 1 node with cinder role
-            7. Deploy the cluster
-            8. Run network verification
-            9. Check plugin health
-            10. Run OSTF
-            11. Add 2 cinder + controller nodes
-            12. Re-deploy cluster
-            13. Check plugin health
-            14. Run OSTF
-
-        Duration 50m
-        Snapshot deploy_glusterfs_ha
-
-        """
-        self.env.revert_snapshot("ready_with_5_slaves")
-
-        # copy plugin to the master node
-        utils.upload_tarball(
-            ip=self.ssh_manager.admin_ip,
-            tar_path=GLUSTER_PLUGIN_PATH,
-            tar_target='/var')
-
-        # install plugin
-
-        utils.install_plugin_check_code(
-            ip=self.ssh_manager.admin_ip,
-            plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-        )
-
-        plugin_name = 'external_glusterfs'
-        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
-        assert_true(
-            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
-            msg)
-        options = {'metadata/enabled': True,
-                   'endpoint/value': GLUSTER_CLUSTER_ENDPOINT}
-        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
-
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['cinder']
-            }
-        )
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        _ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            self.check_glusterfs_conf(
-                remote=remote,
-                path='/etc/cinder/glusterfs',
-                gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
-
-        self.fuel_web.verify_network(cluster_id)
-
-        self.fuel_web.run_ostf(
-            cluster_id=cluster_id)
-
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-04': ['controller', 'cinder'],
-                'slave-05': ['controller', 'cinder'],
-            }
-        )
-
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        for node in ('slave-03', 'slave-04', 'slave-05'):
-            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
-            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-                self.check_glusterfs_conf(
-                    remote=remote,
-                    path='/etc/cinder/glusterfs',
-                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
-
-        self.fuel_web.run_ostf(
-            cluster_id=cluster_id)
-
-        self.env.make_snapshot("deploy_glusterfs_ha")
diff --git a/fuelweb_test/tests/plugins/plugin_lbaas/__init__.py b/fuelweb_test/tests/plugins/plugin_lbaas/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/fuelweb_test/tests/plugins/plugin_lbaas/test_plugin_lbaas.py b/fuelweb_test/tests/plugins/plugin_lbaas/test_plugin_lbaas.py
deleted file mode 100644
index 8112e56a9..000000000
--- a/fuelweb_test/tests/plugins/plugin_lbaas/test_plugin_lbaas.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright 2014 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import os
-import traceback
-
-from devops.helpers.helpers import wait
-from proboscis import asserts
-from proboscis import test
-
-from fuelweb_test.helpers.checkers import check_plugin_path_env
-from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.helpers import os_actions
-from fuelweb_test.helpers import utils
-from fuelweb_test import logger
-from fuelweb_test.settings import DEPLOYMENT_MODE_SIMPLE
-from fuelweb_test.settings import LBAAS_PLUGIN_PATH
-from fuelweb_test.tests.base_test_case import SetupEnvironment
-from fuelweb_test.tests.base_test_case import TestBasic
-
-
-@test(enabled=False, groups=["plugins"])
-class LbaasPlugin(TestBasic):
-    """LbaasPlugin."""  # TODO documentation
-    def __init__(self):
-        super(LbaasPlugin, self).__init__()
-        check_plugin_path_env(
-            var_name='LBAAS_PLUGIN_PATH',
-            plugin_path=LBAAS_PLUGIN_PATH
-        )
-
-    @classmethod
-    def check_neutron_agents_statuses(cls, os_conn):
-        agents_list = os_conn.list_agents()
-
-        for a in agents_list['agents']:
-            asserts.assert_equal(
-                a['alive'], True,
-                'Neutron agent {0} is not alive'.
-                format(a['binary']))
-            asserts.assert_true(
-                a['admin_state_up'],
-                "Admin state is down for agent {0}".format(a['binary']))
-
-        lb_agent = [a for a in agents_list["agents"]
-                    if a['binary'] == 'neutron-lbaas-agent']
-
-        logger.debug("LbaaS agent list is {0}".format(lb_agent))
-
-        asserts.assert_equal(
-            len(lb_agent), 1,
-            'There is no LbaaS agent in neutron agent list output')
-
-    @classmethod
-    def check_lbaas_work(cls, os_conn):
-        # create pool
-        pool = os_conn.create_pool(pool_name='lbaas_pool')
-
-        logger.debug('pool is {0}'.format(pool))
-
-        # create vip
-        vip = os_conn.create_vip(name='lbaas_vip',
-                                 protocol='HTTP',
-                                 port=80,
-                                 pool=pool)
-
-        logger.debug('vip is {0}'.format(vip))
-
-        # get list of vips
-        lb_vip_list = os_conn.get_vips()
-
-        logger.debug(
-            'Initial state of vip is {0}'.format(
-                os_conn.get_vip(lb_vip_list['vips'][0]['id'])))
-
-        # wait for active status
-        try:
-            wait(lambda: os_conn.get_vip(
-                lb_vip_list['vips'][0]['id'])['vip']['status'] == 'ACTIVE',
-                timeout=120 * 60)
-        except Exception:
-            logger.error(traceback.format_exc())
-        vip_state = os_conn.get_vip(
-            lb_vip_list['vips'][0]['id'])['vip']['status']
-        asserts.assert_equal(
-            'ACTIVE', vip_state,
-            "Vip is not active, current state is {0}".format(vip_state))
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=["deploy_neutron_lbaas_simple"])
-    @log_snapshot_after_test
-    def deploy_neutron_lbaas_simple(self):
-        """Deploy cluster in simple mode with LbaaS plugin
-
-        Scenario:
-            1. Upload plugin to the master node
-            2. Install plugin
-            3. Create cluster
-            4. Add 1 node with controller role
-            5. Add 2 nodes with compute role
-            6. Deploy the cluster
-            7. Run network verification
-            8. Check health of lbaas agent on the node
-            9. Create pool and vip
-            10. Run OSTF
-
-        Duration 35m
-        Snapshot deploy_neutron_vlan_lbaas_simple
-
-        """
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        # copy plugin to the master node
-        utils.upload_tarball(
-            ip=self.ssh_manager.admin_ip,
-            tar_path=LBAAS_PLUGIN_PATH,
-            tar_target='/var')
-
-        # install plugin
-
-        utils.install_plugin_check_code(
-            ip=self.ssh_manager.admin_ip,
-            plugin=os.path.basename(LBAAS_PLUGIN_PATH))
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE_SIMPLE,
-        )
-
-        plugin_name = 'lbaas'
-        msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - logger.debug('we have lbaas element') - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - asserts.assert_equal(str(cluster['net_provider']), 'neutron') - - self.fuel_web.verify_network(cluster_id) - - controller = self.fuel_web.get_nailgun_node_by_name('slave-01') - os_conn = os_actions.OpenStackActions(controller['ip']) - - self.check_neutron_agents_statuses(os_conn) - - self.check_lbaas_work(os_conn) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_neutron_lbaas_simple_reset_ready"]) - @log_snapshot_after_test - def deploy_neutron_lbaas_simple_reset_ready(self): - """Deploy and re-deploy cluster in simple mode with LbaaS plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 1 node with controller role - 5. Add 1 nodes with compute role - 6. Deploy the cluster - 7. Run network verification - 8. Check health of lbaas agent on the node - 9. Create pool and vip - 10. Reset cluster - 11. Add 1 compute - 12. Re-deploy cluster - 13. Check health of lbaas agent on the node - 14. Create pool and vip - 15. Run OSTF - - Duration 65m - Snapshot deploy_neutron_lbaas_simple_reset_ready - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - # copy plugin to the master node - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=LBAAS_PLUGIN_PATH, - tar_target='/var') - - # install plugin - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(LBAAS_PLUGIN_PATH)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_SIMPLE, - ) - - plugin_name = 'lbaas' - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - logger.debug('we have lbaas element') - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - asserts.assert_equal(str(cluster['net_provider']), 'neutron') - - self.fuel_web.verify_network(cluster_id) - - controller = self.fuel_web.get_nailgun_node_by_name('slave-01') - os_conn = os_actions.OpenStackActions(controller['ip']) - - self.check_neutron_agents_statuses(os_conn) - - self.check_lbaas_work(os_conn) - - self.fuel_web.stop_reset_env_wait(cluster_id) - - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:2]) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-03': ['compute'], - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.check_neutron_agents_statuses(os_conn) - - self.check_lbaas_work(os_conn) - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_lbaas_simple_reset_ready") diff --git a/fuelweb_test/tests/plugins/plugin_murano/__init__.py b/fuelweb_test/tests/plugins/plugin_murano/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_murano/test_plugin_murano.py b/fuelweb_test/tests/plugins/plugin_murano/test_plugin_murano.py deleted file mode 100644 index 3b1b0f92c..000000000 --- a/fuelweb_test/tests/plugins/plugin_murano/test_plugin_murano.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["plugins", "murano_plugin"]) -class MuranoPlugin(TestBasic): - """Murano Plugin Tests.""" - def __init__(self): - super(MuranoPlugin, self).__init__() - check_plugin_path_env( - var_name='MURANO_PLUGIN_PATH', - plugin_path=settings.MURANO_PLUGIN_PATH - ) - - def setup_murano_plugin(self, - cluster_id, - murano_user='murano', - murano_db_password='murano_password', - cfapi=False, - glare=False, - apps_url='http://storage.apps.openstack.org/'): - plugin_name = 'detach-murano' - - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - "Plugin couldn't be enabled. Check plugin version. 
Test aborted") - plugin_options = { - 'metadata/enabled': True, - 'metadata/versions/murano_user_password': murano_user, - 'metadata/versions/murano_db_password': murano_db_password, - 'metadata/versions/murano_glance_artifacts/value': glare, - 'metadata/versions/murano_cfapi/value': cfapi, - 'metadata/versions/murano_repo_url/value': apps_url - } - self.fuel_web.update_plugin_data( - cluster_id, plugin_name, plugin_options) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_murano_with_glare_ha_one_controller"]) - @log_snapshot_after_test - def deploy_murano_with_glare_ha_one_controller(self): - """Deploy cluster in ha mode with murano plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 1 nodes with controller role - 5. Add 1 node with compute role - 6. Add 1 node with cinder role - 7. Add 1 node with murano role - 8. Deploy the cluster - 9. Run network verification - 10. Run sanity OSTF - 11. Run Murano Platform OSTF - - Duration 150m - Snapshot deploy_murano_with_glare_ha_one_controller - """ - self.env.revert_snapshot("ready_with_5_slaves") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.MURANO_PLUGIN_PATH, - tar_target="/var") - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(settings.MURANO_PLUGIN_PATH)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - configure_ssl=False - ) - - self.setup_murano_plugin(cluster_id, glare=True) - - self.fuel_web.update_nodes( - cluster_id, - { - "slave-01": ["controller"], - "slave-02": ["compute"], - "slave-03": ["cinder"], - "slave-04": ["murano-node"] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity']) - - logger.debug('Run OSTF platform tests') - - test_class_main = ('fuel_health.tests.tests_platform' - '.test_murano_linux.MuranoDeployLinuxServicesTests') - tests_names = ['test_deploy_dummy_app_with_glare'] - - test_classes = [] - - for test_name in tests_names: - test_classes.append('{0}.{1}'.format(test_class_main, - test_name)) - - for test_name in test_classes: - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=test_name, timeout=60 * 20) - - self.env.make_snapshot("deploy_murano_with_glare_ha_one_controller") diff --git a/fuelweb_test/tests/plugins/plugin_reboot/__init__.py b/fuelweb_test/tests/plugins/plugin_reboot/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_reboot/reboot_tasks.yaml b/fuelweb_test/tests/plugins/plugin_reboot/reboot_tasks.yaml deleted file mode 100644 index 79cb99664..000000000 --- a/fuelweb_test/tests/plugins/plugin_reboot/reboot_tasks.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# These tasks will be merged into deployment graph. Here you -# can specify new tasks for any roles, even built-in ones. 
- -- id: reboot_plugin-sleep-before - type: shell - role: ['primary-controller', 'controller', 'ceph-osd'] - version: 2.0.0 - requires: [pre_deployment_start] # version 1.0.0 - required_for: [reboot_plugin-create-file] - parameters: - cmd: sleep 20 - timeout: 30 - -- id: reboot_plugin-create-file - type: shell - role: ['primary-controller', 'controller', 'ceph-osd'] - version: 2.0.0 - requires: [reboot_plugin-sleep-before] # version 1.0.0 - required_for: [reboot_plugin-task] - parameters: - cmd: if ! ls /run/cloud-init/status.json;then mkdir /run/cloud-init; touch /run/cloud-init/status.json;fi - timeout: 30 - -- id: reboot_plugin-task - role: ['primary-controller', 'controller', 'ceph-osd'] - version: 2.0.0 - requires: [reboot_plugin-create-file] # version 1.0.0 - required_for: [reboot_plugin-sleep-after] - type: reboot - parameters: - timeout: 180 - -- id: reboot_plugin-sleep-after - role: ['primary-controller', 'controller', 'ceph-osd'] - version: 2.0.0 - requires: [reboot_plugin-task] # version 1.0.0 - required_for: [pre_deployment_end] - type: shell - parameters: - cmd: sleep 50 - timeout: 60 diff --git a/fuelweb_test/tests/plugins/plugin_reboot/test_plugin_reboot_task.py b/fuelweb_test/tests/plugins/plugin_reboot/test_plugin_reboot_task.py deleted file mode 100644 index 37596d901..000000000 --- a/fuelweb_test/tests/plugins/plugin_reboot/test_plugin_reboot_task.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import utils -from fuelweb_test import logger -from fuelweb_test.helpers.utils import YamlEditor -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.helpers.fuel_actions import FuelPluginBuilder -from fuelweb_test.helpers.decorators import log_snapshot_after_test - - -@test(groups=["fuel_plugins", "fuel_plugin_reboot"]) -class RebootPlugin(TestBasic): - """Test class for testing reboot task in plugins.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_cluster_with_reboot_plugin"]) - @log_snapshot_after_test - def deploy_cluster_with_reboot_plugin(self): - """Add pre-deployment reboot task to nailgun via plugin. - - Scenario: - 1. Revert snapshot with 5 nodes - 2. Download and install fuel-plugin-builder - 3. Create plugin with reboot task - 4. Build plugin and copy it in var directory - 5. Install plugin to fuel - 6. Create cluster and enable plugin - 7. Provision nodes - 8. Collect timestamps from nodes - 9. Deploy cluster - 10. 
Check if timestamps are changed - - Duration 40m - """ - # define some plugin related variables - plugin_name = 'reboot_plugin' - source_plugin_path = os.path.join('/root/', plugin_name) - plugin_path = '/var' - tasks_path = os.path.dirname(os.path.abspath(__file__)) - tasks_file = 'reboot_tasks.yaml' - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_5_slaves") - # let's get ssh client for the master node - - # initiate fuel plugin builder instance - self.show_step(2) - fpb = FuelPluginBuilder() - # install fuel_plugin_builder on master node - fpb.fpb_install() - # create plugin template on the master node - self.show_step(3) - fpb.fpb_create_plugin(source_plugin_path) - fpb.fpb_update_release_in_metadata(source_plugin_path) - # replace plugin tasks with our file - fpb.fpb_replace_plugin_content( - os.path.join(tasks_path, tasks_file), - os.path.join(source_plugin_path, 'deployment_tasks.yaml')) - # build plugin - self.show_step(4) - packet_name = fpb.fpb_build_plugin(source_plugin_path) - fpb.fpb_copy_plugin( - os.path.join(source_plugin_path, packet_name), plugin_path) - self.show_step(5) - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.join(plugin_path, packet_name)) - self.show_step(6) - # create cluster - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True - } - ) - # get plugins from fuel and enable our one - msg = "Plugin couldn't be enabled. Check plugin version. Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - logger.info('Cluster is {!s}'.format(cluster_id)) - - self.fuel_web.update_nodes( - cluster_id, - {'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['compute', 'ceph-osd'], - 'slave-03': ['compute'], - 'slave-04': ['ceph-osd']} - ) - # firstly, let's provision nodes - self.show_step(7) - self.fuel_web.provisioning_cluster_wait(cluster_id) - # after provision is done, collect timestamps from nodes - old_timestamps = {} - - nodes = { - 'slave-01': True, - 'slave-02': True, - 'slave-03': False, - 'slave-04': True - } - self.show_step(8) - for node in nodes: - logger.debug( - "Get init object creation time from node {0}".format(node)) - cmd = 'stat --printf=\'%Y\' /proc/1' - old_timestamps[node] = int( - self.ssh_manager.execute_on_remote( - ip=self.fuel_web.get_node_ip_by_devops_name(node), - cmd=cmd)['stdout_str'] - ) - - # start deploying nodes - # here nodes with controller and ceph roles should be rebooted - self.show_step(9) - self.fuel_web.deploy_cluster_wait_progress(cluster_id, 30) - - # collect new timestamps and check them - self.show_step(10) - for node in nodes: - logger.debug( - "Get init object creation time from node {0}".format(node)) - cmd = 'stat --printf=\'%Y\' /proc/1' - new_timestamp = int( - self.ssh_manager.execute_on_remote( - ip=self.fuel_web.get_node_ip_by_devops_name(node), - cmd=cmd)['stdout_str'] - ) - # compute node without ceph role shouldn't reboot - if not nodes[node]: - asserts.assert_equal( - new_timestamp, old_timestamps[node], - 'The new timestamp {0} is not equal to old one {1}, ' - 'but it shouldn\'t for {2} node' - .format(new_timestamp, old_timestamps[node], node) - ) - else: - # other nodes should be rebooted and have new timestamps - # greater than old - 
asserts.assert_true( - new_timestamp > old_timestamps[node], - 'The new timestamp {0} is not greater than old one {1} ' - 'but it should for node {2}' - .format(new_timestamp, old_timestamps[node], node) - ) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_cluster_with_reboot_plugin_timeout"]) - @log_snapshot_after_test - def deploy_cluster_with_reboot_plugin_timeout(self): - """Check deployment is failed by reboot task plugin. - - Scenario: - 1. Revert snapshot with 3 nodes - 2. Download and install fuel-plugin-builder - 3. Create plugin with reboot task, - set timeout for reboot task as 1 second - 4. Build plugin - 5. Install plugin to fuel - 6. Create cluster and enable plugin - 7. Provision nodes - 8. Deploy cluster - 9. Check that deployment task failed - 10. Check error msg at the logs - - Duration 15m - """ - # define some plugin related variables - plugin_name = 'timeout_plugin' - source_plugin_path = os.path.join('/root/', plugin_name) - plugin_path = '/var' - tasks_path = os.path.dirname(os.path.abspath(__file__)) - tasks_file = 'reboot_tasks.yaml' - # start reverting snapshot - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - # let's get ssh client for the master node - self.show_step(2) - # initiate fuel plugin builder instance - fpb = FuelPluginBuilder() - # install fuel_plugin_builder on master node - fpb.fpb_install() - self.show_step(3) - # create plugin template on the master node - fpb.fpb_create_plugin(source_plugin_path) - fpb.fpb_update_release_in_metadata(source_plugin_path) - # replace plugin tasks with our file - fpb.fpb_replace_plugin_content( - os.path.join(tasks_path, tasks_file), - os.path.join(source_plugin_path, 'deployment_tasks.yaml')) - # change timeout to a new value '1' - with YamlEditor( - os.path.join(source_plugin_path, 'deployment_tasks.yaml'), - fpb.admin_ip) as editor: - editor.content[2]['parameters']['timeout'] = 1 - # build plugin - self.show_step(4) - packet_name = fpb.fpb_build_plugin(source_plugin_path) - # copy plugin archive file - # to the /var directory on the master node - fpb.fpb_copy_plugin( - os.path.join(source_plugin_path, packet_name), - plugin_path) - # let's install plugin - self.show_step(5) - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.join(plugin_path, packet_name)) - # create cluster - self.show_step(6) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - # get plugins from fuel and enable it - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - logger.info('Cluster is {!s}'.format(cluster_id)) - - self.fuel_web.update_nodes( - cluster_id, - {'slave-01': ['controller', 'cinder']} - ) - self.show_step(7) - self.fuel_web.provisioning_cluster_wait(cluster_id) - logger.info('Start cluster #%s deployment', cluster_id) - self.show_step(8) - task = self.fuel_web.client.deploy_nodes(cluster_id) - self.show_step(9) - self.fuel_web.assert_task_failed(task) - - msg = 'reboot_plugin-task failed becausereboot timeout 1 expired' - cmd = 'grep "{0}" /var/log/astute/astute.log'.format(msg) - self.show_step(10) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, cmd=cmd, - err_msg='Failed to find reboot plugin warning message in logs') diff --git a/fuelweb_test/tests/plugins/plugin_vip_reservation/__init__.py b/fuelweb_test/tests/plugins/plugin_vip_reservation/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_vip_reservation/metadata.yaml b/fuelweb_test/tests/plugins/plugin_vip_reservation/metadata.yaml deleted file mode 100644 index 0d7f693e2..000000000 --- a/fuelweb_test/tests/plugins/plugin_vip_reservation/metadata.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: vip_reservation_plugin -title: vip_reservation_plugin -version: '3.0.0' -description: Build plugin for test vip_reservation_plugin -fuel_version: ['9.0'] -licenses: ['Apache License Version 2.0'] -authors: ['Fuel'] -homepage: 'https://github.com/openstack/fuel-plugins' -groups: [] -releases: -- os: ubuntu - version: '-9.0' - mode: ['ha'] - deployment_scripts_path: deployment_scripts/ - repository_path: repositories/ubuntu -package_version: '3.0.0' diff --git a/fuelweb_test/tests/plugins/plugin_vip_reservation/network_roles.yaml b/fuelweb_test/tests/plugins/plugin_vip_reservation/network_roles.yaml deleted file mode 100644 index b86e5b9b8..000000000 --- a/fuelweb_test/tests/plugins/plugin_vip_reservation/network_roles.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- id: "network_role_public" - default_mapping: "public" - properties: - subnet: true - gateway: false - vip: - - name: "reserved_pub" -- id: "network_role_management" - default_mapping: "management" - properties: - subnet: true - gateway: false - vip: - - name: "reserved_mng" \ No newline at end of file diff --git a/fuelweb_test/tests/plugins/plugin_vip_reservation/tasks.yaml b/fuelweb_test/tests/plugins/plugin_vip_reservation/tasks.yaml deleted file mode 100644 index fe51488c7..000000000 --- a/fuelweb_test/tests/plugins/plugin_vip_reservation/tasks.yaml +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/fuelweb_test/tests/plugins/plugin_vip_reservation/test_plugin_vip_reservation.py b/fuelweb_test/tests/plugins/plugin_vip_reservation/test_plugin_vip_reservation.py deleted file mode 100644 index 68f998fd8..000000000 --- a/fuelweb_test/tests/plugins/plugin_vip_reservation/test_plugin_vip_reservation.py +++ /dev/null @@ -1,402 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import os - -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import utils -from fuelweb_test import logger -from fuelweb_test.helpers.utils import YamlEditor -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.helpers.fuel_actions import FuelPluginBuilder -from fuelweb_test.helpers.decorators import log_snapshot_after_test - - -@test(groups=["fuel_plugins", "fuel_plugin_vip_reservation"]) -class VipReservation(TestBasic): - """Test class for testing allocation of vip for plugin.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["vip_reservation_for_plugin", - "vip_reservation_for_plugin_vlan", - "vip_reservation_for_plugin_vxlan"]) - @log_snapshot_after_test - def vip_reservation_for_plugin(self): - """Check vip reservation for fuel plugin - - Scenario: - 1. Revert snapshot with 3 nodes - 2. Download and install fuel-plugin-builder - 3. Create plugin with predefined network_roles.yaml - 4. Build and copy plugin to /var directory - 5. Install plugin to fuel - 6. Create cluster and enable plugin - 7. Deploy cluster - 8. Check vip reservation - - Duration 40m - """ - plugin_name = 'vip_reservation_plugin' - source_plugin_path = os.path.join('/root/', plugin_name) - plugin_path = '/var' - dir_path = os.path.dirname(os.path.abspath(__file__)) - tasks_file = 'tasks.yaml' - net_role_file = 'network_roles.yaml' - metadata_file = 'metadata.yaml' - namespace = 'haproxy' - - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - # initiate fuel plugin builder instance - fpb = FuelPluginBuilder() - # install fuel_plugin_builder on master node - self.show_step(2) - fpb.fpb_install() - # create plugin template on the master node - self.show_step(3) - fpb.fpb_create_plugin(source_plugin_path) - # replace plugin tasks, metadata, network_roles - fpb.fpb_replace_plugin_content( - os.path.join(dir_path, net_role_file), - os.path.join(source_plugin_path, net_role_file)) - fpb.fpb_replace_plugin_content( - os.path.join(dir_path, tasks_file), - os.path.join(source_plugin_path, tasks_file)) - fpb.fpb_replace_plugin_content( - os.path.join(dir_path, metadata_file), - os.path.join(source_plugin_path, metadata_file)) - # build plugin - self.show_step(4) - packet_name = fpb.fpb_build_plugin(source_plugin_path) - # copy plugin archive file from nailgun container - # to the /var directory on the master node - fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name), - plugin_path) - # let's install plugin - self.show_step(5) - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.join(plugin_path, packet_name)) - self.show_step(6) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - # get plugins from fuel and enable our one - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - logger.info('Cluster is {!s}'.format(cluster_id)) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute']} - ) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - with self.fuel_web.get_ssh_for_node('slave-01') as remote: - hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \ - "Hiera.logger = 'noop'; puts JSON.dump " \ - "(h.lookup('network_metadata', " \ - "[], {}, nil, nil))\"" - for vip in ('reserved_pub', 'reserved_mng'): - # get vips from hiera - vip_hiera = json.loads( - remote.execute( - hiera_json_out)['stdout'][0])["vips"][vip]["ipaddr"] - # get vips from database - vip_db = self.env.postgres_actions.run_query( - db='nailgun', - query="select ip_addr from ip_addrs where " - "vip_name = '\"'\"'{0}'\"'\"';".format(vip)) - vip_array = [vip_hiera, vip_db] - for ip in vip_array[1:]: - asserts.assert_equal( - vip_array[0], ip, - "Vip from hiera output {0} does not equal " - "to {1}".format(vip_array[0], ip)) - vip_pcs = remote.execute( - 'pcs resource show {0}{1}'.format( - 'vip__', vip))['exit_code'] - asserts.assert_not_equal(0, vip_pcs, - 'The vip__{0} was found in ' - 'pacemaker'.format(vip)) - vip_ns = remote.execute( - 'ip netns exec {0} ip a | grep {1}{2}'.format( - namespace, 'b_', vip))['exit_code'] - asserts.assert_not_equal(0, vip_ns, - 'The {0} was found in {1} ' - 'namespace'.format(vip, namespace)) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["vip_reservation_for_plugin_haproxy_ns", - "vip_reservation_for_plugin_haproxy_ns_vlan", - "vip_reservation_for_plugin_haproxy_ns_vxlan"]) - @log_snapshot_after_test - def vip_reservation_for_plugin_haproxy_ns(self): - """Check vip reservation for haproxy ns plugin - - Scenario: - 1. Revert snapshot with 3 nodes - 2. Download and install fuel-plugin-builder - 3. Create plugin with predefined network_roles.yaml - 4. Build and copy plugin to /var directory - 5. Install plugin to fuel - 6. Create cluster and enable plugin - 7. Deploy cluster - 8. 
Check vip reservation - - Duration 40m - """ - plugin_name = 'vip_reservation_plugin' - source_plugin_path = os.path.join('/root/', plugin_name) - plugin_path = '/var' - task_path = os.path.dirname(os.path.abspath(__file__)) - tasks_file = 'tasks.yaml' - net_role_file = 'network_roles.yaml' - metadata_file = 'metadata.yaml' - namespace = 'haproxy' - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - - # initiate fuel plugin builder instance - self.show_step(2) - fpb = FuelPluginBuilder() - # install fuel_plugin_builder on master node - fpb.fpb_install() - # create plugin template on the master node - self.show_step(3) - fpb.fpb_create_plugin(source_plugin_path) - # replace plugin tasks, metadata, network_roles - fpb.fpb_replace_plugin_content( - os.path.join(task_path, net_role_file), - os.path.join(source_plugin_path, net_role_file)) - fpb.fpb_replace_plugin_content( - os.path.join(task_path, tasks_file), - os.path.join(source_plugin_path, tasks_file)) - fpb.fpb_replace_plugin_content( - os.path.join(task_path, metadata_file), - os.path.join(source_plugin_path, metadata_file)) - - with YamlEditor(os.path.join(source_plugin_path, net_role_file), - ip=fpb.admin_ip) as editor: - editor.content[0]['properties']['vip'][0]['namespace'] = namespace - editor.content[1]['properties']['vip'][0]['namespace'] = namespace - # build plugin - self.show_step(4) - packet_name = fpb.fpb_build_plugin(source_plugin_path) - # copy plugin archive file - # to the /var directory on the master node - fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name), - plugin_path) - # let's install plugin - self.show_step(5) - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.join(plugin_path, packet_name)) - self.show_step(6) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - # get plugins from fuel and enable our one - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - logger.info('Cluster is {!s}'.format(cluster_id)) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute']} - ) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.show_step(8) - with self.fuel_web.get_ssh_for_node('slave-01') as remote: - hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \ - "Hiera.logger = 'noop'; " \ - "puts JSON.dump(h.lookup('network_metadata', " \ - "[], {}, nil, nil))\"" - for vip in ('reserved_pub', 'reserved_mng'): - # get vips from hiera - vip_hiera = json.loads( - remote.execute( - hiera_json_out)['stdout'][0])["vips"][vip]["ipaddr"] - # get vips from database - vip_db = self.env.postgres_actions.run_query( - db='nailgun', - query="select ip_addr from ip_addrs where " - "vip_name = '\"'\"'{0}'\"'\"';".format(vip)) - # get vips from corosync - vip_crm = remote.execute( - 'crm_resource --resource {0}{1} --get-parameter=ip'.format( - 'vip__', vip))['stdout'][0].rstrip() - # fet vips from namespace - vip_ns = remote.execute( - 'ip netns exec {0} ip -4 a show {1}{2}'.format( - namespace, 'b_', - vip))['stdout'][1].split(' ')[5].split('/')[0] - vip_array = [vip_hiera, vip_db, vip_crm, vip_ns] - for ip in vip_array[1:]: - asserts.assert_equal( - vip_array[0], ip, - "Vip from hiera output {0} does not equal " - "to {1}".format(vip_array[0], ip)) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["vip_reservation_for_plugin_custom_ns", - "vip_reservation_for_plugin_custom_ns_vlan", - "vip_reservation_for_plugin_custom_ns_vxlan"]) - @log_snapshot_after_test - def vip_reservation_for_plugin_custom_ns(self): - """Check vip reservation for custom ns plugin - - Scenario: - 1. Revert snapshot with 3 nodes - 2. Download and install fuel-plugin-builder - 3. Create plugin with predefined network_roles.yaml - 4. Build and copy plugin to /var - 5. Install plugin to fuel - 6. Create cluster and enable plugin - 7. Deploy cluster - 8. 
Check vip reservation - - Duration 40m - """ - plugin_name = 'vip_reservation_plugin' - source_plugin_path = os.path.join('/root/', plugin_name) - plugin_path = '/var' - task_path = os.path.dirname(os.path.abspath(__file__)) - tasks_file = 'tasks.yaml' - net_role_file = 'network_roles.yaml' - metadata_file = 'metadata.yaml' - namespace = 'custom_ns' - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(2) - # initiate fuel plugin builder instance - fpb = FuelPluginBuilder() - # install fuel_plugin_builder on master node - fpb.fpb_install() - # create plugin template on the master node - self.show_step(3) - fpb.fpb_create_plugin(source_plugin_path) - # replace plugin tasks, metadata, network_roles - fpb.fpb_replace_plugin_content( - os.path.join(task_path, net_role_file), - os.path.join(source_plugin_path, net_role_file)) - fpb.fpb_replace_plugin_content( - os.path.join(task_path, tasks_file), - os.path.join(source_plugin_path, tasks_file)) - fpb.fpb_replace_plugin_content( - os.path.join(task_path, metadata_file), - os.path.join(source_plugin_path, metadata_file)) - - with YamlEditor(os.path.join(source_plugin_path, net_role_file), - ip=fpb.admin_ip) as editor: - editor.content[0]['properties']['vip'][0]['namespace'] = namespace - editor.content[1]['properties']['vip'][0]['namespace'] = namespace - # build plugin - self.show_step(4) - packet_name = fpb.fpb_build_plugin(source_plugin_path) - # copy plugin archive file - # to the /var directory on the master node - fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name), - plugin_path) - self.show_step(5) - # let's install plugin - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.join(plugin_path, packet_name)) - self.show_step(6) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - # get plugins from fuel and enable our one - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - logger.info('Cluster is {!s}'.format(cluster_id)) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute']} - ) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - with self.fuel_web.get_ssh_for_node('slave-01') as remote: - hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \ - "Hiera.logger = 'noop'; " \ - "puts JSON.dump(h.lookup('network_metadata', " \ - "[], {}, nil, nil))\"" - for vip in ('reserved_pub', 'reserved_mng'): - # get vips from hiera - vip_hiera = json.loads( - remote.execute( - hiera_json_out)['stdout'][0])["vips"][vip]["ipaddr"] - # get vips from database - vip_db = self.env.postgres_actions.run_query( - db='nailgun', - query="select ip_addr from ip_addrs where " - "vip_name = '\"'\"'{0}'\"'\"';".format(vip)) - # get vips from corosync - vip_crm = remote.execute( - 'crm_resource --resource {0}{1} --get-parameter=ip'.format( - 'vip__', vip))['stdout'][0].rstrip() - # get vips from namespace - vip_ns = remote.execute( - 'ip netns exec {0} ip -4 a show {1}{2}'.format( - namespace, 'b_', - vip))['stdout'][1].split(' ')[5].split('/')[0] - vip_array = [vip_hiera, vip_db, vip_crm, vip_ns] - for ip in vip_array[1:]: - asserts.assert_equal( - vip_array[0], ip, - "Vip from hiera output {0} does not equal " - "to {1}".format(vip_array[0], ip)) diff --git a/fuelweb_test/tests/plugins/plugin_zabbix/__init__.py b/fuelweb_test/tests/plugins/plugin_zabbix/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/plugins/plugin_zabbix/test_plugin_zabbix.py b/fuelweb_test/tests/plugins/plugin_zabbix/test_plugin_zabbix.py deleted file mode 100644 index 78e4d1f9c..000000000 --- a/fuelweb_test/tests/plugins/plugin_zabbix/test_plugin_zabbix.py +++ /dev/null @@ -1,675 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import os - -import bs4 -from devops.helpers.helpers import wait -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_not_equal -from proboscis.asserts import assert_true -from proboscis import test -import requests -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import urllib -# pylint: enable=import-error - -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -class ZabbixWeb(object): - def __init__(self, public_vip, username, password, verify=False): - self.session = requests.Session() - self.base_url = "https://{0}/zabbix/".format(public_vip) - self.username = username - self.password = password - self.verify = verify - - def login(self): - login_params = urllib.parse.urlencode( - {'request': '', - 'name': self.username, - 'password': self.password, - 'autologin': 1, - 'enter': 'Sign in'}) - url = urllib.parse.urljoin(self.base_url, '?{0}'.format(login_params)) - response = self.session.post(url, verify=self.verify) - - assert_equal(response.status_code, 200, - "Login to Zabbix failed: {0}".format(response.content)) - - def get_trigger_statuses(self): - url = urllib.parse.urljoin(self.base_url, 'tr_status.php') - response = self.session.get(url, verify=self.verify) - - assert_equal(response.status_code, 200, - "Getting Zabbix trigger statuses failed: {0}" - .format(response.content)) - - return response.content - - def get_screens(self): - url = urllib.parse.urljoin(self.base_url, 'screens.php') - response = self.session.get(url, verify=self.verify) - - assert_equal(response.status_code, 200, - "Getting Zabbix screens failed: {0}" - .format(response.content)) - - return response.content - - -@test(groups=["plugins", "zabbix_plugins"]) -class ZabbixPlugin(TestBasic): - """ZabbixPlugin.""" - def __init__(self): - super(ZabbixPlugin, self).__init__() - check_plugin_path_env( - var_name='ZABBIX_PLUGIN_PATH', - plugin_path=settings.ZABBIX_PLUGIN_PATH - ) - - def setup_zabbix_plugin(self, - cluster_id, - zabbix_username='admin', - zabbix_password='zabbix'): - plugin_name = 'zabbix_monitoring' - - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - "Plugin couldn't be enabled. Check plugin version. Test aborted") - plugin_options = {'metadata/enabled': True, - 'username/value': zabbix_username, - 'password/value': zabbix_password} - self.fuel_web.update_plugin_data( - cluster_id, plugin_name, plugin_options) - - def setup_snmp_plugin(self, - cluster_id, - snmp_community='public'): - plugin_name = 'zabbix_snmptrapd' - - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - "Plugin couldn't be enabled. Check plugin version. Test aborted") - plugin_options = {'metadata/enabled': True, - 'community/value': snmp_community} - self.fuel_web.update_plugin_data( - cluster_id, plugin_name, plugin_options) - - def setup_snmp_emc_plugin(self, cluster_id): - plugin_name = 'zabbix_monitoring_emc' - - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - "Plugin couldn't be enabled. Check plugin version. 
Test aborted") - - plugin_options = {'metadata/enabled': True, - 'hosts/value': 'emc:10.109.2.2'} - self.fuel_web.update_plugin_data( - cluster_id, plugin_name, plugin_options) - - def setup_snmp_extreme_plugin(self, cluster_id): - plugin_name = 'zabbix_monitoring_extreme_networks' - - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - "Plugin couldn't be enabled. Check plugin version. Test aborted") - - plugin_options = {'metadata/enabled': True, - 'hosts/value': 'extreme:10.109.2.2'} - self.fuel_web.update_plugin_data( - cluster_id, plugin_name, plugin_options) - - @staticmethod - def check_event_message(zabbix_web, zabbix_hostgroup, message): - statuses_html = bs4.BeautifulSoup(zabbix_web.get_trigger_statuses()) - status_lines = statuses_html.find_all('tr', {'class': 'even_row'}) - - for status_line in status_lines: - host_span = status_line.find('span', {'class': 'link_menu'}) - if not host_span or host_span.get_text() != zabbix_hostgroup: - continue - - host_span = (status_line.find('span', {'class': 'pointer'}). - find('span', {'class': 'link_menu'})) - if host_span and message in host_span.get_text(): - return True - return False - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_zabbix_ha"]) - @log_snapshot_after_test - def deploy_zabbix_ha(self): - """Deploy cluster in ha mode with zabbix plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 3 nodes with controller role - 5. Add 1 node with compute role - 6. Add 1 node with cinder role - 7. Deploy the cluster - 8. Run network verification - 9. Run OSTF - 10. Check zabbix service in pacemaker - 11. Check login to zabbix dashboard - - Duration 70m - Snapshot deploy_zabbix_ha - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.ZABBIX_PLUGIN_PATH, - tar_target="/var") - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(settings.ZABBIX_PLUGIN_PATH)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - - zabbix_username = 'admin' - zabbix_password = 'zabbix' - self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password) - - self.fuel_web.update_nodes( - cluster_id, - { - "slave-01": ["controller"], - "slave-02": ["controller"], - "slave-03": ["controller"], - "slave-04": ["compute"], - "slave-05": ["cinder"] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.check_zabbix_configuration(cluster_id, zabbix_username, - zabbix_password) - - self.env.make_snapshot("deploy_zabbix_ha") - - def check_zabbix_configuration(self, cluster_id, user, password): - cmd = "crm resource status p_zabbix-server" - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - response = remote.execute(cmd)["stdout"][0] - assert_true("p_zabbix-server is running" in response, - "p_zabbix-server resource wasn't found in pacemaker:\n{0}" - .format(response)) - - public_vip = self.fuel_web.get_public_vip(cluster_id) - - zabbix_web = ZabbixWeb(public_vip, user, password) - zabbix_web.login() - - screens_html = bs4.BeautifulSoup(zabbix_web.get_screens()) - screens_links = screens_html.find_all('a') - assert_true(any('charts.php?graphid=' in link.get('href') - for link in screens_links), - "Zabbix screen page does not contain graphs:\n{0}". 
- format(screens_links)) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_zabbix_snmptrap_ha"]) - @log_snapshot_after_test - def deploy_zabbix_snmptrap_ha(self): - """Deploy cluster in ha mode with zabbix snmptrap plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugins - 3. Create cluster - 4. Add 3 nodes with controller role - 5. Add 1 node with compute role - 6. Add 1 node with cinder role - 7. Deploy the cluster - 8. Run network verification - 9. Run OSTF - 10. Check zabbix service in pacemaker - 11. Check login to zabbix dashboard - 12. Check SNMP services on controllers - 13. Check test SNMP trap - - Duration 70m - Snapshot deploy_zabbix_snmptrap_ha - - """ - check_plugin_path_env( - var_name='ZABBIX_SNMP_PLUGIN_PATH', - plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH - ) - - self.env.revert_snapshot("ready_with_5_slaves") - - for plugin in [settings.ZABBIX_PLUGIN_PATH, - settings.ZABBIX_SNMP_PLUGIN_PATH]: - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=plugin, - tar_target="/var") - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(plugin)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - - zabbix_username = 'admin' - zabbix_password = 'zabbix' - snmp_community = 'public' - - self.setup_zabbix_plugin(cluster_id) - self.setup_snmp_plugin(cluster_id, snmp_community) - - self.fuel_web.update_nodes( - cluster_id, - { - "slave-01": ["controller"], - "slave-02": ["controller"], - "slave-03": ["controller"], - "slave-04": ["compute"], - "slave-05": ["cinder"] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.check_zabbix_configuration(cluster_id, zabbix_username, - zabbix_password) - - for node_name in ['slave-01', 'slave-02', 'slave-03']: - with self.fuel_web.get_ssh_for_node(node_name) as remote: - cmd = 'pgrep {0}' - response = \ - ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"]) - assert_not_equal(response.strip(), "OK", - "Service {0} not started".format('snmptrapd')) - response = \ - ''.join(remote.execute(cmd.format('snmptt'))["stdout"]) - assert_not_equal(response.strip(), "OK", - "Service {0} not started".format('snmptt')) - - management_vip = self.fuel_web.get_mgmt_vip(cluster_id) - snmp_heartbeat_command = \ - ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1" - .format(snmp_community, management_vip)) - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - remote.execute("apt-get install snmp -y") - remote.execute(snmp_heartbeat_command) - - mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location( - 'slave-01', 'vip__management')[0] - mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node( - mgmt_vip_devops_node) - - with self.env.d_env.get_ssh_to_remote( - mgmt_vip_nailgun_node['ip']) as remote: - cmd = ('grep netSnmpExampleHeartbeatNotification ' - '/var/log/zabbix/zabbix_server.log | ' - 'grep "Status Events"') - - wait(lambda: remote.execute(cmd)['exit_code'] == 0, - timeout_msg='SNMP heartbeat status not found ' - ' in /var/log/zabbix/zabbix_server.log') - - self.env.make_snapshot("deploy_zabbix_snmptrap_ha") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_zabbix_snmp_emc_ha"]) - @log_snapshot_after_test - def deploy_zabbix_snmp_emc_ha(self): - """Deploy cluster in ha mode with zabbix emc plugin - - Scenario: - 1. 
Upload plugin to the master node - 2. Install plugins: zabbix, zabbix snmp and zabbix emc - 3. Create cluster - 4. Add 3 nodes with controller role - 5. Add 1 node with compute role - 6. Add 1 node with cinder role - 7. Deploy the cluster - 8. Run network verification - 9. Run OSTF - 10. Check EMC trigger with test SNMP message - - Duration 70m - Snapshot deploy_zabbix_snmp_emc_ha - - """ - check_plugin_path_env( - var_name='ZABBIX_SNMP_PLUGIN_PATH', - plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH - ) - check_plugin_path_env( - var_name='ZABBIX_SNMP_EMC_PLUGIN_PATH', - plugin_path=settings.ZABBIX_SNMP_EMC_PLUGIN_PATH - ) - - self.env.revert_snapshot("ready_with_5_slaves") - - for plugin in [settings.ZABBIX_PLUGIN_PATH, - settings.ZABBIX_SNMP_PLUGIN_PATH, - settings.ZABBIX_SNMP_EMC_PLUGIN_PATH]: - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=plugin, - tar_target="/var") - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(plugin)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - - zabbix_username = 'admin' - zabbix_password = 'zabbix' - snmp_community = 'public' - - self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password) - self.setup_snmp_plugin(cluster_id, snmp_community) - self.setup_snmp_emc_plugin(cluster_id) - - self.fuel_web.update_nodes( - cluster_id, - { - "slave-01": ["controller"], - "slave-02": ["controller"], - "slave-03": ["controller"], - "slave-04": ["compute"], - "slave-05": ["cinder"] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - management_vip = self.fuel_web.get_mgmt_vip(cluster_id) - snmp_emc_critical_command = \ - ("snmptrap -v 1 -c {snmp_community} {management_vip} " - "'.1.3.6.1.4.1.1981' {management_vip} 6 6 '10' .1.3.6.1.4.1.1981 " - "s 'null' .1.3.6.1.4.1.1981 s 'null' .1.3.6.1.4.1.1981 s 'a37'" - .format(snmp_community=snmp_community, - management_vip=management_vip)) - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - remote.execute("apt-get install snmp -y") - remote.execute(snmp_emc_critical_command) - - public_vip = self.fuel_web.get_public_vip(cluster_id) - zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password) - zabbix_web.login() - - wait(lambda: self.check_event_message( - zabbix_web, 'emc', 'SNMPtrigger Critical'), - timeout_msg='SNMPtrigger Critical event not found in Zabbix') - - self.env.make_snapshot("deploy_zabbix_snmp_emc_ha") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_zabbix_snmp_extreme_ha"]) - @log_snapshot_after_test - def deploy_zabbix_snmp_extreme_ha(self): - """Deploy cluster in ha mode with zabbix snmptrap plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugins - 3. Create cluster - 4. Add 3 nodes with controller role - 5. Add 1 node with compute role - 6. Add 1 node with cinder role - 7. Deploy the cluster - 8. Run network verification - 9. Run OSTF - 10. 
Check Extreme Switch trigger with test SNMP message - - Duration 70m - Snapshot deploy_zabbix_snmp_extreme_ha - - """ - check_plugin_path_env( - var_name='ZABBIX_SNMP_PLUGIN_PATH', - plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH - ) - check_plugin_path_env( - var_name='ZABBIX_SNMP_EXTREME_PLUGIN_PATH', - plugin_path=settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH - ) - self.env.revert_snapshot("ready_with_5_slaves") - - for plugin in [settings.ZABBIX_PLUGIN_PATH, - settings.ZABBIX_SNMP_PLUGIN_PATH, - settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH]: - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=plugin, - tar_target="/var") - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(plugin)) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - - zabbix_username = 'admin' - zabbix_password = 'zabbix' - snmp_community = 'public' - - self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password) - self.setup_snmp_plugin(cluster_id, snmp_community) - self.setup_snmp_extreme_plugin(cluster_id) - - self.fuel_web.update_nodes( - cluster_id, - { - "slave-01": ["controller"], - "slave-02": ["controller"], - "slave-03": ["controller"], - "slave-04": ["compute"], - "slave-05": ["cinder"] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - management_vip = self.fuel_web.get_mgmt_vip(cluster_id) - snmp_extreme_critical_command = \ - ("snmptrap -v 1 -c {snmp_community} {management_vip} " - "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916" - " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'" - .format(snmp_community=snmp_community, - management_vip=management_vip)) - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - remote.execute("apt-get install snmp -y") - remote.execute(snmp_extreme_critical_command) - - public_vip = self.fuel_web.get_public_vip(cluster_id) - zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password) - zabbix_web.login() - - wait(lambda: self.check_event_message( - zabbix_web, 'extreme', 'Power Supply Failed'), - timeout_msg='Power Supply Failed event not found in Zabbix') - - self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_zabbix_ceph_ha"]) - @log_snapshot_after_test - def deploy_zabbix_ceph_ha(self): - """Deploy cluster in ha mode with zabbix plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 3 nodes with controller,ceph-osd roles - 5. Add 2 node with compute,ceph-osd roles - 6. Deploy the cluster - 7. Run network verification - 8. Run OSTF - 9. Check zabbix service in pacemaker - 10. 
Check login to zabbix dashboard - - Duration 180m - Snapshot deploy_zabbix_ceph_ha - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.ZABBIX_PLUGIN_PATH, - tar_target="/var") - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(settings.ZABBIX_PLUGIN_PATH)) - - cluster_settings = { - "net_provider": "neutron", - "net_segment_type": settings.NEUTRON_SEGMENT_TYPE, - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'tenant': 'cephHA', - 'user': 'cephHA', - 'password': 'cephHA', - 'osd_pool_size': "3" - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=cluster_settings - ) - - zabbix_username = 'admin' - zabbix_password = 'zabbix' - self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.check_zabbix_configuration(cluster_id, zabbix_username, - zabbix_password) - - self.env.make_snapshot("deploy_zabbix_ceph_ha") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_zabbix_ceph_radosgw_ha"]) - @log_snapshot_after_test - def deploy_zabbix_ceph_radosgw_ha(self): - """Deploy cluster in ha mode with zabbix plugin - - Scenario: - 1. Upload plugin to the master node - 2. Install plugin - 3. Create cluster - 4. Add 3 nodes with controller,ceph-osd roles - 5. Add 2 node with compute,ceph-osd roles - 6. Deploy the cluster - 7. Run network verification - 8. Run OSTF - 9. Check zabbix service in pacemaker - 10. 
Check login to zabbix dashboard - - Duration 180m - Snapshot deploy_zabbix_ceph_radosgw_ha - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.ZABBIX_PLUGIN_PATH, - tar_target="/var") - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(settings.ZABBIX_PLUGIN_PATH)) - - cluster_settings = { - "net_provider": "neutron", - "net_segment_type": settings.NEUTRON_SEGMENT_TYPE, - 'objects_ceph': True, - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'tenant': 'rados', - 'user': 'rados', - 'password': 'rados', - 'osd_pool_size': "3" - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=cluster_settings - ) - - zabbix_username = 'admin' - zabbix_password = 'zabbix' - self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.check_zabbix_configuration(cluster_id, zabbix_username, - zabbix_password) - - self.env.make_snapshot("deploy_zabbix_ceph_radosgw_ha") diff --git a/fuelweb_test/tests/test_admin_node.py b/fuelweb_test/tests/test_admin_node.py deleted file mode 100644 index 2eff53114..000000000 --- a/fuelweb_test/tests/test_admin_node.py +++ /dev/null @@ -1,614 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division -from warnings import warn - -import datetime -import random -import re - -from devops.helpers.helpers import http -from devops.helpers.helpers import wait -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test -from proboscis import SkipTest -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves.urllib.request import urlopen -# noinspection PyUnresolvedReferences -from six.moves.xmlrpc_client import ServerProxy -# pylint: enable=import-error - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(enabled=False, groups=["thread_1"]) -class TestAdminNode(TestBasic): - """TestAdminNode. 
- - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_admin_node - """ # TODO documentation - - @test(enabled=False, depends_on=[SetupEnvironment.setup_master], - groups=["test_cobbler_alive"]) - @log_snapshot_after_test - def test_cobbler_alive(self): - """Test current installation has correctly setup cobbler - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_admin_node.TestAdminNode - - API and cobbler HTTP server are alive - - Scenario: - 1. Revert snapshot "empty" - 2. test cobbler API and HTTP server through send http request - - Duration 1m - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("empty") - wait( - lambda: http(host=self.env.get_admin_node_ip(), url='/cobbler_api', - waited_code=501), - timeout=60, - timeout_msg='Cobler WEB API is not alive' - ) - server = ServerProxy( - 'http://%s/cobbler_api' % self.env.get_admin_node_ip()) - - config = self.env.admin_actions.get_fuel_settings() - username = config['cobbler']['user'] - password = config['cobbler']['password'] - - # raises an error if something isn't right - server.login(username, password) - - @test(enabled=False, depends_on=[SetupEnvironment.setup_master], - groups=["test_astuted_alive"]) - @log_snapshot_after_test - def test_astuted_alive(self): - """Test astute master and worker processes are alive on master node - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_admin_node.TestAdminNode - - Scenario: - 1. Revert snapshot "empty" - 2. Search for master and child processes - - Duration 1m - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("empty") - ps_output = self.ssh_manager.execute( - self.ssh_manager.admin_ip, 'ps ax')['stdout'] - astute_master = [ - master for master in ps_output if 'astute master' in master] - logger.info("Found astute processes: {:s}".format(astute_master)) - assert_equal(len(astute_master), 1) - astute_workers = [ - worker for worker in ps_output if 'astute worker' in worker] - logger.info( - "Found {len:d} astute worker processes: {workers!s}" - "".format(len=len(astute_workers), workers=astute_workers)) - assert_equal(True, len(astute_workers) > 1) - - -@test(groups=["logrotate"]) -class TestLogrotateBase(TestBasic): - @staticmethod - def no_error_in_log(log_txt): - checker = re.compile(r'\s+(error)[: \n\t]+', flags=re.IGNORECASE) - return len(checker.findall(log_txt)) == 0 - - def generate_file(self, remote_ip, name, path, size): - cmd = 'cd {0} && fallocate -l {1} {2}'.format(path, size, name) - self.ssh_manager.execute_on_remote(remote_ip, cmd) - - def execute_logrotate_cmd( - self, remote_ip, force=True, cmd=None, any_exit_code=False): - if not cmd: - cmd = 'logrotate -v {0} /etc/logrotate.conf'.format( - '-f' if force else "") - result = self.ssh_manager.execute_on_remote( - remote_ip, cmd, raise_on_assert=not any_exit_code) - - assert_equal( - True, self.no_error_in_log(result['stderr_str']), - 'logrotate failed with:\n{0}'.format(result['stderr_str'])) - logger.info('Logrotate: success') - return result - - def check_free_space(self, remote_ip, return_as_is=None): - result = self.ssh_manager.execute_on_remote( - remote_ip, - 'python -c "import os; ' - 'stats=os.statvfs(\'/var/log\'); ' - 'print stats.f_bavail * stats.f_frsize"', - 
err_msg='Failed to check free space!' - ) - if not return_as_is: - return self.bytestogb(int(result['stdout'][0])) - else: - return int(result['stdout'][0]) - - def check_free_inodes(self, remote_ip): - result = self.ssh_manager.execute_on_remote( - remote_ip, - 'python -c "import os; ' - 'stats=os.statvfs(\'/var/log\'); ' - 'print stats.f_ffree"', - err_msg='Failed to check free inodes!') - return self.bytestogb(int(result['stdout'][0])) - - @staticmethod - def bytestogb(data): - symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') - prefix = {} - for i, s in enumerate(symbols): - prefix[s] = 1 << (i + 1) * 10 - for s in reversed(symbols): - if data >= prefix[s]: - value = data / prefix[s] - return format(value, '.1f'), s - return data, 'B' - - def create_old_file(self, remote_ip, name): - one_week_old = datetime.datetime.now() - datetime.timedelta(days=7) - result = self.ssh_manager.execute_on_remote( - remote_ip, - 'touch {0} -d {1}'.format(name, one_week_old), - err_msg='Failed to create old file!' - ) - return result - - @test(depends_on=[SetupEnvironment.setup_master], - groups=["test_logrotate"]) - @log_snapshot_after_test - def test_log_rotation(self): - """Logrotate with logrotate.conf on master node - - Scenario: - 1. Revert snapshot "empty" - 2. Check free disk space under /var/log, check free inodes - 3. Generate 2GB size file - 4. Run logrotate 2 times - 5. Check free disk space, check free inodes - - Duration 30m - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("empty") - - admin_ip = self.ssh_manager.admin_ip - - # get data before logrotate - self.show_step(2) - free, suff = self.check_free_space(admin_ip) - - free_inodes, i_suff = self.check_free_inodes(admin_ip) - logger.debug('Free inodes before file ' - 'creation: {0}{1}'.format(free_inodes, i_suff)) - self.show_step(3) - self.generate_file( - admin_ip, size='2G', - path='/var/log/', - name='messages') - - # Get free space after file creation - free2, suff2 = self.check_free_space(admin_ip) - assert_true( - free2 < free, - 'File was not created. Free space ' - 'before creation {0}{1}, ' - 'free space after ' - 'creation {2}{3}'.format(free, suff, free2, suff2)) - - self.show_step(4) - self.execute_logrotate_cmd(admin_ip, force=False) - - free3, suff3 = self.check_free_space(admin_ip) - logger.debug('Free space after first ' - 'rotation {0} {1}'.format(free3, suff3)) - - # Allow any exit code, but check real status later - # Logrotate can return fake-fail on second run - self.execute_logrotate_cmd(admin_ip, any_exit_code=True) - - free4, suff4 = self.check_free_space(admin_ip) - free_inodes4, i_suff4 = self.check_free_inodes(admin_ip) - logger.info('Free inodes after logrotation:' - ' {0}{1}'.format(free_inodes4, i_suff4)) - - assert_true( - free4 > free2, - 'Logs were not rotated. ' - 'Rotate was executed 2 times. ' - 'Free space after file creation: {0}{1}, ' - 'after rotation {2}{3} free space before rotation {4}' - '{5}'.format(free2, suff2, free4, suff4, free, suff)) - - assert_equal( - (free_inodes, i_suff), - (free_inodes4, i_suff4), - 'Unexpected free inodes count. Before log rotate was: {0}{1}' - ' after logrotation: {2}{3}'.format( - free_inodes, i_suff, free_inodes4, i_suff4)) - self.env.make_snapshot("test_logrotate") - - @test(depends_on=[SetupEnvironment.setup_master], - groups=["test_fuel_nondaily_logrotate"]) - @log_snapshot_after_test - def test_fuel_nondaily_rotation(self): - """Logrotate with fuel.nondaily on master node - - Scenario: - 1. Revert snapshot "empty" - 2. 
Check free disk space under /var/log, check free inodes - 3. Generate 2GB /var/log/ostf-test.log size file - 4. Run /usr/bin/fuel-logrotate - 5. Generate 101MB /var/log/ostf-test.log size file - 6. Run /usr/bin/fuel-logrotate - 7. Check free disk space, check free inodes - - Duration 30m - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("empty") - - admin_ip = self.ssh_manager.admin_ip - - # get data before logrotate - self.show_step(2) - free, suff = self.check_free_space(admin_ip) - free_inodes, i_suff = self.check_free_inodes(admin_ip) - logger.debug('Free inodes before file ' - 'creation: {0}{1}'.format(free_inodes, i_suff)) - - self.show_step(3) - self.generate_file( - admin_ip, size='2G', - path='/var/log/', - name='ostf-test.log') - - free2, suff2 = self.check_free_space(admin_ip) - assert_true( - free2 < free, - 'File was not created. Free space ' - 'before creation {0}{1}, ' - 'free space after ' - 'creation {2}{3}'.format(free, suff, free2, suff2)) - - self.show_step(4) - self.execute_logrotate_cmd(admin_ip, cmd='/usr/bin/fuel-logrotate') - - self.show_step(5) - self.generate_file( - admin_ip, size='101M', - path='/var/log/', - name='ostf-test.log') - - self.show_step(6) - self.execute_logrotate_cmd(admin_ip, cmd='/usr/bin/fuel-logrotate') - - self.show_step(7) - free3, suff3 = self.check_free_space(admin_ip) - - free_inodes3, i_suff3 = self.check_free_inodes(admin_ip) - logger.info('Free inodes after logrotation:' - ' {0}{1}'.format(free_inodes3, i_suff3)) - - assert_true( - free3 > free2, - 'Logs were not rotated. ' - 'Free space before rotation: {0}{1}, ' - 'after rotation {2}{3}'.format(free2, suff2, free3, suff3)) - - assert_equal( - (free_inodes, i_suff), - (free_inodes3, i_suff3), - 'Unexpected free inodes count. Before log rotate was: {0}{1}' - ' after logrotation: {2}{3}'.format( - free_inodes, i_suff, free_inodes3, i_suff3)) - - self.env.make_snapshot("test_fuel_nondaily_logrotate") - - @test(depends_on=[SetupEnvironment.setup_master], - groups=["test_logrotate_101MB"]) - @log_snapshot_after_test - def test_log_rotation_101mb(self): - """Logrotate with logrotate.conf for 101MB size file on master node - - Scenario: - 1. Revert snapshot "empty" - 2. Check free disk space and free inodes under /var/log - 3. Generate 101MB size file - 4. Run logrotate 2 times - 5. Check free disk space and free inodes - - Duration 30m - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("empty") - - admin_ip = self.ssh_manager.admin_ip - - # get data before logrotate - self.show_step(2) - free, suff = self.check_free_space(admin_ip) - - free_inodes, i_suff = self.check_free_inodes(admin_ip) - logger.debug('Free inodes before file ' - 'creation: {0}{1}'.format(free_inodes, i_suff)) - self.show_step(3) - self.generate_file( - admin_ip, size='101M', - path='/var/log/', - name='messages') - - free2, suff2 = self.check_free_space(admin_ip) - assert_true( - free2 < free, - 'File was not created. 
Free space ' - 'before creation {0}{1}, ' - 'free space after ' - 'creation {2}{3}'.format(free, suff, free2, suff2)) - self.show_step(4) - self.execute_logrotate_cmd(admin_ip, force=False) - - free3, suff3 = self.check_free_space(admin_ip) - logger.debug('free space after first ' - 'rotation: {0}{1}'.format(free3, suff3)) - - # Allow any exit code, but check real status later - # Logrotate can return fake-fail on second run - self.execute_logrotate_cmd(admin_ip, any_exit_code=True) - - free4, suff4 = self.check_free_space(admin_ip) - free_inodes4, i_suff4 = self.check_free_inodes(admin_ip) - logger.info('Free inodes after logrotation:' - ' {0}{1}'.format(free_inodes4, i_suff4)) - - assert_true( - free4 > free2, - 'Logs were not rotated. ' - 'Rotate was executed 2 times. ' - 'Free space after file creation: {0}{1}, ' - 'after rotation {2}{3} free space before rotation {4}' - '{5}'.format(free2, suff2, free4, suff4, free, suff)) - - assert_equal( - (free_inodes, i_suff), - (free_inodes4, i_suff4), - 'Unexpected free inodes count. Before log rotate was: {0}{1}' - ' after logrotation: {2}{3}'.format( - free_inodes, i_suff, free_inodes4, i_suff4)) - self.env.make_snapshot("test_logrotate_101MB") - - @test(depends_on=[SetupEnvironment.setup_master], - groups=["test_logrotate_one_week_11MB"]) - @log_snapshot_after_test - def test_log_rotation_one_week_11mb(self): - """Logrotate with logrotate.conf for 1 week old file with size 11MB - - Scenario: - 1. Revert snapshot "empty" - 2. Check free disk space and free inodes under /var/log - 3. Generate 1 week old 11MB size file - 4. Run logrotate 2 times - 5. Check free disk space and free inodes - - Duration 30m - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("empty") - - admin_ip = self.ssh_manager.admin_ip - - # get data before logrotate - self.show_step(2) - free = self.check_free_space(admin_ip, return_as_is=True) - - free_inodes, i_suff = self.check_free_inodes(admin_ip) - logger.debug('Free inodes before file ' - 'creation: {0}{1}'.format(free_inodes, i_suff)) - # create 1 week old empty file - - self.create_old_file(admin_ip, name='/var/log/messages') - self.show_step(3) - self.generate_file( - admin_ip, size='11M', - path='/var/log/', - name='messages') - - free2 = self.check_free_space(admin_ip, return_as_is=True) - assert_true( - free2 < free, - 'File was not created. Free space ' - 'before creation {0}, ' - 'free space after ' - 'creation {1}'.format(free, free2)) - self.show_step(4) - self.execute_logrotate_cmd(admin_ip) - - free3 = self.check_free_space(admin_ip, return_as_is=True) - logger.debug('Free space after first' - ' rotation {0}'.format(free3)) - - # Allow any exit code, but check real status later - # Logrotate can return fake-fail on second run - self.execute_logrotate_cmd(admin_ip, any_exit_code=True) - - self.show_step(5) - free4 = self.check_free_space(admin_ip, return_as_is=True) - free_inodes4, i_suff4 = self.check_free_inodes(admin_ip) - logger.info('Free inodes after logrotation:' - ' {0}{1}'.format(free_inodes4, i_suff4)) - - assert_true( - free4 > free2, - 'Logs were not rotated. ' - 'Rotate was executed 2 times. ' - 'Free space after file creation: {0}, ' - 'after rotation {1} free space before rotation' - '{2}'.format(free2, free4, free)) - - assert_equal( - (free_inodes, i_suff), - (free_inodes4, i_suff4), - 'Unexpected free inodes count. 
Before log rotate was: {0}{1}' - ' after logrotation: {2}{3}'.format( - free_inodes, i_suff, free_inodes4, i_suff4)) - self.env.make_snapshot("test_logrotate_one_week_11MB") - - -@test(groups=["tests_gpg_singing_check"]) -class GPGSigningCheck(TestBasic): - """ Tests for checking GPG signing """ - def __init__(self): - super(GPGSigningCheck, self).__init__() - self.release_version = self.fuel_web.client.get_api_version().get( - 'release') - self.gpg_name = settings.GPG_CENTOS_KEY_PATH.split('/')[-1].format( - release_version=self.release_version) - self.gpg_centos_key_path = settings.GPG_CENTOS_KEY_PATH.format( - release_version=self.release_version) - self.centos_repo_path = settings.CENTOS_REPO_PATH.format( - release_version=self.release_version) - self.ubuntu_repo_path = settings.UBUNTU_REPO_PATH.format( - release_version=self.release_version) - self.gpg_centos_key_path = settings.GPG_CENTOS_KEY_PATH.format( - release_version=self.release_version) - - @test(depends_on=[SetupEnvironment.setup_master], - groups=['test_check_rpm_packages_signed']) - @log_snapshot_after_test - def check_rpm_packages_signed(self): - """Check that local rpm packages are signed - - Scenario: - 1. Create environment using fuel-qa - 2. Import public GPG key for rpm verification by executing: - rpm --import gpg-pub-key - 3. Check all local rpm packets and verify it - - Duration: 15 min - """ - - self.show_step(1) - self.env.revert_snapshot('empty') - - path_to_repos = '/var/www/nailgun/mos-centos/x86_64/Packages/' - - self.show_step(2) - cmds = [ - 'wget {link}'.format(link=self.gpg_centos_key_path), - 'rpm --import {gpg_pub_key}'.format(gpg_pub_key=self.gpg_name) - ] - for cmd in cmds: - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - - self.show_step(3) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='rpm -K {repos}*rpm'.format(repos=path_to_repos) - ) - - @test(depends_on=[SetupEnvironment.setup_master], - groups=['test_remote_packages_and_mos_repositories_signed']) - @log_snapshot_after_test - def check_remote_packages_and_mos_repositories_signed(self): - """Check that remote packages and MOS repositories are signed - - Scenario: - 1. Create environment using fuel-qa - 2. Import GPG key for rpm - 3. Import GPG key for gpg - 4. Download repomd.xml.asc and repomd.xml and verify them - 5. Download Release and Releasee.gpg and verify those - 6. 
Download randomly chosen .rpm file and verify it - - Duration: 15 min - """ - self.show_step(1) - self.env.revert_snapshot('empty') - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - cmds = [ - 'wget {link}'.format(link=self.gpg_centos_key_path), - 'rpm --import {gpg_pub_key}'.format(gpg_pub_key=self.gpg_name), - 'gpg --import {gpg_pub_key}'.format(gpg_pub_key=self.gpg_name), - 'wget {repo_path}os/x86_64/repodata/repomd.xml.asc'.format( - repo_path=self.centos_repo_path), - 'wget {repo_path}os/x86_64/repodata/repomd.xml'.format( - repo_path=self.centos_repo_path), - 'gpg --verify repomd.xml.asc repomd.xml', - 'wget {repo_path}dists/mos{release_version}/Release'.format( - repo_path=self.ubuntu_repo_path, - release_version=self.release_version), - 'wget {repo_path}dists/mos{release_version}/Release.gpg'.format( - repo_path=self.ubuntu_repo_path, - release_version=self.release_version), - 'gpg --verify Release.gpg Release' - ] - for cmd in cmds: - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - - self.show_step(6) - response = urlopen( - '{}/os/x86_64/Packages/'.format(self.centos_repo_path) - ) - source = response.read() - rpms = re.findall(r'href="(.*.rpm)"', source) - rpm = random.choice(rpms) - - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='wget {}os/x86_64/Packages/{}'.format( - self.centos_repo_path, - rpm) - ) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='rpm -K {}'.format(rpm) - ) diff --git a/fuelweb_test/tests/test_backup_restore.py b/fuelweb_test/tests/test_backup_restore.py deleted file mode 100644 index b400e679c..000000000 --- a/fuelweb_test/tests/test_backup_restore.py +++ /dev/null @@ -1,448 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
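The GPG-signing checks deleted above come down to a handful of standard tool invocations: import the vendor key into both the rpm and gpg keyrings, verify the detached signatures on the yum and apt repository metadata, and spot-check a package with rpm -K. A sketch of that sequence, assuming rpm and gpg are on PATH; the file-name parameters are placeholders for the values the retired test substituted from settings at run time:

    import subprocess

    def verify_signatures(pub_key, repomd, repomd_asc, release, release_gpg):
        # Trust the vendor key for both verification paths.
        subprocess.check_call(['rpm', '--import', pub_key])
        subprocess.check_call(['gpg', '--import', pub_key])
        # Detached-signature checks: yum repomd metadata and apt Release file.
        subprocess.check_call(['gpg', '--verify', repomd_asc, repomd])
        subprocess.check_call(['gpg', '--verify', release_gpg, release])

    def verify_rpm_package(rpm_path):
        # 'rpm -K' exits non-zero when digests or the GPG signature are bad.
        subprocess.check_call(['rpm', '-K', rpm_path])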
- -import os - -from devops.helpers.helpers import wait -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import RunLimit -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.tests.test_ha_one_controller_base\ - import HAOneControllerNeutronBase -from fuelweb_test.tests.test_neutron_tun_base import NeutronTunHaBase -from fuelweb_test.tests.test_cli_base import CommandLine - - -@test(enabled=False, groups=["backup_restore_master"]) -class TestAdminNodeBackupRestore(TestBasic): - - @test(enabled=False, depends_on=[SetupEnvironment.setup_master], - groups=["backup_restore_master_base"]) - @log_snapshot_after_test - def backup_restore_master_base(self): - """Backup/restore master node - - Scenario: - 1. Revert snapshot "empty" - 2. Backup master - 3. Check backup - 4. Restore master - 5. Check restore - 6. Check iptables - - Duration 30m - - """ - self.env.revert_snapshot("empty") - - with self.env.d_env.get_admin_remote() as remote: - self.fuel_web.backup_master(remote) - checkers.backup_check(remote) - with RunLimit( - seconds=60 * 10, - error_message="'dockerctl restore' " - "run longer then 600 sec"): - self.fuel_web.restore_master(self.ssh_manager.admin_ip) - self.fuel_web.restore_check_nailgun_api() - checkers.restore_check_sum(self.ssh_manager.admin_ip) - checkers.iptables_check(self.ssh_manager.admin_ip) - - -@test(enabled=False, groups=["backup_restore_master"]) -class BackupRestoreHAOneController(HAOneControllerNeutronBase): - """BackupRestoreHAOneController""" # TODO documentation - - @test(enabled=False, depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_ha_one_controller_backup_restore"]) - @log_snapshot_after_test - def deploy_ha_one_controller_backup_restore(self): - """Deploy cluster in HA mode (one controller) with neutron - - Scenario: - 1. Create cluster in HA mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - 6. Verify networks - 7. Verify network configuration on controller - 8. Run OSTF - - Duration 30m - Snapshot: deploy_ha_one_controller_backup_restore - """ - super(self.__class__, self).deploy_ha_one_controller_neutron_base( - snapshot_name="deploy_ha_one_controller_backup_restore") - - @test(enabled=False, depends_on=[deploy_ha_one_controller_backup_restore], - groups=["ha_one_controller_backup_restore"]) - @log_snapshot_after_test - def ha_one_controller_backup_restore(self): - """Backup/restore master node with one controller in cluster - - Scenario: - 1. Revert snapshot "deploy_ha_one_controller_backup_restore" - 2. Backup master - 3. Check backup - 4. Run OSTF - 5. Add 1 node with compute role - 6. Restore master - 7. Check restore - 8. 
Run OSTF - - Duration 35m - - """ - self.env.revert_snapshot("deploy_ha_one_controller_backup_restore") - - cluster_id = self.fuel_web.get_last_created_cluster() - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - 'neutronOneController', 'neutronOneController', - 'neutronOneController') - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - - with self.env.d_env.get_admin_remote() as remote: - # Execute master node backup - self.fuel_web.backup_master(remote) - # Check created backup - checkers.backup_check(remote) - - self.fuel_web.update_nodes( - cluster_id, {'slave-03': ['compute']}, True, False) - - assert_equal( - 3, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - with RunLimit( - seconds=60 * 10, - error_message="'dockerctl restore' " - "run longer then 600 sec"): - self.fuel_web.restore_master(self.ssh_manager.admin_ip) - checkers.restore_check_sum(self.ssh_manager.admin_ip) - self.fuel_web.restore_check_nailgun_api() - checkers.iptables_check(self.ssh_manager.admin_ip) - - assert_equal( - 2, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.fuel_web.update_nodes( - cluster_id, {'slave-03': ['compute']}, True, False) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("ha_one_controller_backup_restore") - - -@test(enabled=False, groups=["backup_restore_master"]) -class BackupRestoreHA(NeutronTunHaBase): - """BackupRestoreHAOneController""" # TODO documentation - - @test(enabled=False, depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_tun_ha_backup_restore"]) - @log_snapshot_after_test - def deploy_neutron_tun_ha_backup_restore(self): - """Deploy cluster in HA mode with Neutron VXLAN - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 80m - Snapshot deploy_neutron_tun_ha_backup_restore - """ - super(self.__class__, self).deploy_neutron_tun_ha_base( - snapshot_name="deploy_neutron_tun_ha_backup_restore") - - @test(enabled=False, - depends_on_groups=['deploy_neutron_tun_ha_backup_restore'], - groups=["neutron_tun_ha_backup_restore"]) - @log_snapshot_after_test - def neutron_tun_ha_backup_restore(self): - """Backup/restore master node with cluster in ha mode - - Scenario: - 1. Revert snapshot "deploy_neutron_tun_ha" - 2. Backup master - 3. Check backup - 4. Run OSTF - 5. Add 1 node with compute role - 6. Restore master - 7. Check restore - 8. 
Run OSTF - - Duration 50m - """ - self.env.revert_snapshot("deploy_neutron_tun_ha_backup_restore") - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - 'haTun', 'haTun', 'haTun') - - self.fuel_web.check_fixed_network_cidr( - cluster_id, os_conn) - - with self.env.d_env.get_admin_remote() as remote: - self.fuel_web.backup_master(remote) - checkers.backup_check(remote) - - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[5:6]) - self.fuel_web.update_nodes( - cluster_id, {'slave-06': ['compute']}, True, False - ) - - assert_equal( - 6, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - with RunLimit( - seconds=60 * 10, - error_message="'dockerctl restore' " - "run longer then 600 sec"): - self.fuel_web.restore_master(self.ssh_manager.admin_ip) - checkers.restore_check_sum(self.ssh_manager.admin_ip) - - self.fuel_web.restore_check_nailgun_api() - checkers.iptables_check(self.ssh_manager.admin_ip) - - assert_equal( - 5, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[5:6]) - self.fuel_web.update_nodes( - cluster_id, {'slave-06': ['compute']}, True, False - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("neutron_tun_ha_backup_restore") - - @test(enabled=False, depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["create_backup_reset_restore_and_deploy_via_cli"]) - @log_snapshot_after_test - def create_backup_reset_restore_and_deploy_via_cli(self): - """Backup/restore master node with cluster in ha mode - - Scenario: - 1. Create env with 1 Controller, 1 Compute, 1 Ceph - 2. Start provisioning and wait for it is finished - 3. Backup master - 4. Reset env - 5. Restore master - 6. Delete env - 7. Create new env via CLI with the same staff - 8. 
Start provisioning via CLI - - Duration 75m - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT_TYPE - } - ) - - self.fuel_web.update_nodes( - cluster_id, - {'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['ceph-osd']} - ) - self.fuel_web.provisioning_cluster_wait(cluster_id) - - with self.env.d_env.get_admin_remote() as remote: - self.fuel_web.backup_master(remote) - checkers.backup_check(remote) - - self.fuel_web.stop_reset_env_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:3], timeout=10 * 60) - - with RunLimit( - seconds=60 * 10, - error_message="'dockerctl restore' " - "ran longer then 600 sec"): - self.fuel_web.restore_master(self.ssh_manager.admin_ip) - checkers.restore_check_sum(self.ssh_manager.admin_ip) - - number_of_nodes = len(self.fuel_web.client.list_cluster_nodes( - cluster_id)) - - self.fuel_web.client.delete_cluster(cluster_id) - - wait((lambda: len( - self.fuel_web.client.list_nodes()) == number_of_nodes), - timeout=5 * 60, - timeout_msg='Nodes are not discovered in timeout') - - cl = CommandLine() - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - node_ids = [self.fuel_web.get_nailgun_node_by_devops_node( - self.env.d_env.nodes().slaves[slave_id])['id'] - for slave_id in range(3)] - - # Create an environment - if NEUTRON_SEGMENT_TYPE: - nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE) - else: - nst = '' - cmd = ('fuel env create --name={0} --release={1} ' - '{2} --json'.format(self.__class__.__name__, - release_id, nst)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - - # Update network parameters - cl.update_cli_network_configuration(cluster_id) - - # Update SSL configuration - cl.update_ssl_configuration(cluster_id) - - roles = {'controller': node_ids[0], - 'compute': node_ids[1], - 'ceph-osd': node_ids[2]} - - for role in roles: - cmd = ('fuel --env-id={0} node set --node {1} --role={2}' - .format(cluster_id, - roles[role], - role)) - self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - cmd = ( - 'fuel --env-id={0} node --provision --node={1} --json'.format( - cluster_id, ','.join(str(l) for l in node_ids)) - ) - logger.info("Started provisioning via CLI") - task = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cl.assert_cli_task_success(task, timeout=30 * 60) - logger.info("Finished provisioning via CLI") - - -@test(enabled=False, groups=["backup_reinstall_restore"]) -class BackupReinstallRestoreHA(NeutronTunHaBase): - """This test dedicated for verifying dockerctl backup/restore feature - with complete re-installation of Fuel after backing up""" - - @test(enabled=False, - depends_on_groups=['deploy_neutron_tun_ha_backup_restore'], - groups=["backup_reinstall_restore"]) - @log_snapshot_after_test - def backup_reinstall_restore(self): - """Backup, reinstall then restore master node with cluster in ha mode - - Scenario: - 1. Revert snapshot "deploy_neutron_tun_ha" - 2. Backup master - 3. Check backup - 4. Reinstall fuel-master node - 5. Restore master - 6. Check restore - 7. Run OSTF - 8. Add 1 compute - 9. Run network check - 10. Deploy cluster - 11. 
Run OSTF - - - Duration XXm - """ - self.env.revert_snapshot("deploy_neutron_tun_ha_backup_restore") - - cluster_id = self.fuel_web.get_last_created_cluster() - - with self.env.d_env.get_admin_remote() as remote: - self.fuel_web.backup_master(remote) - checkers.backup_check(remote) - backup = checkers.find_backup(self.ssh_manager.admin_ip).strip() - local_backup = os.path.join( - settings.LOGS_DIR, - os.path.basename(backup)) - remote.download(backup, local_backup) - assert_true(os.path.exists(local_backup), - "Backup file wasn't downloaded!") - - self.reinstall_master_node() - - with self.env.d_env.get_admin_remote() as remote: - remote.execute('mkdir -p {}'.format(os.path.dirname(backup))) - remote.upload(local_backup, backup) - assert_true(remote.exists(backup), "Backup file wasn't uploaded!") - with RunLimit( - seconds=60 * 10, - error_message="'dockerctl restore' " - "run longer then 600 sec"): - self.fuel_web.restore_master(self.ssh_manager.admin_ip) - checkers.restore_check_sum(self.ssh_manager.admin_ip) - - self.fuel_web.restore_check_nailgun_api() - checkers.iptables_check(self.ssh_manager.admin_ip) - - assert_equal( - 5, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6], - skip_timesync=True) - self.fuel_web.update_nodes(cluster_id, {'slave-06': ['compute']}) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("neutron_tun_ha_backup_restore") diff --git a/fuelweb_test/tests/test_bdd.py b/fuelweb_test/tests/test_bdd.py deleted file mode 100644 index 859660870..000000000 --- a/fuelweb_test/tests/test_bdd.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["test_bdd"]) -class TestBlockDevice(TestBasic): - """Tests for verification deployment with Cinder block Device.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_bdd"]) - @log_snapshot_after_test - def bdd_ha_one_controller_compact(self): - """Deploy cluster with Cinder Block Device - - Scenario: - 1. Create cluster with Neutron vlan - 2. Add 1 nodes with controller role - 3. Add 1 nodes with compute and cinder-block-device role - 4. Deploy the cluster - 5. Network check - 6. 
Run OSTF tests - - Duration 60m - Snapshot bdd_ha_one_controller_compact - """ - self.env.revert_snapshot("ready_with_3_slaves") - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings={ - 'tenant': 'bdd', - 'user': 'bdd', - 'password': 'bdd', - 'volumes_lvm': False, - 'volumes_ceph': False, - 'images_ceph': False, - 'objects_ceph': False, - 'ephemeral_ceph': False, - 'nova_quotas': True, - 'volumes_block_device': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'], - 'configure_ssl': False - } - ) - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder-block-device'], - } - ) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("bdd_ha_one_controller_compact") diff --git a/fuelweb_test/tests/test_bond_offloading.py b/fuelweb_test/tests/test_bond_offloading.py deleted file mode 100644 index d7ce5ab1b..000000000 --- a/fuelweb_test/tests/test_bond_offloading.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from copy import deepcopy - -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test.helpers.checkers import check_offload -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_bonding_base import BondingTestOffloading - - -@test(groups=["bonding_ha_one_controller", "bonding"]) -class TestOffloading(BondingTestOffloading): - - offloadings_1 = {'generic-receive-offload': False, - 'generic-segmentation-offload': False, - 'tcp-segmentation-offload': False, - 'large-receive-offload': False} - - offloadings_2 = {'rx-all': True, - 'rx-vlan-offload': True, - 'tx-vlan-offload': True} - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["offloading_bond_neutron_vlan", "bonding"]) - @log_snapshot_after_test - def offloading_bond_neutron_vlan(self): - """Verify offloading types for the logical bonded interfaces and - neutron VLAN - - Scenario: - 1. Create cluster with neutron VLAN - 2. Add 1 node with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Configure offloading modes for bonded interfaces - 5. Setup offloading types - 6. Run network verification - 7. Deploy the cluster - 8. Run network verification - 9. Verify offloading types for the bonded interfaces - 10. 
Run OSTF - - Duration 60m - Snapshot offloading_bond_neutron_vlan - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }, - update_interfaces=False - ) - - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - - self.show_step(4) - bond0 = self.get_bond_interfaces(self.bond_config, 'bond0') - bond1 = self.get_bond_interfaces(self.bond_config, 'bond1') - offloadings_1 = {} - offloadings_2 = {} - for node in nodes: - modes = self.fuel_web.get_offloading_modes(node['id'], bond0) - for name in self.offloadings_1: - if name in modes and name not in offloadings_1: - offloadings_1[name] = self.offloadings_1[name] - modes = self.fuel_web.get_offloading_modes(node['id'], bond1) - for name in self.offloadings_2: - if name in modes and name not in offloadings_2: - offloadings_2[name] = self.offloadings_2[name] - - assert_true(len(offloadings_1) > 0, "No types for disable offloading") - assert_true(len(offloadings_2) > 0, "No types for enable offloading") - - offloadings = { - 'bond0': offloadings_1, - 'bond1': offloadings_2 - } - - self.show_step(5) - for node in nodes: - self.fuel_web.update_node_networks( - node['id'], - interfaces_dict=deepcopy(self.INTERFACES), - raw_data=deepcopy(self.bond_config)) - for offloading in offloadings: - self.fuel_web.update_offloads( - node['id'], offloadings[offloading], offloading) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - for node in nodes: - for eth in bond0: - for name in offloadings_1: - result = check_offload(node['ip'], eth, name) - assert_equal( - result, 'off', - "Offload type '{0}': '{1}' - node-{2}, {3}".format( - name, result, node['id'], eth)) - for eth in bond1: - for name in offloadings_2: - result = check_offload(node['ip'], eth, name) - assert_equal( - result, 'on', - "Offload type '{0}': '{1}' - node-{2}, {3}".format( - name, result, node['id'], eth)) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("offloading_bond_neutron_vlan") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["offloading_bond_neutron_vxlan", "bonding"]) - @log_snapshot_after_test - def offloading_bond_neutron_vxlan(self): - """Verify setting offloading types for the logical bonded interfaces - and neutron VXLAN - - Scenario: - 1. Create cluster with neutron VXLAN - 2. Add 1 node with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Configure offloading modes for bonded interfaces - 5. Setup offloading types - 6. Run network verification - 7. Deploy the cluster - 8. Run network verification - 9. Verify offloading types for the bonded interfaces - 10. 
Run OSTF - - Duration 60m - Snapshot offloading_bond_neutron_vxlan - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'], - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }, - update_interfaces=False - ) - - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - - self.show_step(4) - bond0 = self.get_bond_interfaces(self.bond_config, 'bond0') - bond1 = self.get_bond_interfaces(self.bond_config, 'bond1') - offloadings_1 = {} - offloadings_2 = {} - for node in nodes: - modes = self.fuel_web.get_offloading_modes(node['id'], bond0) - for name in self.offloadings_1: - if name in modes and name not in offloadings_1: - offloadings_1[name] = self.offloadings_1[name] - modes = self.fuel_web.get_offloading_modes(node['id'], bond1) - for name in self.offloadings_2: - if name in modes and name not in offloadings_2: - offloadings_2[name] = self.offloadings_2[name] - - assert_true(len(offloadings_1) > 0, "No types for disable offloading") - assert_true(len(offloadings_2) > 0, "No types for enable offloading") - - offloadings = { - 'bond0': offloadings_1, - 'bond1': offloadings_2 - } - - self.show_step(5) - for node in nodes: - self.fuel_web.update_node_networks( - node['id'], - interfaces_dict=deepcopy(self.INTERFACES), - raw_data=deepcopy(self.bond_config)) - for offloading in offloadings: - self.fuel_web.update_offloads( - node['id'], offloadings[offloading], offloading) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - for node in nodes: - for eth in bond0: - for name in offloadings_1: - result = check_offload(node['ip'], eth, name) - assert_equal( - result, 'off', - "Offload type '{0}': '{1}' - node-{2}, {3}".format( - name, result, node['id'], eth)) - for eth in bond1: - for name in offloadings_2: - result = check_offload(node['ip'], eth, name) - assert_equal( - result, 'on', - "Offload type '{0}': '{1}' - node-{2}, {3}".format( - name, result, node['id'], eth)) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("offloading_bond_neutron_vxlan") diff --git a/fuelweb_test/tests/test_bonding.py b/fuelweb_test/tests/test_bonding.py deleted file mode 100644 index aa60e1ed0..000000000 --- a/fuelweb_test/tests/test_bonding.py +++ /dev/null @@ -1,567 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from copy import deepcopy - -from keystoneauth1.exceptions import HttpError -from proboscis.asserts import assert_equal -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_bonding_base import BondingTest - - -@test(groups=["bonding_ha_one_controller", "bonding"]) -class BondingHAOneController(BondingTest): - """BondingHAOneController.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_bonding_one_controller_tun"]) - @log_snapshot_after_test - def deploy_bonding_one_controller_tun(self): - """Deploy cluster with active-backup bonding and Neutron VXLAN - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Setup bonding for all interfaces (including admin interface - bonding) - 5. Run network verification - 6. Deploy the cluster - 7. Run network verification - 8. Run OSTF - - Duration 30m - Snapshot deploy_bonding_one_controller_tun - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - segment_type = NEUTRON_SEGMENT['tun'] - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": segment_type, - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }, - update_interfaces=False - ) - - self.show_step(4) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.fuel_web.update_node_networks( - node['id'], interfaces_dict=deepcopy(self.INTERFACES), - raw_data=deepcopy(self.bond_config) - ) - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_bonding_one_controller_tun") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_bonding_one_controller_vlan"]) - @log_snapshot_after_test - def deploy_bonding_one_controller_vlan(self): - """Deploy cluster with active-backup bonding and Neutron VLAN - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Setup bonding for all interfaces (including admin interface - bonding) - 5. Run network verification - 6. Deploy the cluster - 7. Run network verification - 8. 
Run OSTF - - - Duration 30m - Snapshot deploy_bonding_one_controller_vlan - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - segment_type = NEUTRON_SEGMENT['vlan'] - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": segment_type, - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }, - update_interfaces=False - ) - - self.show_step(4) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.fuel_web.update_node_networks( - node['id'], interfaces_dict=deepcopy(self.INTERFACES), - raw_data=deepcopy(self.bond_config) - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_bonding_one_controller_vlan") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["negative_admin_bonding_in_lacp_mode"]) - @log_snapshot_after_test - def negative_admin_bonding_in_lacp_mode(self): - """Verify that lacp mode cannot be enabled for admin bond - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Verify that lacp mode cannot be enabled for admin bond - - Duration 4m - Snapshot negative_admin_bonding_in_lacp_mode - """ - self.env.revert_snapshot("ready_with_3_slaves") - - segment_type = NEUTRON_SEGMENT['tun'] - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": segment_type, - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }, - update_interfaces=False - ) - - self.show_step(4) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - invalid_bond_conf = deepcopy(self.bond_config) - invalid_bond_conf[1]['mode'] = '802.3ad' - interfaces_dict = deepcopy(self.INTERFACES) - - exp_code = 400 - try: - self.fuel_web.update_node_networks( - nailgun_nodes[0]['id'], - interfaces_dict=interfaces_dict, - raw_data=invalid_bond_conf) - except HttpError as exc: - if exc.http_status != exp_code: - logger.error( - 'Raised: {exc!s},\n' - 'Expected: {exp} with code={code}'.format( - exc=exc, - exp=HttpError, - code=exp_code)) - raise - - logger.info('Test PASS: expected exception raised: ' - '{!s}'.format(exc)) - return - except BaseException as exc: - logger.error( - 'Raised: {exc!s},\n' - 'Expected: {exp} with code={code}'.format( - exc=exc, - exp=HttpError, - code=exp_code)) - raise - raise AssertionError( - 'Not raised any exception, while expected ' - '{exp} with code={code}'.format( - exp=HttpError, - code=exp_code)) - - -@test(groups=["bonding_neutron", "bonding_ha", "bonding"]) -class BondingHA(BondingTest): - """Tests for HA bonding.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_bonding_neutron_vlan"]) - @log_snapshot_after_test - def deploy_bonding_neutron_vlan(self): - """Deploy cluster with 
active-backup bonding and Neutron VLAN - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Setup bonding for all interfaces (including admin interface - bonding) - 5. Run network verification - 6. Deploy the cluster - 7. Run network verification - 8. Run OSTF - 9. Save network configuration from slave nodes - 10. Reboot all environment nodes - 11. Verify that network configuration is the same after reboot - 12. Run network verification - 13. Run OSTF - - Duration 70m - Snapshot deploy_bonding_neutron_vlan - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - segment_type = NEUTRON_SEGMENT['vlan'] - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": segment_type, - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['cinder'] - }, - update_interfaces=False - ) - - net_params = self.fuel_web.client.get_networks(cluster_id) - - self.show_step(4) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.fuel_web.update_node_networks( - node['id'], interfaces_dict=deepcopy(self.INTERFACES), - raw_data=deepcopy(self.bond_config) - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - assert_equal(str(net_params["networking_parameters"] - ['segmentation_type']), segment_type) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - self.show_step(10) - self.show_step(11) - self.check_interfaces_config_after_reboot() - self.fuel_web.assert_ha_services_ready(cluster_id) - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - - self.show_step(13) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_bonding_neutron_vlan") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_bonding_neutron_tun"]) - @log_snapshot_after_test - def deploy_bonding_neutron_tun(self): - """Deploy cluster with active-backup bonding and Neutron VXLAN - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Setup bonding for all interfaces (including admin interface - bonding) - 5. Run network verification - 6. Deploy the cluster - 7. Run network verification - 8. Run OSTF - 9. Save network configuration from slave nodes - 10. Reboot all environment nodes - 11. Verify that network configuration is the same after reboot - 12. Run network verification - 13. 
Run OSTF
-
-        Duration 70m
-        Snapshot deploy_bonding_neutron_tun
-        """
-
-        self.env.revert_snapshot("ready_with_5_slaves")
-
-        segment_type = NEUTRON_SEGMENT['tun']
-
-        self.show_step(1, initialize=True)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": segment_type,
-            }
-        )
-
-        self.show_step(2)
-        self.show_step(3)
-        self.fuel_web.update_nodes(
-            cluster_id, {
-                'slave-01': ['controller'],
-                'slave-02': ['controller'],
-                'slave-03': ['controller'],
-                'slave-04': ['compute'],
-                'slave-05': ['cinder']
-            },
-            update_interfaces=False
-        )
-
-        net_params = self.fuel_web.client.get_networks(cluster_id)
-
-        self.show_step(4)
-        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        for node in nailgun_nodes:
-            self.fuel_web.update_node_networks(
-                node['id'], interfaces_dict=deepcopy(self.INTERFACES),
-                raw_data=deepcopy(self.bond_config)
-            )
-
-        self.show_step(5)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
-
-        cluster = self.fuel_web.client.get_cluster(cluster_id)
-        assert_equal(str(cluster['net_provider']), 'neutron')
-        assert_equal(str(net_params["networking_parameters"]
-                         ['segmentation_type']), segment_type)
-
-        self.show_step(7)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(8)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-
-        self.show_step(9)
-        self.show_step(10)
-        self.show_step(11)
-        self.check_interfaces_config_after_reboot()
-        self.fuel_web.assert_ha_services_ready(cluster_id)
-
-        self.show_step(12)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(13)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-
-        self.env.make_snapshot("deploy_bonding_neutron_tun")
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
-          groups=["bonding_conf_consistency"])
-    @log_snapshot_after_test
-    def bonding_conf_consistency(self):
-        """Verify that network configuration with bonds is consistent\
-        after deployment failure
-
-        Scenario:
-            1. Create an environment
-            2. Add 3 nodes with controller role
-            3. Add 1 node with compute role
-            4. Setup bonding for all interfaces (including admin interface
-               bonding)
-            5. Run network verification
-            6. Update 'connectivity_tests' puppet manifest to cause the\
-               deployment process to fail right after 'netconfig' task is finished
-            7. Start deployment and wait until it fails
-            8. Verify that interfaces are not lost from the configured bonds
-            9. Restore the initial version of 'connectivity_tests' manifest
-            10. Redeploy the cluster and run basic health checks
-            11. 
Run network verification - - Duration 120m - Snapshot bonding_conf_consistency - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'], - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - }, - update_interfaces=False - ) - - self.show_step(4) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.fuel_web.update_node_networks( - node['id'], interfaces_dict=deepcopy(self.INTERFACES), - raw_data=deepcopy(self.bond_config) - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - # Get ID of a (pending) primary controller - pending_ctrl_ids = [n['id'] for n in nailgun_nodes - if 'controller' in n['pending_roles']] - node_id = sorted(pending_ctrl_ids)[0] - - # Get interfaces data of the primary controller for which deployment - # will be forced to fail - ifaces_data = self.fuel_web.client.get_node_interfaces(node_id) - - self.show_step(6) - pp_file = ("/etc/puppet/modules/osnailyfacter/modular/netconfig/" - "connectivity_tests.pp") - with self.env.d_env.get_admin_remote() as admin_node: - # Backup the manifest to be updated for the sake of the test - backup_cmd = "cp {0} {1}".format(pp_file, pp_file + "_bak") - res = admin_node.execute(backup_cmd) - assert_equal(0, res['exit_code'], - "Failed to create a backup copy of {0} puppet " - "manifest on master node".format(pp_file)) - - fail_cmd = ("echo 'fail(\"Emulate deployment failure after " - "netconfig!\")' >> {0}".format(pp_file)) - res = admin_node.execute(fail_cmd) - assert_equal(0, res['exit_code'], - "Failed to update {0} puppet manifest " - "on master node".format(pp_file)) - - self.show_step(7) - task = self.fuel_web.deploy_cluster(cluster_id) - self.fuel_web.assert_task_failed(task) - - # Get interfaces data after deployment failure on - # the primary controller - ifaces_data_latest = self.fuel_web.client.get_node_interfaces(node_id) - - self.show_step(8) - # Bond interfaces are always the last objects in the list being - # returned by 'get node interfaces' API request. 
-        # So, having 2 bonds on the node under test, the last 2 objects
-        # in the corresponding list are examined below
-        admin_bond_ifaces = ifaces_data[-1]['slaves']
-        admin_bond_ifaces_latest = ifaces_data_latest[-1]['slaves']
-        assert_equal(len(admin_bond_ifaces), len(admin_bond_ifaces_latest),
-                     "Admin interface bond config is inconsistent; "
-                     "interface(s) have disappeared from the bond")
-        others_bond_ifaces = ifaces_data[-2]['slaves']
-        others_bond_ifaces_latest = ifaces_data_latest[-2]['slaves']
-        assert_equal(len(others_bond_ifaces), len(others_bond_ifaces_latest),
-                     "Other network interfaces bond config is inconsistent; "
-                     "interface(s) have disappeared from the bond")
-
-        self.show_step(9)
-        with self.env.d_env.get_admin_remote() as admin_node:
-            restore_cmd = "cp {0} {1}".format(pp_file + "_bak", pp_file)
-            res = admin_node.execute(restore_cmd)
-            assert_equal(0, res['exit_code'],
-                         "Failed to restore the backup copy of {0} puppet "
-                         "manifest on master node".format(pp_file))
-
-        self.show_step(10)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.show_step(11)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.env.make_snapshot("bonding_conf_consistency")
diff --git a/fuelweb_test/tests/test_bonding_base.py b/fuelweb_test/tests/test_bonding_base.py
deleted file mode 100644
index 732163431..000000000
--- a/fuelweb_test/tests/test_bonding_base.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -import copy - -from proboscis.asserts import assert_false - -from fuelweb_test import logger -from fuelweb_test.helpers.utils import get_net_settings -from fuelweb_test.settings import iface_alias -from fuelweb_test.tests.base_test_case import TestBasic - - -class BondingTest(TestBasic): - def __init__(self): - self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG = { - 'mac': None, - 'mode': 'active-backup', - 'state': None, - 'type': 'bond', - 'assigned_networks': [], - 'bond_properties': {'mode': 'active-backup', - 'type__': 'linux'}} - - self.TEMPLATE_NEW_SERIALIZATION_BOND_CONFIG = { - 'mac': None, - 'mode': 'active-backup', - 'state': None, - 'type': 'bond', - 'assigned_networks': [], - 'attributes': { - 'type__': {'type': 'hidden', 'value': 'linux'}}} - - self.INTERFACES = { - 'bond0': [ - 'public', - 'management', - 'storage', - 'private' - ], - 'bond1': ['fuelweb_admin'] - } - self.BOND_LIST = [ - { - 'name': 'bond0', - 'slaves': [ - {'name': iface_alias('eth5')}, - {'name': iface_alias('eth4')}, - {'name': iface_alias('eth3')}, - {'name': iface_alias('eth2')} - ] - }, - { - 'name': 'bond1', - 'slaves': [ - {'name': iface_alias('eth1')}, - {'name': iface_alias('eth0')}] - } - ] - self.BOND_ATTR = {} - super(BondingTest, self).__init__() - self.__cluster_id = None - self.__bond_config = None - - @property - def cluster_id(self): - if self.__cluster_id is None: - self.__cluster_id = self.fuel_web.get_last_created_cluster() - return self.__cluster_id - - @property - def bond_config(self): - if self.__bond_config is None: - self.__bond_config = self._generate_bonding_config() - return self.__bond_config - - @staticmethod - def get_bond_interfaces(bond_config, bond_name): - bond_slaves = [] - for bond in [bond for bond in bond_config]: - if bond['name'] == bond_name: - for slave in bond['slaves']: - bond_slaves.append(slave['name']) - return bond_slaves - - def _is_old_interface_serialization_scheme(self): - node = self.fuel_web.client.list_cluster_nodes(self.cluster_id)[0] - interface = self.fuel_web.client.get_node_interfaces(node['id'])[0] - if 'interface_properties' in interface.keys(): - return True - - def _generate_bonding_config(self): - bonding_config = copy.deepcopy(self.BOND_LIST) - if self._is_old_interface_serialization_scheme(): - data = self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG - else: - data = self.TEMPLATE_NEW_SERIALIZATION_BOND_CONFIG - data['attributes'].update(self.BOND_ATTR) - for bond in bonding_config: - bond.update(data) - return bonding_config - - def check_interfaces_config_after_reboot(self): - network_settings = dict() - skip_interfaces = { - r'^pub-base$', r'^vr_pub-base$', r'^vr-base$', r'^mgmt-base$', - r'^vr-host-base$', r'^mgmt-conntrd$', r'^hapr-host$', - r'^(tap|qr-|qg-|p_).*$', r'^v_vrouter.*$', - r'^v_(management|public)$'} - - nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id) - - for node in nodes: - with self.env.d_env.get_ssh_to_remote(node['ip']) as remote: - network_settings[node['hostname']] = \ - get_net_settings(remote, skip_interfaces) - - self.fuel_web.warm_restart_nodes( - self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes)) - - network_settings_changed = False - - for node in nodes: - with self.env.d_env.get_ssh_to_remote(node['ip']) as remote: - saved_settings = network_settings[node['hostname']] - actual_settings = get_net_settings(remote, skip_interfaces) - if not saved_settings == actual_settings: - network_settings_changed = True - logger.error('Network settings were changed after reboot ' - 'on node {0}! 
'.format(node['hostname'])) - logger.debug('Network settings before the reboot of slave ' - '{0}: {1}'.format(node['hostname'], - saved_settings)) - logger.debug('Network settings after the reboot of slave ' - '{0}: {1}'.format(node['hostname'], - actual_settings)) - - for iface in saved_settings: - if iface not in actual_settings: - logger.error("Interface '{0}' doesn't exist after " - "reboot of '{1}'!".format( - iface, node['hostname'])) - continue - if saved_settings[iface] != actual_settings[iface]: - logger.error("Interface '{0}' settings " - "were changed after reboot " - "of '{1}': was {2}, now " - "{3}.".format(iface, - node['hostname'], - saved_settings[iface], - actual_settings[iface])) - - assert_false(network_settings_changed, - "Network settings were changed after environment nodes " - "reboot! Please check logs for details!") - - -class BondingTestDPDK(BondingTest): - def __init__(self): - super(BondingTestDPDK, self).__init__() - self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG[ - 'interface_properties'] = {'dpdk': {'available': True}} - self.BOND_LIST = [ - { - 'name': 'bond0', - 'slaves': [ - {'name': iface_alias('eth3')}, - {'name': iface_alias('eth2')} - ], - }, - { - 'name': 'bond1', - 'slaves': [ - {'name': iface_alias('eth1')}, - {'name': iface_alias('eth0')} - ], - }, - { - 'name': 'bond2', - 'slaves': [ - {'name': iface_alias('eth5')}, - {'name': iface_alias('eth4')}, - ], - }, - ] - - self.INTERFACES = { - 'bond0': [ - 'public', - 'management', - 'storage', - ], - 'bond1': ['fuelweb_admin'], - 'bond2': ['private'], - } - - self.BOND_ATTR = { - 'dpdk': { - 'enabled': { - 'type': 'checkbox', - 'value': False, - 'weight': 10, - 'label': 'DPDK enabled'}, - 'metadata': {'weight': 40, 'label': 'DPDK'} - }} - - -class BondingTestOffloading(BondingTest): - def __init__(self): - super(BondingTestOffloading, self).__init__() - self.BOND_ATTR = { - "offloading": { - "disable": { - "type": "checkbox", - "value": False, - "weight": 10, - "label": "Disable offloading" - }, - "modes": { - "value": { - "rx-vlan-offload": None, - "tx-scatter-gather": None, - "scatter-gather": None, - "generic-segmentation-offload": None, - "tx-nocache-copy": None, - "tx-checksumming": None, - "generic-receive-offload": None, - "tx-checksum-ip-generic": None, - "rx-all": None, - "rx-fcs": None, - "tcp-segmentation-offload": None, - "tx-tcp-segmentation": None, - "rx-checksumming": None}, - "type": "offloading_modes", - "description": "Offloading modes", - "weight": 20, - "label": "Offloading modes"}, - "metadata": { - "weight": 10, - "label": "Offloading"} - }} diff --git a/fuelweb_test/tests/test_ceph.py b/fuelweb_test/tests/test_ceph.py deleted file mode 100644 index 3eb0c5d71..000000000 --- a/fuelweb_test/tests/test_ceph.py +++ /dev/null @@ -1,1129 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from __future__ import unicode_literals - -import time - -import paramiko -from pkg_resources import parse_version -from proboscis.asserts import assert_true, assert_false, assert_equal -from proboscis import SkipTest -from proboscis import test -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from devops.helpers.ssh_client import SSHAuth -from six import BytesIO -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import configparser -# noinspection PyUnresolvedReferences -from six.moves import cStringIO -# pylint: enable=import-error - -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers import ceph -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.ovs import ovs_get_tag_by_port -from fuelweb_test import ostf_test_mapping -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ceph_ha_one_controller", "ceph"]) -class CephCompact(TestBasic): - """CephCompact.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ceph_ha_one_controller_compact", - "ha_one_controller_nova_ceph", - "ceph_ha_one_controller_compact_neutron", "ceph", - "nova", "deployment"]) - @log_snapshot_after_test - def ceph_ha_one_controller_compact(self): - """Deploy ceph in HA mode with 1 controller - - Scenario: - 1. Create cluster - 2. Add 1 node with controller and ceph OSD roles - 3. Add 2 nodes with compute and ceph OSD roles - 4. Deploy the cluster - 5. Check ceph status - - Duration 35m - Snapshot ceph_ha_one_controller_compact - """ - self.check_run('ceph_ha_one_controller_compact') - self.env.revert_snapshot("ready_with_3_slaves") - data = { - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'tenant': 'ceph1', - 'user': 'ceph1', - 'password': 'ceph1', - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['compute', 'ceph-osd'], - 'slave-03': ['compute', 'ceph-osd'] - } - ) - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ceph_ha_one_controller_compact", is_make=True) - - @test(depends_on=[ceph_ha_one_controller_compact], - groups=["check_ceph_cinder_cow"]) - @log_snapshot_after_test - def check_ceph_cinder_cow(self): - """Check copy-on-write when Cinder creates a volume from Glance image - - Scenario: - 1. Revert a snapshot where ceph enabled for volumes and images: - "ceph_ha_one_controller_compact" - 2. Create a Glance image in RAW disk format - 3. Create a Cinder volume using Glance image in RAW disk format - 4. Check on a ceph-osd node if the volume has a parent image. 
- - Duration 5m - """ - self.env.revert_snapshot("ceph_ha_one_controller_compact") - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), 'ceph1', 'ceph1', - 'ceph1') - - image_data = BytesIO( - self.__class__.__name__.encode(encoding='ascii', errors='ignore')) - image = os_conn.create_image(disk_format='raw', - container_format='bare', - name='test_ceph_cinder_cow', - is_public=True, - data=image_data) - wait(lambda: os_conn.get_image(image.name).status == 'active', - timeout=60 * 2, timeout_msg='Image is not active') - - volume = os_conn.create_volume(size=1, image_id=image.id) - - with self.fuel_web.get_ssh_for_node('slave-01') as remote: - rbd_list = ceph.get_rbd_images_list(remote, 'volumes') - - for item in rbd_list: - if volume.id in item['image']: - assert_true('parent' in item, - "Volume {0} created from image {1} doesn't have" - " parents. Copy-on-write feature doesn't work." - .format(volume.id, image.id)) - assert_true(image.id in item['parent']['image'], - "Volume {0} created from image {1}, but have a " - "different image in parent: {2}" - .format(volume.id, image.id, - item['parent']['image'])) - break - else: - raise Exception("Volume {0} not found!".format(volume.id)) - - -@test(groups=["thread_3", "ceph"]) -class CephCompactWithCinder(TestBasic): - """CephCompactWithCinder.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["ceph_ha_one_controller_with_cinder"]) - @log_snapshot_after_test - def ceph_ha_one_controller_with_cinder(self): - """Deploy ceph with cinder in ha mode with 1 controller - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Add 2 nodes with cinder and ceph OSD roles - 5. Deploy the cluster - 6. Check ceph status - 7. Check partitions on controller node - - Duration 40m - Snapshot ceph_ha_one_controller_with_cinder - """ - try: - self.check_run('ceph_ha_one_controller_with_cinder') - except SkipTest: - return - - self.env.revert_snapshot("ready") - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:4]) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'volumes_ceph': False, - 'images_ceph': True, - 'osd_pool_size': '2', - 'volumes_lvm': True, - 'tenant': 'ceph2', - 'user': 'ceph2', - 'password': 'ceph2' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder', 'ceph-osd'], - 'slave-04': ['cinder', 'ceph-osd'] - } - ) - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - - disks = self.fuel_web.client.get_node_disks( - self.fuel_web.get_nailgun_node_by_name('slave-01')['id']) - - logger.info("Current disk partitions are: \n{d}".format(d=disks)) - - logger.info("Check unallocated space") - # We expect failure here only for release 5.0 due to bug - # https://bugs.launchpad.net/fuel/+bug/1306625, so it is - # necessary to assert_true in the next release. 
-        assert_false(
-            checkers.check_unallocated_space(disks, contr_img_ceph=True),
-            "Check unallocated space on controller")
-
-        # Run ostf
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-
-        self.env.make_snapshot("ceph_ha_one_controller_with_cinder",
-                               is_make=True)
-
-
-@test(groups=["thread_3", "ceph"])
-class CephHA(TestBasic):
-    """CephHA."""  # TODO documentation
-
-    @test(depends_on=[SetupEnvironment.prepare_release],
-          groups=["ceph_ha", "classic_provisioning"])
-    @log_snapshot_after_test
-    def ceph_ha(self):
-        """Deploy ceph with cinder in HA mode
-
-        Scenario:
-            1. Create cluster
-            2. Add 3 nodes with controller and ceph OSD roles
-            3. Add 1 node with ceph OSD role
-            4. Add 2 nodes with compute and ceph OSD roles
-            5. Deploy the cluster
-
-        Duration 90m
-        Snapshot ceph_ha
-
-        """
-        try:
-            self.check_run('ceph_ha')
-        except SkipTest:
-            return
-
-        self.env.revert_snapshot("ready")
-        self.env.bootstrap_nodes(
-            self.env.d_env.nodes().slaves[:6])
-
-        data = {
-            'volumes_ceph': True,
-            'images_ceph': True,
-            'volumes_lvm': False,
-            'tenant': 'cephHA',
-            'user': 'cephHA',
-            'password': 'cephHA',
-            'osd_pool_size': "3",
-            'net_provider': 'neutron',
-            'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
-        }
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE,
-            settings=data
-        )
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller', 'ceph-osd'],
-                'slave-02': ['controller', 'ceph-osd'],
-                'slave-03': ['controller', 'ceph-osd'],
-                'slave-04': ['compute', 'ceph-osd'],
-                'slave-05': ['compute', 'ceph-osd'],
-                'slave-06': ['ceph-osd']
-            }
-        )
-        # Deploy cluster
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.env.make_snapshot("ceph_ha", is_make=True)
-
-    @test(depends_on=[ceph_ha],
-          groups=["ha_nova_ceph", "ha_neutron_ceph", "check_ceph_ha"])
-    @log_snapshot_after_test
-    def check_ceph_ha(self):
-        """Check ceph with cinder in HA mode
-
-        Scenario:
-            1. Revert snapshot with ceph cluster in HA mode
-            2. Check ceph status
-            3. Check ceph version, should be consistent across nodes
-
-        Duration 10m
-        Snapshot check_ceph_ha
-
-        """
-        self.env.revert_snapshot("ceph_ha")
-        cluster_id = self.fuel_web.get_last_created_cluster()
-
-        self.fuel_web.check_ceph_status(cluster_id)
-
-        versions = []
-        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
-            role = '_'.join(node['roles'])
-            logger.debug('{} has role {}'.format(node['fqdn'], role))
-            with self.fuel_web.get_ssh_for_nailgun_node(node) as remote:
-                version = ceph.get_version(remote)
-            logger.info('On {} ceph version is {}'.format(node['fqdn'],
-                                                          version))
-            versions.append({'name': node['fqdn'], 'ceph_version': version})
-
-        ceph_version = versions[0]['ceph_version']
-
-        bad_nodes = [
-            ver for ver in versions
-            if parse_version(ver['ceph_version']) != parse_version(
-                ceph_version)]
-
-        assert_true(len(bad_nodes) == 0,
-                    message="All nodes should have the same Ceph version. "
-                            "Expecting version {0}, the following nodes "
-                            "do not have this version: {1}".format(
-                                ceph_version, bad_nodes))
-        # Run ostf
-        self.fuel_web.run_ostf(
-            cluster_id=cluster_id,
-            test_sets=['ha', 'smoke', 'sanity'])
-
-    @test(depends_on=[ceph_ha],
-          groups=["openstack_stat"])
-    @log_snapshot_after_test
-    def check_openstack_stat(self):
-        """Check openstack statistics on fuel and collector side
-
-        Scenario:
-            1. Revert ceph_ha env
-            2. Create all openstack resources that are collected
-            3. Check that all info was collected on fuel side
-            4. 
Check that info was sent to collector - 5. Check that info is properly saved on collector side - - Duration 20m - Snapshot check_openstack_stat - - """ - self.env.revert_snapshot("ceph_ha") - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), 'cephHA', 'cephHA', - 'cephHA') - - # Check resources addition - # create instance - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - server = os_conn.create_instance( - neutron_network=True, label=net_name) - - # create flavor - flavor = os_conn.create_flavor('openstackstat', 1024, 1, 1) - - # create volume - volume = os_conn.create_volume() - - # create image - devops_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - with self.fuel_web.get_ssh_for_node(devops_node.name) as slave: - if settings.OPENSTACK_RELEASE_CENTOS in settings.OPENSTACK_RELEASE: - slave.execute(". openrc; glance image-create --name" - " 'custom-image' --disk-format qcow2" - " --protected False --visibility public" - " --container-format bare" - " --file /opt/vm/cirros-x86_64-disk.img") - else: - slave.execute( - ". openrc; glance image-create --name" - " 'custom-image' --disk-format qcow2" - " --protected False --visibility public" - " --container-format bare --file" - " /usr/share/cirros-testvm/cirros-x86_64-disk.img") - - image = os_conn.get_image_by_name('custom-image') - logger.debug("image is {}".format(image)) - - # create tenant and user - tenant = os_conn.create_tenant("openstack_tenant") - user = os_conn.create_user('openstack_user', 'qwerty', tenant) - - self.env.nailgun_actions.force_oswl_collect() - self.env.nailgun_actions.force_fuel_stats_sending() - master_uid = self.env.get_masternode_uuid() - checkers.check_oswl_stat(self.env.postgres_actions, - self.env.nailgun_actions, self.env.collector, - master_uid, operation='current', - resources=['vm', 'flavor', 'volume', 'image', - 'tenant', 'keystone_user']) - - # Check resources modification - # suspend instance - server.suspend() - # edit volume - os_conn.extend_volume(volume, 2) - # edit image - os_conn.update_image(image, min_ram=333) - # edit user - os_conn.update_user_enabled(user, enabled=False) - # edit tenant - os_conn.update_tenant(tenant.id, enabled=False) - - self.env.nailgun_actions.force_oswl_collect() - self.env.nailgun_actions.force_fuel_stats_sending() - checkers.check_oswl_stat(self.env.postgres_actions, - self.env.nailgun_actions, self.env.collector, - master_uid, operation='modified', - resources=['vm', 'volume', 'image', - 'tenant', 'keystone_user']) - - # Check resources deletion - # delete instance - server.delete() - # delete flavor - os_conn.delete_flavor(flavor) - # delete volume - os_conn.delete_volume_and_wait(volume, timeout=300) - # delete image - os_conn.delete_image(image.id) - # delete tenant - os_conn.delete_tenant(tenant) - # delete user - os_conn.delete_user(user) - - self.env.nailgun_actions.force_oswl_collect() - self.env.nailgun_actions.force_fuel_stats_sending() - checkers.check_oswl_stat(self.env.postgres_actions, - self.env.nailgun_actions, self.env.collector, - master_uid, operation='removed', - resources=['vm', 'flavor', 'volume', 'image', - 'tenant', 'keystone_user']) - - -@test(groups=["ha_neutron_tun", "ceph"]) -class CephRadosGW(TestBasic): - """CephRadosGW.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["ceph_rados_gw", "bvt_2", "ceph", "neutron", "deployment"]) - 
@log_snapshot_after_test - def ceph_rados_gw(self): - """Deploy ceph HA with RadosGW for objects - - Scenario: - 1. Create cluster with Neutron - 2. Add 3 nodes with controller role - 3. Add 3 nodes with compute and ceph-osd role - 4. Deploy the cluster - 5. Check ceph status - 6. Run OSTF tests - 7. Check the radosgw daemon is started - - Duration 90m - Snapshot ceph_rados_gw - - """ - def radosgw_started(remote): - return remote.check_call('pkill -0 radosgw')['exit_code'] == 0 - - self.env.revert_snapshot("ready") - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:6]) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'tenant': 'rados', - 'user': 'rados', - 'password': 'rados' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - 'slave-06': ['compute', 'ceph-osd'] - } - ) - self.fuel_web.verify_network(cluster_id) - # Deploy cluster - self.fuel_web.deploy_cluster_wait(cluster_id) - - # Network verification - self.fuel_web.verify_network(cluster_id) - - # HAProxy backend checking - controller_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - - for node in controller_nodes: - logger.info("Check all HAProxy backends on {}".format( - node['meta']['system']['fqdn'])) - haproxy_status = checkers.check_haproxy_backend(node['ip']) - assert_equal(haproxy_status['exit_code'], 1, - "HAProxy backends are DOWN. {0}".format( - haproxy_status)) - - self.fuel_web.check_ceph_status(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - # Check the radosgw daemon is started - with self.fuel_web.get_ssh_for_node('slave-01') as remote: - assert_true(radosgw_started(remote), 'radosgw daemon started') - - self.env.make_snapshot("ceph_rados_gw") - - -@test(groups=["ceph_ha_one_controller", "ceph_migration"]) -class VmBackedWithCephMigrationBasic(TestBasic): - """VmBackedWithCephMigrationBasic.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ceph_migration"]) - @log_snapshot_after_test - def migrate_vm_backed_with_ceph(self): - """Check VM backed with ceph migration in ha mode with 1 controller - - Scenario: - 1. Create cluster - 2. Add 1 node with controller and ceph OSD roles - 3. Add 2 nodes with compute and ceph OSD roles - 4. Deploy the cluster - 5. Check ceph status - 6. Run OSTF - 7. Create a new VM, assign floating ip - 8. Migrate VM - 9. Check cluster and server state after migration - 10. Terminate VM - 11. Check that DHCP lease is not offered for MAC of deleted VM - 12. Create a new VM for migration, assign floating ip - 13. Create a volume and attach it to the VM - 14. Create filesystem on the new volume and mount it to the VM - 15. Migrate VM - 16. Check that volume was mounted - 17. Check cluster and server state after migration - 18. 
Terminate VM - - Duration 35m - Snapshot vm_backed_with_ceph_live_migration - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1, initialize=True) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'volumes_lvm': False, - } - ) - - self.show_step(2) - self.show_step(3) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['compute', 'ceph-osd'], - 'slave-03': ['compute', 'ceph-osd'] - } - ) - creds = SSHAuth(username="cirros", password="test") - - self.show_step(4) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - - def _check(): - # Run volume test several times with hope that it pass - test_path = ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance') - logger.debug('Start to run test {0}'.format(test_path)) - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=test_path) - - self.show_step(5) - try: - _check() - except AssertionError: - logger.debug(AssertionError) - logger.debug("Test failed from first probe," - " we sleep 60 second try one more time " - "and if it fails again - test will fails ") - time.sleep(60) - _check() - - self.show_step(6) - - # Run ostf - self.fuel_web.run_ostf(cluster_id) - - self.show_step(7) - - # Create new server - os = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - - logger.info("Create new server") - srv = os.create_server_for_migration( - neutron=True, - scenario='./fuelweb_test/helpers/instance_initial_scenario', - label=net_name) - logger.info("Srv is currently in status: {:s}".format(srv.status)) - - # Prepare to DHCP leases checks - srv_instance_ip = os.get_nova_instance_ip(srv, net_name=net_name) - srv_host_name = self.fuel_web.find_devops_node_by_nailgun_fqdn( - os.get_srv_hypervisor_name(srv), - self.env.d_env.nodes().slaves[:3]).name - net_id = os.get_network(net_name)['id'] - ports = os.get_neutron_dhcp_ports(net_id) - dhcp_server_ip = ports[0]['fixed_ips'][0]['ip_address'] - with self.fuel_web.get_ssh_for_node(srv_host_name) as srv_remote_node: - srv_instance_mac = os.get_instance_mac(srv_remote_node, srv) - - logger.info("Assigning floating ip to server") - floating_ip = os.assign_floating_ip(srv) - srv_host = os.get_srv_host_name(srv) - logger.info("Server is on host {:s}".format(srv_host)) - - wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120, - timeout_msg='new VM ssh port ping timeout') - - def ssh_ready(remote, ip, creds): - """SSH Ready status - - :type ip: str - :type creds: SSHAuth - """ - try: - remote.execute_through_host(ip, '/bin/true', creds) - return True - except paramiko.AuthenticationException: - logger.info("Authentication failed. 
Trying again in a minute.") - time.sleep(60) - return False - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - wait(lambda: ssh_ready(remote, floating_ip.ip, creds), timeout=300) - md5before = remote.execute_through_host( - floating_ip.ip, - "md5sum {:s}".format("/home/test_file"), - auth=creds).stdout_str - - self.show_step(8) - - logger.info("Get available computes") - avail_hosts = os.get_hosts_for_migr(srv_host) - - logger.info("Migrating server") - new_srv = os.migrate_server(srv, avail_hosts[0], timeout=200) - logger.info("Check cluster and server state after migration") - - wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120, - timeout_msg='VM ssh port ping timeout after migration') - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - md5after = remote.execute_through_host( - floating_ip.ip, - "md5sum {:s}".format("/home/test_file"), - auth=creds).stdout_str - - checkers.diff_md5(md5before, md5after) - - self.show_step(9) - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - res = remote.execute_through_host( - floating_ip.ip, - "ping -q -c3 -w10 {0} | grep 'received' |" - " grep -v '0 packets received'" - .format(settings.PUBLIC_TEST_IP), - auth=creds) - logger.info("Ping {0} result on vm is: {1}" - .format(settings.PUBLIC_TEST_IP, res['stdout'])) - - logger.info("Check Ceph health is ok after migration") - self.fuel_web.check_ceph_status(cluster_id) - - logger.info( - "Server is now on host {:s}".format(os.get_srv_host_name(new_srv))) - - self.show_step(10) - - logger.info("Terminate migrated server") - os.delete_instance(new_srv) - os.verify_srv_deleted(new_srv) - - self.show_step(11) - # Check if the dhcp lease for instance still remains - # on the previous compute node. Related Bug: #1391010 - _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'] - with self.fuel_web.get_ssh_for_node('slave-01') as remote: - dhcp_port_tag = ovs_get_tag_by_port(remote, ports[0]['id']) - assert_false(checkers.check_neutron_dhcp_lease(_ip, - srv_instance_ip, - srv_instance_mac, - dhcp_server_ip, - dhcp_port_tag), - "Instance has been deleted, but it's DHCP lease " - "for IP:{0} with MAC:{1} still offers by Neutron DHCP" - " agent.".format(srv_instance_ip, - srv_instance_mac)) - self.show_step(12) - # Create a new server - logger.info("Create a new server for migration with volume") - srv = os.create_server_for_migration( - neutron=True, - scenario='./fuelweb_test/helpers/instance_initial_scenario', - label=net_name) - logger.info("Srv is currently in status: {:s}".format(srv.status)) - - logger.info("Assigning floating ip to server") - floating_ip = os.assign_floating_ip(srv) - srv_host = os.get_srv_host_name(srv) - logger.info("Server is on host {:s}".format(srv_host)) - - self.show_step(13) - logger.info("Create volume") - vol = os.create_volume() - logger.info("Attach volume to server") - os.attach_volume(vol, srv) - - self.show_step(14) - wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120, - timeout_msg='new VM ssh port ping timeout') - logger.info("Create filesystem and mount volume") - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - wait(lambda: ssh_ready(remote, floating_ip.ip, creds), timeout=300) - - remote.execute_through_host( - floating_ip.ip, - 'sudo sh /home/mount_volume.sh', - auth=creds) - - remote.execute_through_host( - floating_ip.ip, - 'sudo touch /mnt/file-on-volume', - auth=creds) - - self.show_step(15) - logger.info("Get available computes") - avail_hosts = os.get_hosts_for_migr(srv_host) - - 
logger.info("Migrating server") - new_srv = os.migrate_server(srv, avail_hosts[0], timeout=120) - - logger.info("Check cluster and server state after migration") - wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120, - timeout_msg='VM ssh port ping timeout after migration') - - self.show_step(16) - logger.info("Check that volume was mounted") - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - out = remote.execute_through_host( - floating_ip.ip, - 'mount | grep "/dev/vdb on /mnt"', - auth=creds) - assert_true(out['stdout'] and out['exit_code'] == 0, - "Volume was not mounted") - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - out = remote.execute_through_host( - floating_ip.ip, - "sudo ls /mnt", - auth=creds) - assert_true("file-on-volume\n" in out['stdout'], - "File is absent in /mnt") - - self.show_step(17) - logger.info("Check Ceph health is ok after migration") - self.fuel_web.check_ceph_status(cluster_id) - - logger.info( - "Server is now on host {:s}".format(os.get_srv_host_name(new_srv))) - - self.show_step(18) - logger.info("Terminate migrated server") - os.delete_instance(new_srv) - os.verify_srv_deleted(new_srv) - - self.env.make_snapshot( - "vm_backed_with_ceph_live_migration") - - -@test(groups=["ceph_ha_one_controller", "ceph_partitions"]) -class CheckCephPartitionsAfterReboot(TestBasic): - """CheckCephPartitionsAfterReboot.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ceph_partitions"]) - @log_snapshot_after_test - def check_ceph_partitions_after_reboot(self): - """Check that Ceph OSD partitions are remounted after reboot - - Scenario: - 1. Create cluster in Ha mode with 1 controller - 2. Add 1 node with controller role - 3. Add 1 node with compute and Ceph OSD roles - 4. Add 1 node with Ceph OSD role - 5. Deploy the cluster - 6. Check Ceph status - 7. Read current partitions - 8. Warm-reboot Ceph nodes - 9. Read partitions again - 10. Check Ceph health - 11. Cold-reboot Ceph nodes - 12. Read partitions again - 13. 
Check Ceph health
-
-        Duration 40m
-        Snapshot check_ceph_partitions_after_reboot
-
-        """
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        self.show_step(1, initialize=True)
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE,
-            settings={
-                'volumes_ceph': True,
-                'images_ceph': True,
-                'osd_pool_size': '2',
-                'ephemeral_ceph': True,
-                'volumes_lvm': False,
-            }
-        )
-
-        self.show_step(2)
-        self.show_step(3)
-        self.show_step(4)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute', 'ceph-osd'],
-                'slave-03': ['ceph-osd']
-            }
-        )
-
-        self.show_step(5)
-        # Deploy cluster
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.show_step(6)
-        for node in ["slave-02", "slave-03"]:
-
-            self.show_step(7, node, True)
-            logger.info("Get partitions for {node}".format(node=node))
-            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
-            before_reboot_partitions = [utils.get_ceph_partitions(
-                _ip,
-                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
-
-            self.show_step(8, node)
-            logger.info("Warm-restart nodes")
-            self.fuel_web.warm_restart_nodes(
-                [self.fuel_web.environment.d_env.get_node(name=node)])
-
-            self.show_step(9, node)
-            logger.info("Get partitions for {node} once again".format(
-                node=node
-            ))
-            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
-            after_reboot_partitions = [utils.get_ceph_partitions(
-                _ip,
-                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
-
-            if before_reboot_partitions != after_reboot_partitions:
-                logger.info("Partitions don't match")
-                logger.info("Before reboot: "
-                            "{:s}".format(before_reboot_partitions))
-                logger.info("After reboot: "
-                            "{:s}".format(after_reboot_partitions))
-                raise Exception("Ceph partitions changed after warm reboot")
-
-            self.show_step(10, node)
-            logger.info("Check Ceph health is ok after reboot")
-            self.fuel_web.check_ceph_status(cluster_id)
-
-            self.show_step(11, node)
-            logger.info("Cold-restart nodes")
-            self.fuel_web.cold_restart_nodes(
-                [self.fuel_web.environment.d_env.get_node(name=node)])
-
-            self.show_step(12, node)
-            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
-            after_reboot_partitions = [utils.get_ceph_partitions(
-                _ip,
-                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
-
-            if before_reboot_partitions != after_reboot_partitions:
-                logger.info("Partitions don't match")
-                logger.info("Before reboot: "
-                            "{:s}".format(before_reboot_partitions))
-                logger.info("After reboot: "
-                            "{:s}".format(after_reboot_partitions))
-                raise Exception("Ceph partitions changed after cold reboot")
-
-            self.show_step(13, node)
-            logger.info("Check Ceph health is ok after reboot")
-            self.fuel_web.check_ceph_status(cluster_id)
-
-
-@test(groups=["default_storage_rados_gw", "ceph"])
-class RadosGW(TestBasic):
-    """RadosGW."""  # TODO documentation
-
-    @test(depends_on=[SetupEnvironment.prepare_release],
-          groups=["radosgw_without_os_services_usage"])
-    @log_snapshot_after_test
-    def radosgw_without_os_services_usage(self):
-        """Deploy ceph HA with RadosGW for objects
-
-        Scenario:
-            1. Create cluster with RadosGW enabled
-            2. Add 3 nodes with controller role
-            3. Add 2 nodes with compute and ceph-osd role
-            4. Add 2 nodes with ceph-osd role
-            5. Verify Network
-            6. Deploy the cluster
-            7. Verify Network
-            8. Run OSTF tests
-            9. Check ceph status
-            10. Check the radosgw daemon is started
-            11. Create custom image via glance
-            12. Compare custom image IDs via glance and swift client
-            13. 
Check S3 API - - Duration 90m - - """ - self.show_step(1) - self.env.revert_snapshot("ready") - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:7]) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': False, - 'objects_ceph': True - } - ) - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute', 'ceph-osd'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'] - } - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.show_step(9) - self.fuel_web.check_ceph_status(cluster_id) - - self.show_step(10) - devops_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - node = self.fuel_web.get_nailgun_node_by_devops_node(devops_node) - - self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="pkill -0 radosgw") - - self.show_step(11) - self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd=". openrc; glance image-create --name" - " 'custom-image' --disk-format qcow2" - " --protected False --visibility public" - " --container-format bare --file" - " /usr/share/cirros-testvm/cirros-x86_64-disk.img") - - settings_source = '/etc/glance/glance-api.conf' - openrc = '~/openrc' - settings_list = ( - "admin_tenant_name", "admin_user", "admin_password") - openrc_settings = ( - "OS_TENANT_NAME", "OS_PROJECT_NAME", "OS_USERNAME", "OS_PASSWORD") - - glance_config = self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="cat {0} | egrep -v '^#'".format( - settings_source))['stdout_str'] - glance_config_file = cStringIO(glance_config) - parser = configparser.ConfigParser() - parser.readfp(glance_config_file) - settings_value = [ - parser.get('keystone_authtoken', value) for value in settings_list] - - settings_value.insert(0, settings_value[0]) - for val in zip(openrc_settings, settings_value): - self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="sed -ie '/{0}=/ s/admin/{1}/g' {2}".format( - val[0], val[1], openrc)) - self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="sed -i 's/5000/5000\/v2.0/g' {0}".format(openrc)) - - glance_image_id = self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd=". openrc; glance image-list | " - "grep custom-image")['stdout'][0].split("|")[1].strip() - - swift_image_ids = self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd=". 
openrc; swift list glance")['stdout'] - - self.show_step(12) - if glance_image_id in [image_id.rstrip() - for image_id in swift_image_ids]: - logger.debug( - "Glance image {0} was found " - "in the swift_image_ids {1}".format( - glance_image_id, swift_image_ids)) - else: - raise Exception( - "The glance_image_id {0} was not found " - "in the list swift_image_ids {1}".format( - glance_image_id, swift_image_ids)) - - self.show_step(13) - keys = self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd='radosgw-admin user create ' - '--uid="s3_main" --display-name="s3_main"', - jsonify=True)['stdout_json']['keys'][0] - access_key = keys['access_key'] - secret_key = keys['secret_key'] - - self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="apt-get install -y python-pip") - self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="pip install {0}".format(settings.S3_API_CLIENT)) - - pub_contr_ip = self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="ip -o -4 addr " - "show br-ex")['stdout'][0].split()[3].split('/')[0] - - self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="s3cmd --access_key={0} --secret_key={1} " - "--no-ssl --host={2}:7480 mb s3://test_bucket".format( - access_key, secret_key, pub_contr_ip)) - - result = self.ssh_manager.execute_on_remote( - ip=node['ip'], - cmd="{0} --access_key={1} --secret_key={2} " - "--no-ssl --host={3}:7480 ls".format( - settings.S3_API_CLIENT, access_key, secret_key, - pub_contr_ip)) - - if 'test_bucket' not in result['stdout_str']: - raise Exception( - "The S3 API call failed: {0}".format(result['stderr'])) diff --git a/fuelweb_test/tests/test_cgroups.py b/fuelweb_test/tests/test_cgroups.py deleted file mode 100644 index ac0a8eeab..000000000 --- a/fuelweb_test/tests/test_cgroups.py +++ /dev/null @@ -1,485 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
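For reference: the bucket-create-and-list verification above shells out to s3cmd (and settings.S3_API_CLIENT) against RadosGW on its default port 7480. The same check can be sketched with boto3, an alternative client rather than anything the suite itself uses; check_radosgw_s3 is a hypothetical helper name, and the access/secret keys are the ones returned by `radosgw-admin user create`:

import boto3

def check_radosgw_s3(host, access_key, secret_key, bucket='test_bucket'):
    # RadosGW exposes the S3 API on port 7480 by default.
    s3 = boto3.client(
        's3',
        endpoint_url='http://{0}:7480'.format(host),
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
    # Create the bucket, then verify it shows up in the listing.
    s3.create_bucket(Bucket=bucket)
    names = [b['Name'] for b in s3.list_buckets()['Buckets']]
    if bucket not in names:
        raise Exception('The S3 API call failed: {0} not listed in {1}'
                        .format(bucket, names))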
-from __future__ import division - -import re -import json - -from proboscis import test -from proboscis import asserts - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.helpers import utils - - -@test(groups=["cgroup_ha"]) -class TestCgroupHa(TestBasic): - """Tests for verification deployment with enabled cgroup.""" - - @staticmethod - def generate_attributes(cgroups): - """Generate cluster attributes structure from cgroups dicts.""" - - attributes = {} - for cgroup in cgroups: - if "limit" not in cgroup: - limit = {} - else: - limit = {cgroup["limit"]: cgroup["value"]} - attributes = utils.dict_merge(attributes, { - cgroup["process"]: { - "label": cgroup["process"], - "type": "text", - "value": { - cgroup["controller"]: limit - } - } - }) - - for cgroup in attributes.values(): - cgroup["value"] = json.dumps(cgroup["value"]) - - return {"editable": {"cgroups": attributes}} - - @staticmethod - def check_cgconfig_setup(config, process, controller, - limit=None, value=None): - """Check /etc/cgconfig.conf contains properly configured cgroup.""" - - actual_limit = config[process][controller] - - if limit is None and value is None: - asserts.assert_equal(actual_limit, {}, - "Actual limit is not empty: {}" - .format(actual_limit)) - else: - asserts.assert_equal(actual_limit[limit], value, - "Actual value limit is not as expected for " - "process {}, controller {}, limit {}, " - "expected value = {}, actual == {}" - .format(process, controller, limit, value, - actual_limit[limit])) - - @staticmethod - def generate_lscgroups(cgroups): - """Generate a list of lscgroups entities from cgroups dicts.""" - - cpu_controller = "cpu,cpuacct" - return ["{}:/{}".format(cpu_controller - if cgroup["controller"] in cpu_controller - else cgroup["controller"], cgroup["process"]) - for cgroup in cgroups] - - def apply_cgroups(self, cgroups, node_ids): - cluster_id = self.fuel_web.get_last_created_cluster() - - self.fuel_web.client.update_cluster_attributes( - cluster_id, self.generate_attributes(cgroups)) - task = self.fuel_web.client.put_deployment_tasks_for_cluster( - cluster_id=cluster_id, - data=["upload_configuration", "configuration_symlink", - "hiera", "cgroups"], - node_id=node_ids) - self.fuel_web.assert_task_success(task) - - def get_cgroups_config(self, nailgun_node): - """Get /etc/cgconfig.conf from node, transform it to json and loads - - Before transformation: - group mysqld { - memory { - memory.swappiness = 0; - } - } - group keystone { - cpu { - cpu.shares = 70; - } - } - group rabbitmq { - blkio { - blkio.weight = 500; - } - memory { - memory.swappiness = 0; - } - } - - After transformation: - { - "mysqld": { - "memory": { - "memory.swappiness": 0 - } - }, - "keystone": { - "cpu": { - "cpu.shares": 70 - } - }, - "rabbitmq": { - "blkio": { - "blkio.weight": 500 - }, - "memory": { - "memory.swappiness": 0 - } - } - } - """ - - cmd = "cat /etc/cgconfig.conf" - result = self.ssh_manager.execute(nailgun_node['ip'], cmd)["stdout"] - cgroups_config = "".join([line for line in result - if not line.startswith("#")]) - - cgroups_to_json = [ - ('group ', ''), # Remove group tag - (' {', ': {'), # Replace { -> : { - ('}', '},'), # Replace } -> }, - (';', ','), # Replace ; -> , - (' = ', ': '), # Replace = -> : - ('[a-z_]+\.{0,1}[a-z_]*', '"\g<0>"'), # Wrap all words with " " - # 
Words could contain period - ('[\s\S]*', '{\g<0> }'), # Wrap whole string with {} - (',[ \t\r\n]+}', '}') # Clear trailing commas - ] - - for pattern, replace in cgroups_to_json: - cgroups_config = re.sub(pattern, replace, cgroups_config) - - return json.loads(cgroups_config) - - def check_cgroups_on_node(self, nailgun_node, cgroups): - """Complex validation of cgroups on particular node.""" - - cgroups_config = self.get_cgroups_config(nailgun_node) - - for cgroup in cgroups: - logger.info("Check cgroup config for {} {} on node {}" - .format(cgroup["process"], cgroup["controller"], - nailgun_node['fqdn'])) - self.check_cgconfig_setup(config=cgroups_config, **cgroup) - - for lscgroup in self.generate_lscgroups(cgroups): - check_group_cmd = 'sudo lscgroup | fgrep -q {}' - logger.info('Check {} group existence on controller node {}' - .format(lscgroup, nailgun_node['fqdn'])) - self.ssh_manager.check_call(nailgun_node['ip'], - check_group_cmd.format(lscgroup)) - - for cgroup in cgroups: - check_rule_cmd = ("fgrep {} /etc/cgrules.conf | fgrep -q {}" - .format(cgroup["process"], cgroup["controller"])) - - logger.info("Check cgrule {} {} on controller node {}" - .format(cgroup["process"], cgroup["controller"], - nailgun_node['fqdn'])) - - self.ssh_manager.check_call(nailgun_node['ip'], check_rule_cmd) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=['deploy_ha_cgroup']) - @log_snapshot_after_test - def deploy_ha_cgroup(self): - """Deploy cluster in HA mode with enabled cgroups - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 1 node with compute role - 4. Add 1 node with cinder role - 5. Deploy the cluster - 6. Check ceph status - 7. Run OSTF - - Duration 90m - Snapshot deploy_ha_cgroup - """ - self.check_run("deploy_ha_cgroup") - self.env.revert_snapshot("ready_with_5_slaves") - data = { - 'tenant': 'cgroup', - 'user': 'cgroup', - 'password': 'cgroup', - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['cinder'] - } - ) - - cgroup_data = [{ - "process": "keystone", - "controller": "cpu", - "limit": "cpu.shares", - "value": 70, - }] - - self.fuel_web.client.update_cluster_attributes( - cluster_id, self.generate_attributes(cgroup_data)) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id) - - # Check that task cgroup was executed - cmd = 'fgrep "MODULAR: cgroups/cgroups.pp" -q /var/log/puppet.log' - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - for nailgun_node in n_ctrls: - logger.info('Check cgroups task on controller node {0}'.format( - nailgun_node["fqdn"])) - - self.ssh_manager.check_call(nailgun_node['ip'], cmd) - - self.check_cgroups_on_node(nailgun_node, cgroup_data) - - self.env.make_snapshot("deploy_ha_cgroup", is_make=True) - - @test(depends_on=[deploy_ha_cgroup], - groups=['apply_cgroups_after_deploy']) - @log_snapshot_after_test - def apply_cgroups_after_deploy(self): - """Apply, reconfigure and disable cgroups limits to services - - Scenario: - 1. Revert snapshot deploy_ha_cgroup - 2. Configure and validate cgroups for mysqld, rabbitmq - and keystone - 3. 
Reconfigure and validate cgroups for mysqld, - rabbitmq and keystone - 4. Disable cgroups for mysqld, rabbitmq and keystone - - Duration 15m - """ - - self.show_step(1) - self.env.revert_snapshot("deploy_ha_cgroup") - - cluster_id = self.fuel_web.get_last_created_cluster() - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - ctrl_ids = ",".join([str(nailgun_node['id']) - for nailgun_node in n_ctrls]) - - self.show_step(2) - cgroups = [ - {"process": "mysqld", - "controller": "memory", - "limit": "memory.swappiness", - "value": 0}, - {"process": "rabbitmq", - "controller": "blkio", - "limit": "blkio.weight", - "value": 500}, - {"process": "rabbitmq", - "controller": "memory", - "limit": "memory.swappiness", - "value": 0}, - {"process": "keystone", - "controller": "cpu", - "limit": "cpu.shares", - "value": 50}, - ] - - self.apply_cgroups(cgroups, ctrl_ids) - for nailgun_node in n_ctrls: - self.check_cgroups_on_node(nailgun_node, cgroups) - - self.show_step(3) - cgroups = [ - {"process": "mysqld", - "controller": "memory", - "limit": "memory.swappiness", - "value": 10}, - {"process": "rabbitmq", - "controller": "blkio", - "limit": "blkio.weight", - "value": 400}, - {"process": "rabbitmq", - "controller": "memory", - "limit": "memory.swappiness", - "value": 60}, - {"process": "keystone", - "controller": "cpu", - "limit": "cpu.shares", - "value": 70}, - ] - - self.apply_cgroups(cgroups, ctrl_ids) - for nailgun_node in n_ctrls: - self.check_cgroups_on_node(nailgun_node, cgroups) - - self.show_step(4) - cgroups = [ - {"process": "mysqld", - "controller": "memory"}, - {"process": "rabbitmq", - "controller": "blkio"}, - {"process": "rabbitmq", - "controller": "memory"}, - {"process": "keystone", - "controller": "cpu"}, - ] - - self.apply_cgroups(cgroups, ctrl_ids) - for nailgun_node in n_ctrls: - self.check_cgroups_on_node(nailgun_node, cgroups) - - @test(depends_on=[deploy_ha_cgroup], - groups=['apply_relative_cgroups_after_deploy']) - @log_snapshot_after_test - def apply_relative_cgroups_after_deploy(self): - """Apply relative cgroups limits to services - - Scenario: - 1. Revert snapshot deploy_ha_cgroup - 2. 
Configure and validate cgroups for mysqld, rabbitmq - and keystone with relative memory count - - Duration 15m - """ - self.show_step(1) - self.env.revert_snapshot("deploy_ha_cgroup") - - cluster_id = self.fuel_web.get_last_created_cluster() - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - ctrl_ids = ",".join([str(nailgun_node['id']) - for nailgun_node in n_ctrls]) - - self.show_step(2) - cgroups = [ - {"process": "mysqld", - "controller": "memory", - "limit": "memory.swappiness", - "value": 0}, - {"process": "mysqld", - "controller": "memory", - "limit": "memory.soft_limit_in_bytes", - "value": "%5,10,3000"}, - {"process": "rabbitmq", - "controller": "blkio", - "limit": "blkio.weight", - "value": 500}, - {"process": "rabbitmq", - "controller": "memory", - "limit": "memory.soft_limit_in_bytes", - "value": "%99,10,250"}, - {"process": "keystone", - "controller": "cpu", - "limit": "cpu.shares", - "value": 50}, - {"process": "keystone", - "controller": "memory", - "limit": "memory.soft_limit_in_bytes", - "value": "%1,250,2500"}, - ] - - self.apply_cgroups(cgroups, ctrl_ids) - - memory = float("".join(self.ssh_manager.execute( - n_ctrls[0]["ip"], "facter memorysize_mb")["stdout"])) - - for cgroup in cgroups: - if cgroup["limit"] == "memory.soft_limit_in_bytes": - # pylint: disable=no-member - percent, min_mem, max_mem = cgroup["value"].split(",") - # pylint: enable=no-member - percent = int(percent.replace("%", "")) * memory / 100 - min_mem, max_mem = int(min_mem), int(max_mem) - - value = sorted((min_mem, percent, max_mem))[1] - cgroup["value"] = int(value * 1024 * 1024) - - logger.info("New cgroups to verify: {}".format(cgroups)) - for nailgun_node in n_ctrls: - self.check_cgroups_on_node(nailgun_node, cgroups) - - @test(depends_on=[deploy_ha_cgroup], - groups=['apply_cgroups_reboot_node']) - @log_snapshot_after_test - def apply_cgroups_reboot_node(self): - """Apply cgroups limits to services, reboot, verify - - Scenario: - 1. Revert snapshot deploy_ha_cgroup - 2. Configure and validate cgroups for mysqld, rabbitmq - and keystone - 3. Reboot controller - 4. 
Validate cgroups for mysqld, rabbitmq and keystone - - Duration 15m - """ - - self.show_step(1) - self.env.revert_snapshot("deploy_ha_cgroup") - - cluster_id = self.fuel_web.get_last_created_cluster() - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - ctrl_ids = ",".join([str(nailgun_node['id']) - for nailgun_node in n_ctrls]) - - self.show_step(2) - cgroups = [ - {"process": "mysqld", - "controller": "memory", - "limit": "memory.swappiness", - "value": 0}, - {"process": "rabbitmq", - "controller": "blkio", - "limit": "blkio.weight", - "value": 500}, - {"process": "rabbitmq", - "controller": "memory", - "limit": "memory.swappiness", - "value": 0}, - {"process": "keystone", - "controller": "cpu", - "limit": "cpu.shares", - "value": 50}, - ] - - self.apply_cgroups(cgroups, ctrl_ids) - for nailgun_node in n_ctrls: - self.check_cgroups_on_node(nailgun_node, cgroups) - - self.show_step(3) - target_controller = self.fuel_web.get_nailgun_primary_node( - self.fuel_web.get_devops_node_by_nailgun_node(n_ctrls[0])) - self.fuel_web.cold_restart_nodes([target_controller]) - - self.show_step(4) - self.check_cgroups_on_node(n_ctrls[0], cgroups) diff --git a/fuelweb_test/tests/test_cli.py b/fuelweb_test/tests/test_cli.py deleted file mode 100644 index 636c20a87..000000000 --- a/fuelweb_test/tests/test_cli.py +++ /dev/null @@ -1,674 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from devops.helpers.helpers import wait -from proboscis import test -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_false -from proboscis.asserts import assert_true - -from fuelweb_test.helpers.checkers import check_cluster_presence -from fuelweb_test.helpers.checkers import check_cobbler_node_exists -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import generate_floating_ranges -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import SSL_CN -from fuelweb_test.settings import PATH_TO_PEM -from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.tests import test_cli_base -from fuelweb_test import logger - - -@test(groups=["command_line_minimal"]) -class CommandLineMinimal(TestBasic): - """CommandLineMinimal.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.setup_with_custom_manifests], - groups=["hiera_deploy"]) - @log_snapshot_after_test - def hiera_deploy(self): - """Deploy cluster with controller node only - - Scenario: - 1. Start installation of master - 2. Enter "fuelmenu" - 3. Upload custom manifests - 4. Kill "fuelmenu" pid - 5. 
Deploy hiera manifest - - Duration 20m - """ - self.env.revert_snapshot("empty_custom_manifests") - - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:1]) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE - ) - self.fuel_web.update_nodes( - cluster_id, - {'slave-01': ['controller']} - ) - admin_ip = self.ssh_manager.admin_ip - node_id = self.fuel_web.get_nailgun_node_by_devops_node( - self.env.d_env.nodes().slaves[0])['id'] - cmd = 'fuel node --node {0} --provision --env {1}'.format(node_id, - cluster_id) - self.ssh_manager.execute_on_remote(admin_ip, cmd) - self.fuel_web.provisioning_cluster_wait(cluster_id) - cmd = 'fuel node --node {0} --end hiera --env {1}'.format(node_id, - cluster_id) - self.ssh_manager.execute_on_remote(admin_ip, cmd) - cmd = 'fuel task | grep deployment | awk \'{print $9}\'' - wait(lambda: int( - self.ssh_manager.execute_on_remote( - admin_ip, cmd)['stdout'][0].rstrip()) == 100, timeout=120, - timeout_msg='hiera manifest was not applied') - cmd = 'ssh -q node-{0} "hiera role"'.format(node_id) - role = self.ssh_manager.execute_on_remote( - admin_ip, cmd)['stdout'][0].rstrip() - assert_equal(role, 'primary-controller', "node with deployed hiera " - "was not found") - - -@test(groups=["command_line"]) -class CommandLineTest(test_cli_base.CommandLine): - """CommandLine.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_selected_nodes_deploy"]) - @log_snapshot_after_test - def cli_selected_nodes_deploy(self): - """Create and deploy environment using Fuel CLI and check CN name - is equal to the public name passed via UI (user-owned cert) - - Scenario: - 1. Create environment using fuel-qa - 2. Create a cluster using Fuel CLI - 3. Add floating ranges for public network - 4. Allow public network assignment for all nodes - 5. Get cluster settings - 6. Provision a controller node using Fuel CLI - 7. Provision two compute+cinder nodes using Fuel CLI - 8. Deploy the controller node using Fuel CLI - 9. Deploy the compute+cinder nodes using Fuel CLI - 10. Compare network settings after compute deployment task - 11. Verify network - 12. Check that all services work by 'https' - 13. Check that all services have domain name - 14. Find 'CN' value at the output: - CN value is equal to the value specified - at certificate provided via Fuel UI - 15. Find keypair data at the output: - Keypair data is equal to the value specified - at certificate provided via Fuel UI - 16. Compare floating ranges - 17. Get deployment-info - 18. Get cluster settings after deployment task - 19. Compare cluster settings after deploy and before deploy - 20. 
Run OSTF - - - Duration 50m - """ - self.env.revert_snapshot("ready_with_3_slaves") - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()]) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - admin_ip = self.ssh_manager.admin_ip - # Create an environment - self.show_step(1) - if NEUTRON_SEGMENT_TYPE: - nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE) - else: - nst = '' - self.show_step(2) - cmd = ('fuel env create --name={0} --release={1} {2} --json'.format( - self.__class__.__name__, release_id, nst)) - env_result =\ - self.ssh_manager.execute_on_remote(admin_ip, cmd, - jsonify=True)['stdout_json'] - cluster_id = env_result['id'] - self.show_step(3) - # Update network parameters - self.update_cli_network_configuration(cluster_id) - # Change floating ranges - current_floating_range = self.get_floating_ranges(cluster_id) - logger.info("Current floating ranges: {0}".format( - current_floating_range)) - first_floating_address = current_floating_range[0][0] - logger.info("First floating address: {0}".format( - first_floating_address)) - last_floating_address = current_floating_range[0][1] - logger.info("Last floating address: {0}".format(last_floating_address)) - new_floating_range = generate_floating_ranges(first_floating_address, - last_floating_address, - 10) - logger.info("New floating range: {0}".format(new_floating_range)) - self.change_floating_ranges(cluster_id, new_floating_range) - # Update SSL configuration - self.update_ssl_configuration(cluster_id) - - # Allow public network assignment for all nodes - # Get cluster settings before deploy - self.show_step(4) - self.show_step(5) - cluster_settings = self.download_settings(cluster_id) - cluster_settings['editable']['public_network_assignment'][ - 'assign_to_all_nodes']['value'] = True - self.upload_settings(cluster_id, cluster_settings) - self.show_step(6) - # Add and provision a controller node - logger.info("Add to the cluster \ - and start provisioning a controller node [{0}]".format(node_ids[0])) - cmd = ('fuel --env-id={0} node set --node {1}\ - --role=controller'.format(cluster_id, node_ids[0])) - self.ssh_manager.execute_on_remote(admin_ip, cmd) - self.update_node_interfaces(node_ids[0]) - cmd = ('fuel --env-id={0} node --provision --node={1} --json'.format( - cluster_id, node_ids[0])) - task = self.ssh_manager.execute_on_remote(admin_ip, - cmd, - jsonify=True)['stdout_json'] - self.assert_cli_task_success(task, timeout=30 * 60) - self.show_step(7) - # Add and provision 2 compute+cinder - logger.info("Add to the cluster and start provisioning two " - "compute+cinder nodes [{0},{1}]".format(node_ids[1], - node_ids[2])) - cmd = ('fuel --env-id={0} node set --node {1},{2} \ - --role=compute,cinder'.format(cluster_id, node_ids[1], node_ids[2])) - self.ssh_manager.execute_on_remote(admin_ip, cmd) - for node_id in (node_ids[1], node_ids[2]): - self.update_node_interfaces(node_id) - cmd = ('fuel --env-id={0} node --provision \ - --node={1},{2} --json'.format(cluster_id, node_ids[1], node_ids[2])) - task = self.ssh_manager.execute_on_remote(admin_ip, - cmd, - jsonify=True)['stdout_json'] - self.assert_cli_task_success(task, timeout=10 * 60) - self.show_step(8) - # Deploy the controller node - cmd = ('fuel --env-id={0} node --deploy --node {1} --json'.format( - cluster_id, node_ids[0])) - task = self.ssh_manager.execute_on_remote(admin_ip, - cmd, - jsonify=True)['stdout_json'] - self.assert_cli_task_success(task, timeout=60 * 60) - - 
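        # assert_cli_task_success (from the CommandLine base class in
        # test_cli_base.py, below) polls `fuel2 task show -f json <id>`
        # until the task leaves the pending/running states and then asserts
        # that its final status is 'ready'; assert_all_tasks_completed
        # additionally walks `fuel2 task history show <id>` for every
        # deployment transaction.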
self.assert_all_tasks_completed(cluster_id=cluster_id) - - self.show_step(9) - # Deploy the compute nodes - cmd = ('fuel --env-id={0} node --deploy --node {1},{2} --json'.format( - cluster_id, node_ids[1], node_ids[2])) - task = self.ssh_manager.execute_on_remote(admin_ip, - cmd, - jsonify=True)['stdout_json'] - - self.wait_cli_task_status(task=task, status='running') - # Fuel 9.1 is async, so we should wait for real task start - network_settings = self.get_networks(cluster_id) - - self.assert_cli_task_success(task, timeout=30 * 60) - - self.assert_all_tasks_completed(cluster_id=cluster_id) - # Verify networks - self.show_step(10) - network_configuration = self.get_net_config_cli() - assert_equal(network_settings, - network_configuration, - message='Network settings are not equal before' - ' and after deploy') - self.show_step(11) - self.fuel_web.verify_network(cluster_id) - controller_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - # Get controller ip address - controller_node = controller_nodes[0]['ip'] - # Get endpoint list - endpoint_list = self.get_endpoints(controller_node) - logger.info(endpoint_list) - # Check protocol and domain names for endpoints - self.show_step(12) - self.show_step(13) - for endpoint in endpoint_list: - logger.debug(("Endpoint {0} use protocol {1}\ - and have domain name {2}".format(endpoint['service_name'], - endpoint['protocol'], - endpoint['domain']))) - assert_equal(endpoint['protocol'], "https", - message=("Endpoint {0} don't use https.".format( - endpoint['service_name']))) - assert_equal(endpoint['domain'], SSL_CN, message=( - "{0} domain name not equal {1}.".format( - endpoint['service_name'], SSL_CN))) - self.show_step(14) - current_ssl_cn = self.get_current_ssl_cn(controller_node) - logger.info(("CN before cluster deploy {0} \ - and after deploy {1}".format(SSL_CN, current_ssl_cn))) - assert_equal(SSL_CN, current_ssl_cn, message="SSL CNs are not equal") - self.show_step(15) - with open(PATH_TO_PEM) as pem_file: - old_ssl_keypair = pem_file.read().strip() - current_ssl_keypair = self.get_current_ssl_keypair(controller_node) - logger.info( - "SSL keypair before cluster deploy:\n" - "{0}\n" - "and after deploy:\n" - "{1}".format(old_ssl_keypair, current_ssl_keypair) - ) - assert_equal(old_ssl_keypair, current_ssl_keypair, - message="SSL keypairs are not equal") - self.show_step(16) - actual_floating_ranges = self.hiera_floating_ranges(controller_node) - logger.info("Current floating ranges: {0}".format( - actual_floating_ranges)) - assert_equal(actual_floating_ranges, new_floating_range, - message="Floating ranges are not equal") - # Get deployment task id - task_id = self.get_first_task_id_by_name(cluster_id, 'deployment') - self.show_step(17) - # Get deployment info - self.get_deployment_info_cli(task_id) - self.show_step(18) - # Get cluster settings after deploy - cluster_config = self.get_cluster_config_cli(task_id) - self.show_step(19) - # Compare cluster settings - assert_equal(cluster_settings, - cluster_config, - message='Cluster settings are not equal before' - ' and after deploy') - self.show_step(20) - # Run OSTF - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - self.env.make_snapshot("cli_selected_nodes_deploy", is_make=True) - - @test(depends_on_groups=['cli_selected_nodes_deploy'], - groups=["cli_node_deletion_check"]) - @log_snapshot_after_test - def cli_node_deletion_check(self): - """Destroy node and remove it from Nailgun using Fuel CLI - - Scenario: - 1. 
Revert snapshot 'cli_selected_nodes_deploy' - 2. Check 'slave-03' is present - 3. Destroy 'slave-03' - 4. Wait until 'slave-03' become offline - 5. Delete offline 'slave-03' from db - 6. Check presence of 'slave-03' - - Duration 30m - - """ - self.env.revert_snapshot("cli_selected_nodes_deploy") - - node = self.env.d_env.nodes().slaves[2] - node_id = self.fuel_web.get_nailgun_node_by_devops_node(node)['id'] - - assert_true(check_cobbler_node_exists(self.ssh_manager.admin_ip, - node_id), - "node-{0} is not found".format(node_id)) - node.destroy() - self.fuel_web.wait_node_is_offline(node, timeout=60 * 6) - - admin_ip = self.ssh_manager.admin_ip - cmd = 'fuel node --node-id {0} --delete-from-db'.format(node_id) - res = self.ssh_manager.execute_on_remote(admin_ip, cmd) - assert_true( - res['exit_code'] == 0, - "Offline node-{0} was not" - "deleted from database".format(node_id)) - - cmd = "fuel node | awk '{{print $1}}' | grep -w '{0}'".format(node_id) - - wait( - lambda: not self.ssh_manager.execute_on_remote( - admin_ip, - cmd, - raise_on_assert=False)['exit_code'] == 0, timeout=60 * 4, - timeout_msg='After deletion node-{0} is found in fuel list' - ''.format(node_id)) - - is_cobbler_node_exists = check_cobbler_node_exists( - self.ssh_manager.admin_ip, node_id) - - assert_false(is_cobbler_node_exists, - "After deletion node-{0} is found in cobbler list". - format(node_id)) - cmd = "fuel env | tail -n 1 | awk {'print $1'}" - cluster_id = self.ssh_manager.execute_on_remote( - admin_ip, cmd)['stdout_str'] - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - @test(depends_on_groups=['cli_selected_nodes_deploy'], - groups=["cli_cluster_deletion"]) - @log_snapshot_after_test - def cli_cluster_deletion(self): - """Delete a cluster using Fuel CLI - - Scenario: - 1. Revert snapshot 'cli_selected_nodes_deploy' - 2. Delete cluster via cli - 3. Check cluster absence in the list - - Duration 25m - - """ - self.env.revert_snapshot("cli_selected_nodes_deploy") - - cluster_id = self.fuel_web.get_last_created_cluster() - - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - online_nodes = [node for node in nodes if node['online']] - if nodes != online_nodes: - logger.error( - 'Some slaves do not become online after revert!!' - ' Expected {0} Actual {1}'.format(nodes, online_nodes)) - - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel --env {0} env delete --force'.format(cluster_id) - ) - - wait(lambda: - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd="fuel env | awk '{print $1}' | tail -n 1 | " - "grep '^.$'", - raise_on_assert=False)['exit_code'] == 1, timeout=60 * 10, - timeout_msg='cluster {0} was not deleted'.format(cluster_id)) - - assert_false( - check_cluster_presence(cluster_id, self.env.postgres_actions), - "cluster {0} is found".format(cluster_id)) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["cli_selected_nodes_deploy_huge"]) - @log_snapshot_after_test - def cli_selected_nodes_deploy_huge(self): - """Create and deploy huge environment using Fuel CLI - - Scenario: - 1. Revert snapshot "ready_with_9_slaves" - 2. Create a cluster - 3. Set replication factor 2 - 4. Set ceph usage for images, cinder for volumes - 5. Get cluster settings before deploy - 6. Provision a controller node using Fuel CLI - 7. Provision one compute node using Fuel CLI - 8. Provision one cinder node using Fuel CLI - 9. 
Provision two ceph-osd nodes using Fuel CLI - 10. Provision two base-os node using Fuel CLI - 11. Leave 2 nodes in discover state - 12. Deploy the ceph-osd and controller nodes using Fuel CLI - 13. Deploy the compute node using Fuel CLI - 14. Deploy the cinder node using Fuel CLI - 15. Deploy the base-os node using Fuel CLI - 16. Check that nodes in discover state stay in it - 17. Get deployment-info - 18. Get cluster settings after deployment task - 19. Compare cluster settings after deploy and before deploy - 20. Run OSTF - - Duration 60m - """ - self.show_step(1) - self.env.revert_snapshot("ready_with_9_slaves") - data = { - 'volumes_ceph': False, - 'images_ceph': True, - 'volumes_lvm': True, - 'objects_ceph': True, - 'osd_pool_size': '2', - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT['vlan'], - 'assign_to_all_nodes': True, - 'tenant': 'huge_cli', - 'user': 'huge_cli', - 'password': 'huge_cli' - } - self.show_step(2) - self.show_step(3) - self.show_step(4) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - # Get nodes ids - node_ids = [node['id'] for node in self.fuel_web.client.list_nodes()] - admin_ip = self.ssh_manager.admin_ip - self.show_step(5) - cluster_settings = self.download_settings(cluster_id) - # Add and provision a controller node node_ids[0] - self.show_step(6, 'on node {0}'.format(node_ids[0])) - - cmd = ( - 'fuel --env-id={0} node set --node {1} --role=controller' - ''.format(cluster_id, node_ids[0])) - self.ssh_manager.check_call(admin_ip, cmd) - self.update_node_interfaces(node_ids[0]) - cmd = ( - 'fuel --env-id={0} node --provision --node={1} --json' - ''.format(cluster_id, node_ids[0])) - task = self.ssh_manager.check_call( - admin_ip, - cmd).stdout_json - self.assert_cli_task_success(task, timeout=80 * 60) - - self.assert_all_tasks_completed(cluster_id=cluster_id) - - assert_equal( - 1, - len(self.fuel_web.get_nailgun_node_by_status('provisioned')), - 'Some unexpected nodes were provisioned,' - ' current list of provisioned ' - 'nodes {}'.format( - self.fuel_web.get_nailgun_node_by_status('provisioned'))) - - # Add and provision 1 compute node_ids[1] - self.show_step(7, details='using node id {}'.format(node_ids[1])) - cmd = ( - 'fuel --env-id={0} node set --node {1} --role=compute' - ''.format(cluster_id, node_ids[1])) - self.ssh_manager.check_call(admin_ip, cmd) - self.update_node_interfaces(node_ids[1]) - - cmd = ( - 'fuel --env-id={0} node --provision --node={1} --json' - ''.format(cluster_id, node_ids[1])) - task = self.ssh_manager.check_call(admin_ip, cmd,).stdout_json - self.assert_cli_task_success(task, timeout=10 * 60) - - self.assert_all_tasks_completed(cluster_id=cluster_id) - - assert_equal( - 2, - len(self.fuel_web.get_nailgun_node_by_status('provisioned')), - 'Some unexpected nodes were provisioned,' - ' current list of provisioned ' - 'nodes {}'.format( - self.fuel_web.get_nailgun_node_by_status('provisioned'))) - - # Add and provision 1 cinder node_ids[2] - self.show_step(8, details='using node id {}'.format(node_ids[2])) - cmd = ( - 'fuel --env-id={0} node set --node {1} --role=cinder' - ''.format(cluster_id, node_ids[2])) - self.ssh_manager.check_call(admin_ip, cmd) - self.update_node_interfaces(node_ids[2]) - - cmd = ( - 'fuel --env-id={0} node --provision --node={1} --json' - ''.format(cluster_id, node_ids[2])) - task = self.ssh_manager.check_call(admin_ip, cmd).stdout_json - self.assert_cli_task_success(task, timeout=10 * 60) - - 
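        # Each provisioning step is followed by a node-count check against
        # get_nailgun_node_by_status('provisioned') so that any node
        # provisioned outside the requested set fails the test early; the
        # expected running totals are 1 (controller), 2 (+compute),
        # 3 (+cinder), 5 (+2 ceph-osd) and 7 (+2 base-os).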
self.assert_all_tasks_completed(cluster_id=cluster_id) - - assert_equal( - 3, - len(self.fuel_web.get_nailgun_node_by_status('provisioned')), - 'Some unexpected nodes were provisioned,' - ' current list of provisioned ' - 'nodes {}'.format( - self.fuel_web.get_nailgun_node_by_status('provisioned'))) - - # Add and provision 2 ceph-osd node_ids[3], node_ids[4] - self.show_step(9, details='using node ids {0}, {1}'.format( - node_ids[3], node_ids[4])) - cmd = ( - 'fuel --env-id={0} node set --node {1},{2} ' - '--role=ceph-osd'.format(cluster_id, node_ids[3], node_ids[4])) - self.ssh_manager.check_call(admin_ip, cmd) - for node_id in (node_ids[3], node_ids[4]): - self.update_node_interfaces(node_id) - - cmd = ('fuel ' - '--env-id={0} node --provision ' - '--node {1},{2} ' - '--json'.format(cluster_id, node_ids[3], node_ids[4])) - task = self.ssh_manager.check_call(admin_ip, cmd).stdout_json - self.assert_cli_task_success(task, timeout=10 * 60) - - self.assert_all_tasks_completed(cluster_id=cluster_id) - - assert_equal( - 5, - len(self.fuel_web.get_nailgun_node_by_status('provisioned')), - 'Some unexpected nodes were provisioned,' - ' current list of provisioned ' - 'nodes {}'.format( - self.fuel_web.get_nailgun_node_by_status('provisioned'))) - # Add and provision 2 base-os node node_ids[5], node_ids[6] - self.show_step(10, details='using node ids {0},{1}'.format( - node_ids[5], node_ids[6])) - cmd = ('fuel --env-id={0} node set --node {1},{2} ' - '--role=base-os'.format(cluster_id, node_ids[5], node_ids[6])) - self.ssh_manager.check_call(admin_ip, cmd) - for node_id in (node_ids[5], node_ids[6]): - self.update_node_interfaces(node_id) - - cmd = ('fuel --env-id={0} node --provision ' - '--node={1},{2} --json'.format(cluster_id, node_ids[5], - node_ids[6])) - task = self.ssh_manager.check_call(admin_ip, cmd).stdout_json - self.assert_cli_task_success(task, timeout=10 * 60) - - self.assert_all_tasks_completed(cluster_id=cluster_id) - - assert_equal( - 7, - len(self.fuel_web.get_nailgun_node_by_status('provisioned')), - 'Some unexpected nodes were provisioned,' - ' current list of provisioned ' - 'nodes {}'.format( - self.fuel_web.get_nailgun_node_by_status('provisioned'))) - - self.show_step(11) - # Add 2 compute but do not deploy node_ids[7] node_ids[8] - cmd = ('fuel --env-id={0} node set --node {1},{2} ' - '--role=compute'.format(cluster_id, node_ids[7], node_ids[8])) - self.ssh_manager.check_call(admin_ip, cmd) - - node_discover = self.fuel_web.get_nailgun_node_by_status('discover') - assert_equal( - 2, - len(node_discover), - 'Some unexpected nodes were provisioned,' - ' current list of provisioned ' - 'nodes {}'.format( - [node['id'] for node in node_discover])) - - for node in node_discover: - assert_true(node['pending_addition']) - - # Deploy ceph-osd and controller nodes - # node_ids[0], node_ids[3] node_ids[4] - self.show_step(12, details='for node ids {0}, {1}, {2}'.format( - node_ids[0], node_ids[3], node_ids[4])) - cmd = ( - 'fuel --env-id={0} node --deploy --node {1},{2},{3} --json'.format( - cluster_id, node_ids[0], node_ids[3], node_ids[4])) - task = self.ssh_manager.check_call(admin_ip, cmd).stdout_json - self.assert_cli_task_success(task, timeout=80 * 60) - - self.assert_all_tasks_completed(cluster_id=cluster_id) - self.show_step(13, details='for node id {}'.format(node_ids[1])) - # Deploy the compute node node_ids[1] - cmd = ('fuel --env-id={0} node --deploy --node {1} --json'.format( - cluster_id, node_ids[1])) - task = self.ssh_manager.check_call(admin_ip, cmd).stdout_json - 
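        # check_call(...).stdout_json parses the CLI's --json output into a
        # dict; assert_cli_task_success below polls that task record by id
        # until the deployment reaches a terminal state.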
self.assert_cli_task_success(task, timeout=30 * 60) - self.assert_all_tasks_completed(cluster_id=cluster_id) - - # Deploy the cinder node node_ids[2] - self.show_step(14, details='for node id {}'.format(node_ids[2])) - cmd = ('fuel --env-id={0} node --deploy --node {1} --json'.format( - cluster_id, node_ids[2])) - task = self.ssh_manager.check_call(admin_ip, cmd).stdout_json - self.assert_cli_task_success(task, timeout=60 * 60) - self.assert_all_tasks_completed(cluster_id=cluster_id) - - # Deploy the base-os node node_ids[5] node_ids[6] - self.show_step(15, details='for node id {0} {1}'.format(node_ids[5], - node_ids[6])) - cmd = ('fuel --env-id={0} node --deploy --node {1},{2} --json'.format( - cluster_id, node_ids[5], node_ids[6])) - task = self.ssh_manager.check_call(admin_ip, cmd).stdout_json - self.assert_cli_task_success(task, timeout=60 * 60) - self.assert_all_tasks_completed(cluster_id=cluster_id) - - self.show_step(16) - self.fuel_web.verify_network(cluster_id) - node_discover_after_deploy = self.fuel_web.get_nailgun_node_by_status( - 'discover') - assert_equal( - 2, - len(node_discover_after_deploy), - 'Some unexpected nodes were deployed,' - ' current list of discover nodes {}'.format( - [node['id'] for node in node_discover_after_deploy])) - - for node in node_discover_after_deploy: - assert_true(node['pending_addition']) - self.show_step(17) - task_id = self.get_first_task_id_by_name(cluster_id, 'deployment') - self.get_deployment_info_cli(task_id) - self.show_step(18) - cluster_config = self.get_cluster_config_cli(task_id) - self.show_step(19) - assert_equal(cluster_settings, - cluster_config, - message='Cluster settings are not equal before' - ' and after deploy') - # Run OSTF - self.show_step(20) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - self.env.make_snapshot("cli_selected_nodes_deploy_huge") diff --git a/fuelweb_test/tests/test_cli_base.py b/fuelweb_test/tests/test_cli_base.py deleted file mode 100644 index 940e87b06..000000000 --- a/fuelweb_test/tests/test_cli_base.py +++ /dev/null @@ -1,502 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
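The CommandLine helper class that follows keeps repeating one pattern: run a `fuel2 ... -f json` command on the master node, parse the JSON, and poll until an asynchronous task reaches a terminal state. A condensed, self-contained sketch of that pattern, run locally through subprocess purely for illustration (the real class goes through ssh_manager over SSH, and fuel2_json/wait_task_ready are hypothetical names):

import json
import subprocess
import time

def fuel2_json(argv):
    # Run a fuel2 subcommand that was asked for JSON output and parse it.
    return json.loads(subprocess.check_output(['fuel2'] + argv))

def wait_task_ready(task_id, timeout=70 * 60, interval=20):
    # Poll the task until it leaves pending/running, then require 'ready',
    # mirroring assert_cli_task_success below.
    deadline = time.time() + timeout
    while time.time() < deadline:
        task = fuel2_json(['task', 'show', '-f', 'json', str(task_id)])
        if task['status'] not in ('pending', 'running'):
            assert task['status'] == 'ready', (
                'Task {0} finished as {1}'.format(task_id, task['status']))
            return task
        time.sleep(interval)
    raise AssertionError(
        'Task {0} did not finish within {1}s'.format(task_id, timeout))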
- -import json -import time - -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true - -from devops.helpers.helpers import wait -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import urllib -# pylint: enable=import-error - -from core.helpers.log_helpers import logwrap - -from fuelweb_test.helpers.checkers import fail_deploy -from fuelweb_test.helpers.checkers import incomplete_deploy -from fuelweb_test.helpers.checkers import incomplete_tasks -from fuelweb_test.helpers.ssl_helpers import change_cluster_ssl_config -from fuelweb_test.helpers import utils -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test import logger -from fuelweb_test.helpers.utils import hiera_json_out -from fuelweb_test.settings import iface_alias -from fuelweb_test.settings import SSL_CN - - -class CommandLine(TestBasic): - """CommandLine.""" # TODO documentation - - @logwrap - def get_task(self, task_id): - tasks = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 task show -f json {}'.format(task_id), - jsonify=True - )['stdout_json'] - return tasks - - @logwrap - def get_tasks(self): - tasks = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 task list -f json', - jsonify=True)['stdout_json'] - return tasks - - @logwrap - def get_first_task_id_by_name(self, cluster_id, task_name): - tasks = self.get_tasks() - tasks_ids = [] - for task in tasks: - if task['cluster'] == cluster_id and task['name'] == task_name: - tasks_ids.append(task['id']) - return min(tasks_ids) - - @logwrap - def get_network_filename(self, cluster_id): - cmd = ('fuel2 env network download {0} -d /tmp -f json' - .format(cluster_id)) - out = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - )['stdout'] - net_download = ''.join(out) - # net_download = 'Network ... 
downloaded to /tmp/network_1.json' - return net_download.split()[-1] - - @logwrap - def get_networks(self, cluster_id): - net_file = self.get_network_filename(cluster_id) - with self.ssh_manager.open_on_remote( - ip=self.ssh_manager.admin_ip, - path=net_file - ) as f: - return json.load(f) - - @logwrap - def update_network(self, cluster_id, net_config): - net_file = self.get_network_filename(cluster_id) - with self.ssh_manager.open_on_remote( - ip=self.ssh_manager.admin_ip, - path=net_file, - mode='w' - ) as f: - json.dump(net_config, f) - - cmd = 'cd /tmp; fuel2 env network upload {0} -f json'.format( - cluster_id) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - - @logwrap - def wait_cli_task_status(self, task, status, timeout=5 * 60, interval=5): - wait( - lambda: (self.get_task(task['id'])['status'] == status), - interval=interval, - timeout=timeout, - timeout_msg='Waiting timeout {timeout} sec was reached ' - 'for status: {status}' - ' on task: {task}'.format(task=task["name"], - timeout=timeout, - status=status - ) - ) - - def assert_cli_task_success(self, task, timeout=70 * 60, interval=20): - logger.info('Wait {timeout} seconds for task: {task}' - .format(timeout=timeout, task=task)) - start = time.time() - wait( - lambda: (self.get_task(task['id'])['status'] not in - ('pending', 'running')), - interval=interval, - timeout=timeout, - timeout_msg='Waiting timeout {timeout} sec was reached ' - 'for task: {task}'.format(task=task["name"], - timeout=timeout) - ) - took = time.time() - start - task = self.get_task(task['id']) - logger.info('Task finished in {took} seconds with the result: {task}' - .format(took=took, task=task)) - assert_equal( - task['status'], 'ready', - "Task '{name}' has incorrect status. {status} != {exp}".format( - status=task['status'], exp='ready', name=task["name"] - ) - ) - - @logwrap - def get_all_tasks_list(self): - return self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 task list -f json', - jsonify=True)['stdout_json'] - - @logwrap - def get_deployment_task_hist(self, task_id): - return self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 task history show {} -f json'.format(task_id), - jsonify=True - )['stdout_json'] - - @logwrap - def assert_all_tasks_completed(self, cluster_id=None): - cluster_info_template = "\n\tCluster ID: {cluster}{info}\n" - all_tasks = sorted( - self.get_all_tasks_list(), - key=lambda _tsk: _tsk['id'], - reverse=True - ) - - not_ready_tasks, deploy_tasks = incomplete_tasks( - all_tasks, cluster_id) - - not_ready_transactions = incomplete_deploy( - { - cluster: self.get_deployment_task_hist(task_id) - for cluster, task_id in deploy_tasks.items()}) - - if len(not_ready_tasks) > 0: - task_details_template = ( - "\n" - "\t\tTask name: {name}\n" - "\t\t\tStatus: {status}\n" - "\t\t\tProgress: {progress}\n" - "\t\t\tResult: {result}\n" - "\t\t\tTask ID: {id}" - ) - - task_text = 'Not all tasks completed: {}'.format( - ''.join( - cluster_info_template.format( - cluster=cluster, - info="".join( - task_details_template.format(**task) - for task in tasks)) - for cluster, tasks in sorted(not_ready_tasks.items()) - )) - logger.error(task_text) - if len(not_ready_transactions) == 0: - # Else: we will raise assert with detailed info - # about deployment - assert_true(len(not_ready_tasks) == 0, task_text) - fail_deploy(not_ready_transactions) - - @staticmethod - @logwrap - def hiera_floating_ranges(node_ip): - """ - - 1. SSH to controller node - 2. 
Get network settings from controller node - 3. Convert to json network settings in variable config_json - 4. Get new list of floating ranges in variable floating ranges - 5. Convert to sublist floating ranges in variable floating_ranges_json - - """ - config_json = hiera_json_out(node_ip, 'quantum_settings') - floating_ranges = \ - config_json[ - "predefined_networks"][ - "admin_floating_net"][ - "L3"]["floating"] - floating_ranges_json = [ - [float_address[0], float_address[1]] for float_address in ( - float_address.split(':') for float_address in floating_ranges)] - return floating_ranges_json - - @logwrap - def get_floating_ranges(self, cluster_id): - """ - - This method using for get floating ranges from master node before - cluster will be deployed. - 1. SSH to master node - 2. Get networks from master node - 3. Save floating ranges from master node - - """ - net_config = self.get_networks(cluster_id) - floating_ranges =\ - net_config[u'networking_parameters'][u'floating_ranges'] - return floating_ranges - - @logwrap - def change_floating_ranges(self, cluster_id, floating_range): - net_config = self.get_networks(cluster_id) - net_config[u'networking_parameters'][u'floating_ranges'] = \ - floating_range - new_settings = net_config - self.update_network(cluster_id, new_settings) - - @logwrap - def update_cli_network_configuration(self, cluster_id): - """Update cluster network settings with custom configuration. - Place here an additional config changes if needed (e.g. nodegroups' - networking configuration. - Also this method checks downloading/uploading networks via cli. - """ - net_config = self.get_networks(cluster_id) - new_settings = net_config - self.update_network(cluster_id, new_settings) - - def get_public_vip(self, cluster_id): - networks = self.get_networks(cluster_id) - return networks['public_vip'] - - def download_settings(self, cluster_id): - cmd = ('fuel2 env settings download {0} -d /tmp -f json'.format( - cluster_id)) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - with self.ssh_manager.open_on_remote( - ip=self.ssh_manager.admin_ip, - path='/tmp/environment_{0}/settings.json'.format(cluster_id) - ) as f: - return json.load(f) - - def upload_settings(self, cluster_id, settings): - with self.ssh_manager.open_on_remote( - ip=self.ssh_manager.admin_ip, - path='/tmp/environment_{id}/settings.json'.format( - id=cluster_id), - mode='w' - ) as f: - json.dump(settings, f) - - cmd = 'fuel2 env settings upload {0} -d /tmp -f json'.format( - cluster_id) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - - @logwrap - def update_ssl_configuration(self, cluster_id): - settings = self.download_settings(cluster_id) - change_cluster_ssl_config(settings, SSL_CN) - self.upload_settings(cluster_id, settings) - - def add_nodes_to_cluster(self, cluster_id, node_ids, roles): - if isinstance(node_ids, int): - node_ids_str = str(node_ids) - else: - node_ids_str = ' '.join(str(n) for n in node_ids) - cmd = ('fuel2 env add nodes -e {0} -n {1} -r {2}'.format( - cluster_id, node_ids_str, ','.join(roles))) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - - @logwrap - def use_ceph_for_volumes(self, cluster_id): - settings = self.download_settings(cluster_id) - settings['editable']['storage']['volumes_lvm'][ - 'value'] = False - settings['editable']['storage']['volumes_ceph'][ - 'value'] = True - self.upload_settings(cluster_id, settings) - - @logwrap - def use_ceph_for_images(self, 
cluster_id): - settings = self.download_settings(cluster_id) - settings['editable']['storage']['images_ceph'][ - 'value'] = True - self.upload_settings(cluster_id, settings) - - @logwrap - def use_ceph_for_ephemeral(self, cluster_id): - settings = self.download_settings(cluster_id) - settings['editable']['storage']['ephemeral_ceph'][ - 'value'] = True - self.upload_settings(cluster_id, settings) - - @logwrap - def change_osd_pool_size(self, cluster_id, replication_factor): - settings = self.download_settings(cluster_id) - settings['editable']['storage']['osd_pool_size'][ - 'value'] = replication_factor - self.upload_settings(cluster_id, settings) - - @logwrap - def use_radosgw_for_objects(self, cluster_id): - settings = self.download_settings(cluster_id) - ceph_for_images = settings['editable']['storage']['images_ceph'][ - 'value'] - if ceph_for_images: - settings['editable']['storage']['objects_ceph'][ - 'value'] = True - else: - settings['editable']['storage']['images_ceph'][ - 'value'] = True - settings['editable']['storage']['objects_ceph'][ - 'value'] = True - self.upload_settings(cluster_id, settings) - - @logwrap - def get_current_ssl_cn(self, controller_ip): - cmd = "openssl x509 -noout -subject -in \ - /var/lib/astute/haproxy/public_haproxy.pem \ - | sed -n '/^subject/s/^.*CN=//p'" - ssl_cn = self.ssh_manager.execute_on_remote( - ip=controller_ip, - cmd=cmd)['stdout_str'] - return ssl_cn - - @logwrap - def get_current_ssl_keypair(self, controller_ip): - path = "/var/lib/astute/haproxy/public_haproxy.pem" - with self.ssh_manager.open_on_remote( - ip=controller_ip, - path=path - ) as f: - current_ssl_keypair = f.read().strip() - return current_ssl_keypair - - @logwrap - def get_endpoints(self, controller_ip): - cmd = "source openrc;export OS_IDENTITY_API_VERSION=3;" \ - "openstack endpoint list -f json" - endpoints = [] - endpoint_list =\ - self.ssh_manager.execute_on_remote(ip=controller_ip, - cmd=cmd, - jsonify=True)['stdout_json'] - for endpoint in endpoint_list: - if endpoint['Interface'] == 'public': - url = urllib.parse.urlparse(endpoint['URL']) - endpoint_info = {'service_name': endpoint['Service Name'], - 'protocol': url.scheme, - 'domain': url.hostname} - endpoints.append(endpoint_info) - return endpoints - - @logwrap - def download_node_interfaces(self, node_id): - cmd = ' fuel2 node interfaces download {} -d /tmp -f json' \ - ''.format(node_id) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - with self.ssh_manager.open_on_remote( - ip=self.ssh_manager.admin_ip, - path='/tmp/node_{}/interfaces.json'.format(node_id) - ) as f: - return json.load(f) - - def upload_node_interfaces(self, node_id, interfaces): - self.ssh_manager.mkdir_on_remote( - ip=self.ssh_manager.admin_ip, - path='/tmp/node_{id}'.format(id=node_id) - ) - with self.ssh_manager.open_on_remote( - ip=self.ssh_manager.admin_ip, - path='/tmp/node_{id}/interfaces.json'.format(id=node_id), - mode='w' - ) as f: - json.dump(interfaces, f) - - cmd = ('fuel2 node interfaces upload {} -d /tmp -f json' - ''.format(node_id)) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - - @logwrap - def update_node_interfaces(self, node_id): - interfaces = self.download_node_interfaces(node_id) - logger.debug("interfaces we get {}".format(interfaces)) - assigned_networks = { - iface_alias('eth0'): [{'id': 1, 'name': 'fuelweb_admin'}], - iface_alias('eth1'): [{'id': 2, 'name': 'public'}], - iface_alias('eth2'): [{'id': 3, 'name': 'management'}], - 
iface_alias('eth3'): [{'id': 5, 'name': 'private'}], - iface_alias('eth4'): [{'id': 4, 'name': 'storage'}], - } - for interface in interfaces: - name = interface['name'] - net_to_assign = assigned_networks.get(name, None) - if net_to_assign: - interface['assigned_networks'] = net_to_assign - logger.debug("interfaces after update {}".format(interfaces)) - self.upload_node_interfaces(node_id, interfaces) - - @logwrap - def get_net_config_cli(self, task_id=None): - if task_id is None: - all_deployment_tasks = sorted( - [ - tsk_ for tsk_ in self.get_all_tasks_list() - if tsk_['name'] == 'deployment'], - key=lambda _tsk: _tsk['id'], - reverse=True - ) - task_id = all_deployment_tasks[0]['id'] - - cmd = 'fuel2 task network-configuration download {0}'.format(task_id) - settings_download = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - )['stdout_str'] - settings_file = settings_download.split()[-1] - return utils.YamlEditor( - file_path=settings_file, - ip=self.ssh_manager.admin_ip - ).get_content() - - @logwrap - def get_cluster_config_cli(self, task_id): - cmd = 'fuel2 task settings download {0}'.format(task_id) - settings_download = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - )['stdout_str'] - settings_file = settings_download.split()[-1] - return utils.YamlEditor( - file_path=settings_file, - ip=self.ssh_manager.admin_ip, - ).get_content() - - @logwrap - def get_deployment_info_cli(self, task_id): - cmd = 'fuel2 task deployment-info download {0}'.format(task_id) - settings_download = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - )['stdout_str'] - settings_file = settings_download.split()[-1] - return utils.YamlEditor( - file_path=settings_file, - ip=self.ssh_manager.admin_ip, - ).get_content() - - @logwrap - def set_public_networks_for_all_nodes(self, cluster_id): - settings = self.download_settings(cluster_id) - settings['editable']['public_network_assignment'][ - 'assign_to_all_nodes']['value'] = True - self.upload_settings(cluster_id, settings) diff --git a/fuelweb_test/tests/test_cpu_pinning.py b/fuelweb_test/tests/test_cpu_pinning.py deleted file mode 100644 index 894864eab..000000000 --- a/fuelweb_test/tests/test_cpu_pinning.py +++ /dev/null @@ -1,657 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
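The storage helpers just above (use_ceph_for_volumes, use_ceph_for_ephemeral, change_osd_pool_size, use_radosgw_for_objects and friends) all perform the same download-mutate-upload round trip on the cluster settings. A minimal sketch of that shared step as a pure function, assuming the nested settings layout those helpers index into; set_storage_option is a hypothetical name:

def set_storage_option(settings, option, value):
    # `settings` is the dict returned by download_settings(cluster_id);
    # the caller pushes the mutated dict back with upload_settings().
    settings['editable']['storage'][option]['value'] = value
    return settings

Typical use, mirroring use_ceph_for_volumes:

    settings = self.download_settings(cluster_id)
    set_storage_option(settings, 'volumes_lvm', False)
    set_storage_option(settings, 'volumes_ceph', True)
    self.upload_settings(cluster_id, settings)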
- -import random - -from keystoneauth1.exceptions import BadRequest -from proboscis import asserts -from proboscis import test - -from core.helpers.log_helpers import logwrap - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers import utils -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from gates_tests.helpers import exceptions - - -@test(groups=["numa_cpu_pinning"]) -class NumaCpuPinning(TestBasic): - """NumaCpuPinning.""" - - @staticmethod - @logwrap - def assert_entry_in_config(conf, conf_name, section, option, value): - """Check that an option contains the expected value. - - :param conf: a ConfigParser object - :param conf_name: a string of full file path - :param section: a string of section name in configuration file - :param option: a string of option name in configuration file - :param value: a string value expected to be contained in the option - :return: - """ - current_value = conf.get(section, option) - asserts.assert_true(value in current_value, - 'Expected that the option "{0}" contains value ' - '"{1}" in config file "{2}", but actually has ' - 'value "{3}": FAIL'.format(option, - value, - conf_name, - current_value)) - - @staticmethod - @logwrap - def assert_quantity_in_config(conf, conf_name, section, option, - value): - """Check the number of values in an option. - - :param conf: a ConfigParser object - :param conf_name: a string of full file path - :param section: a string of section name in configuration file - :param option: a string of option name in configuration file - :param value: an int number of values in specific option - :return: - """ - current_value = conf.get(section, option) - asserts.assert_equal(len(current_value.split(',')), value, - 'Expected that the option "{0}" has "{1}" ' - 'values in config file {2}, but actually has ' - 'value "{3}": FAIL'.format(option, - value, - conf_name, - current_value)) - - @logwrap - def create_pinned_instance(self, os_conn, cluster_id, - name, vcpus, hostname, meta): - """Boot VM on specific compute with CPU pinning - - :param os_conn: an object of connection to openstack services - :param cluster_id: an integer number of cluster id - :param name: a string name of flavor and aggregate - :param vcpus: an integer number of vcpus for flavor - :param hostname: a string fqdn name of compute - :param meta: a dict with metadata for aggregate - :return: - """ - aggregate_name = name + str(random.randint(0, 1000)) - aggregate = os_conn.create_aggregate(aggregate_name, - metadata=meta, - hosts=[hostname]) - - extra_specs = {'aggregate_instance_extra_specs:pinned': 'true', - 'hw:cpu_policy': 'dedicated'} - - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - flavor_id = random.randint(10, 10000) - flavor = os_conn.create_flavor(name=name, ram=64, vcpus=vcpus, disk=1, - flavorid=flavor_id, - extra_specs=extra_specs) - - server = os_conn.create_server_for_migration(neutron=True, - label=net_name, - flavor_id=flavor_id) - os_conn.verify_instance_status(server, 'ACTIVE') - os_conn.delete_instance(server) - os_conn.delete_flavor(flavor) - os_conn.delete_aggregate(aggregate, hosts=[hostname]) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["numa_cpu_pinning", - "basic_env_for_numa_cpu_pinning"]) - @log_snapshot_after_test - def 
basic_env_for_numa_cpu_pinning(self): - """Basic environment for NUMA CPU pinning - - Scenario: - 1. Create cluster - 2. Add 2 nodes with compute role - 3. Add 3 nodes with controller role - 4. Verify that the quantity of NUMA nodes is equal on the node and in Fuel - - Snapshot: basic_env_for_numa_cpu_pinning - """ - snapshot_name = 'basic_env_for_numa_cpu_pinning' - self.check_run(snapshot_name) - self.env.revert_snapshot("ready_with_5_slaves") - - # TODO(kdemina) Use common function for variable asserts - if not settings.KVM_USE: - raise exceptions.FuelQAVariableNotSet( - 'KVM_USE', 'true') - - if int(settings.HARDWARE['slave_node_cpu']) < 6: - raise exceptions.FuelQAVariableNotSet( - 'SLAVE_NODE_CPU', 6) - - if int(settings.HARDWARE['numa_nodes']) < 2: - raise exceptions.FuelQAVariableNotSet( - 'NUMA_NODES', 2) - - if not settings.INTERFACES_DICT['eth0'] == 'ens3': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_0', 'ens3') - - if not settings.INTERFACES_DICT['eth1'] == 'ens4': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_1', 'ens4') - - if not settings.INTERFACES_DICT['eth2'] == 'ens5': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_2', 'ens5') - - if not settings.INTERFACES_DICT['eth3'] == 'ens6': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_3', 'ens6') - - if not settings.INTERFACES_DICT['eth4'] == 'ens7': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_4', 'ens7') - - if not settings.INTERFACES_DICT['eth5'] == 'ens8': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_5', 'ens8') - - if not settings.ACPI_ENABLE: - raise exceptions.FuelQAVariableNotSet( - 'DRIVER_ENABLE_ACPI', 'true') - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT_TYPE - } - ) - self.show_step(2) - self.show_step(3) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute'], - 'slave-02': ['compute'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['controller'] - }) - - self.show_step(4) - - for node in ('slave-01', 'slave-02'): - target_node = self.fuel_web.get_nailgun_node_by_name(node) - numas_from_fuel = len( - target_node['meta']['numa_topology']['numa_nodes']) - numas_on_remote = utils.get_quantity_of_numa(target_node['ip']) - if not numas_on_remote: - # Fuel handles a topology without NUMA as 1 NUMA node - asserts.assert_equal(numas_from_fuel, 1, - "No NUMA nodes on {0} " - "while Fuel shows it " - "has {1}".format( - target_node['ip'], numas_from_fuel)) - raise AssertionError("No NUMA nodes on {0}".format( - target_node['ip'])) - else: - asserts.assert_equal(numas_on_remote, numas_from_fuel, - "{0} NUMA nodes on {1} " - "while Fuel shows it " - "has {2}".format( - numas_on_remote, target_node['ip'], - numas_from_fuel)) - logger.info("There are {0} NUMA nodes on node {1}".format( - numas_on_remote, target_node['ip'])) - self.env.make_snapshot(snapshot_name, is_make=True) - - @test(depends_on_groups=['basic_env_for_numa_cpu_pinning'], - groups=["cpu_pinning_on_two_compute"]) - @log_snapshot_after_test - def cpu_pinning_on_two_compute(self): - """Check different amounts of pinned CPUs - - Scenario: - 1. Revert snapshot "basic_env_for_numa_cpu_pinning" - 2. Pin maximum CPU for the nova on the first compute - 3. Pin minimum CPU for the nova on the second compute - 4. Verify the setting was successfully applied - 5. Deploy cluster - 6. Check new filters are enabled in nova.conf at controller - 7.
Check nova.conf contains pinned CPU at compute - 8. Run OSTF - 9. Boot VM with pinned CPU on the first compute - 10. Boot VM with pinned CPU on the second compute - - Snapshot: cpu_pinning_on_two_compute - """ - snapshot_name = 'cpu_pinning_on_two_compute' - self.check_run(snapshot_name) - - self.show_step(1) - self.env.revert_snapshot("basic_env_for_numa_cpu_pinning") - - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(2) - first_compute = self.fuel_web.get_nailgun_node_by_name('slave-01') - first_compute_cpu = first_compute['meta']['cpu']['total'] - first_config = self.fuel_web.client.get_node_attributes( - first_compute['id']) - first_config['cpu_pinning']['nova']['value'] = first_compute_cpu - 1 - self.fuel_web.client.upload_node_attributes( - first_config, first_compute['id']) - - self.show_step(3) - second_compute = self.fuel_web.get_nailgun_node_by_name('slave-02') - second_config = self.fuel_web.client.get_node_attributes( - second_compute['id']) - second_config['cpu_pinning']['nova']['value'] = 1 - self.fuel_web.client.upload_node_attributes( - second_config, second_compute['id']) - - self.show_step(4) - first_config = self.fuel_web.client.get_node_attributes( - first_compute['id']) - asserts.assert_equal( - first_config['cpu_pinning']['nova']['value'], - first_compute_cpu - 1, - "CPU pinning wasn't applied on '{0}': " - "Expected value '{1}', actual '{2}'" - .format(first_compute['ip'], first_compute_cpu - 1, - first_config['cpu_pinning']['nova']['value'])) - - second_config = self.fuel_web.client.get_node_attributes( - second_compute['id']) - asserts.assert_equal( - second_config['cpu_pinning']['nova']['value'], - 1, - "CPU pinning wasn't applied on '{0}': " - "Expected value '{1}', actual '{2}'" - .format(second_compute['ip'], 1, - second_config['cpu_pinning']['nova']['value'])) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, - roles=['controller']) - - nova_conf_path = "/etc/nova/nova.conf" - - for controller in controllers: - with self.ssh_manager.open_on_remote( - ip=controller['ip'], - path=nova_conf_path) as f: - nova_conf = utils.get_ini_config(f) - - self.assert_entry_in_config(nova_conf, - nova_conf_path, - "DEFAULT", - "scheduler_default_filters", - "NUMATopologyFilter") - - self.show_step(7) - - with self.ssh_manager.open_on_remote( - ip=first_compute['ip'], - path=nova_conf_path) as f: - nova_conf = utils.get_ini_config(f) - - self.assert_quantity_in_config(nova_conf, - nova_conf_path, - "DEFAULT", - "vcpu_pin_set", - first_compute_cpu - 1) - - with self.ssh_manager.open_on_remote( - ip=second_compute['ip'], - path=nova_conf_path) as f: - nova_conf = utils.get_ini_config(f) - - self.assert_quantity_in_config(nova_conf, - nova_conf_path, - "DEFAULT", - "vcpu_pin_set", - 1) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - meta = {'pinned': 'true'} - - self.create_pinned_instance(os_conn=os_conn, - cluster_id=cluster_id, - name='cpu_3_', - vcpus=3, - hostname=first_compute['fqdn'], - meta=meta) - self.show_step(10) - self.create_pinned_instance(os_conn=os_conn, - cluster_id=cluster_id, - name='cpu_1_', - vcpus=1, - hostname=second_compute['fqdn'], - meta=meta) - - self.env.make_snapshot(snapshot_name, is_make=True) - - @test(depends_on_groups=['basic_env_for_numa_cpu_pinning'], - 
groups=["cpu_pinning_with_other_role"]) - @log_snapshot_after_test - def cpu_pinning_with_other_role(self): - """Check pinned CPU on compute, cinder node - - Scenario: - 1. Revert snapshot "basic_env_for_numa_cpu_pinning" - 2. Add cinder role for compute nodes - 3. Pin maximum CPU for the nova on the computes - 4. Verify the setting was successfully applied - 5. Deploy cluster - 6. Check new filters are enabled in nova.conf at controller - 7. Check nova.conf contains pinned CPU at computes - 8. Run OSTF - 9. Boot VMs with pinned CPU on each compute, cinder node - - Snapshot: cpu_pinning_with_other_role - """ - self.show_step(1) - self.env.revert_snapshot("basic_env_for_numa_cpu_pinning") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - - nodes = {'slave-01': ['compute', 'cinder'], - 'slave-02': ['compute', 'cinder']} - - self.fuel_web.update_nodes(cluster_id, nodes) - - self.show_step(3) - target_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute', 'cinder'], role_status='pending_roles') - for compute in target_nodes: - compute_cpu = compute['meta']['cpu']['total'] - compute_config = self.fuel_web.client.get_node_attributes( - compute['id']) - compute_config['cpu_pinning']['nova']['value'] = compute_cpu - 1 - self.fuel_web.client.upload_node_attributes( - compute_config, compute['id']) - - self.show_step(4) - for compute in target_nodes: - compute_cpu = compute['meta']['cpu']['total'] - compute_config = self.fuel_web.client.get_node_attributes( - compute['id']) - asserts.assert_equal( - compute_config['cpu_pinning']['nova']['value'], - compute_cpu - 1, - "CPU pinning wasn't applied on '{0}': " - "Expected value '{1}', actual '{2}'" - .format(compute['ip'], compute_cpu - 1, - compute_config['cpu_pinning']['nova']['value'])) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, - roles=['controller']) - - nova_conf_path = "/etc/nova/nova.conf" - - for controller in controllers: - with self.ssh_manager.open_on_remote( - ip=controller['ip'], - path=nova_conf_path) as f: - nova_conf = utils.get_ini_config(f) - - self.assert_entry_in_config(nova_conf, - nova_conf_path, - "DEFAULT", - "scheduler_default_filters", - "NUMATopologyFilter") - - self.show_step(7) - for compute in target_nodes: - with self.ssh_manager.open_on_remote( - ip=compute['ip'], - path=nova_conf_path) as f: - nova_conf = utils.get_ini_config(f) - - compute_cpu = compute['meta']['cpu']['total'] - self.assert_quantity_in_config(nova_conf, - nova_conf_path, - "DEFAULT", - "vcpu_pin_set", - compute_cpu - 1) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - meta = {'pinned': 'true'} - - for compute in target_nodes: - self.create_pinned_instance(os_conn=os_conn, - cluster_id=cluster_id, - name='cpu_role_', - vcpus=2, - hostname=compute['fqdn'], - meta=meta) - - self.env.make_snapshot("cpu_pinning_with_other_role") - - @test(depends_on_groups=['cpu_pinning_on_two_compute'], - groups=["reboot_cpu_pinning_compute"]) - @log_snapshot_after_test - def reboot_cpu_pinning_compute(self): - """Check compute with pinned CPU after reboot - - Scenario: - 1. Revert snapshot "cpu_pinning_on_two_compute" - 2. Reboot the first compute with CPU pinning - 3. Run OSTF - 4. Boot VM with pinned CPU on the first compute - 5.
Reboot the second compute with CPU pinning - 6. Run OSTF - 7. Boot VM with pinned CPU on the second compute - - Snapshot: reboot_cpu_pinning_compute - """ - self.show_step(1) - self.env.revert_snapshot("cpu_pinning_on_two_compute") - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(2) - first_compute = self.fuel_web.get_nailgun_node_by_name('slave-01') - second_compute = self.fuel_web.get_nailgun_node_by_name('slave-02') - self.fuel_web.warm_restart_nodes( - self.fuel_web.get_devops_nodes_by_nailgun_nodes([first_compute])) - - self.show_step(3) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(4) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - meta = {'pinned': 'true'} - - self.create_pinned_instance(os_conn=os_conn, - cluster_id=cluster_id, - name='cpu_3_', - vcpus=3, - hostname=first_compute['fqdn'], - meta=meta) - self.show_step(5) - self.fuel_web.warm_restart_nodes( - self.fuel_web.get_devops_nodes_by_nailgun_nodes([second_compute])) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(7) - self.create_pinned_instance(os_conn=os_conn, - cluster_id=cluster_id, - name='cpu_1_', - vcpus=1, - hostname=second_compute['fqdn'], - meta=meta) - - self.env.make_snapshot('reboot_cpu_pinning_compute') - - @test(depends_on_groups=['basic_env_for_numa_cpu_pinning'], - groups=["cpu_pinning_allocation"]) - @log_snapshot_after_test - def cpu_pinning_allocation(self): - """Check errors for allocation of CPU for nova - - Scenario: - 1. Revert snapshot "basic_env_for_numa_cpu_pinning" - 2. Try to pin all node CPUs for nova, and then twice that number - 3. Check status code: 400 - - Snapshot: cpu_pinning_allocation - """ - - self.show_step(1) - self.env.revert_snapshot("basic_env_for_numa_cpu_pinning") - - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(2) - self.show_step(3) - target_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status='pending_roles') - - for compute in target_nodes: - compute_cpu = compute['meta']['cpu']['total'] - compute_config = self.fuel_web.client.get_node_attributes( - compute['id']) - compute_config['cpu_pinning']['nova']['value'] = compute_cpu - - try: - self.fuel_web.client.upload_node_attributes( - compute_config, compute['id']) - except BadRequest: - logger.debug('BadRequest received as expected') - else: - asserts.fail("Pinned all CPU on {0}, while expecting HTTP " - "400 error on CPU value {1}" - .format(compute['ip'], compute_cpu)) - - compute_config['cpu_pinning']['nova']['value'] = compute_cpu * 2 - - try: - self.fuel_web.client.upload_node_attributes( - compute_config, compute['id']) - except BadRequest: - logger.debug('BadRequest received as expected') - else: - asserts.fail("Pinned all CPU on {0}, while expecting HTTP " - "400 error on CPU value {1}" - .format(compute['ip'], compute_cpu * 2)) - - self.env.make_snapshot('cpu_pinning_allocation') - - @test(depends_on_groups=['cpu_pinning_on_two_compute'], - groups=["change_pinned_cpu_and_redeploy"]) - @log_snapshot_after_test - def change_pinned_cpu_and_redeploy(self): - """Unpin CPU and redeploy the cluster - - Scenario: - 1. Revert snapshot "cpu_pinning_on_two_compute" - 2. Unpin CPU on the first compute - 3. Deploy changes - 4. Verify changes were applied - 5. Check nova.conf doesn't contain pinned CPU at the first compute - 6. Run OSTF - 7.
Boot VM with pinned CPU on the second compute - - Snapshot: change_pinned_cpu_and_redeploy - """ - self.show_step(1) - self.env.revert_snapshot("cpu_pinning_on_two_compute") - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(2) - first_compute = self.fuel_web.get_nailgun_node_by_name('slave-01') - first_config = self.fuel_web.client.get_node_attributes( - first_compute['id']) - first_config['cpu_pinning']['nova']['value'] = 0 - self.fuel_web.client.upload_node_attributes( - first_config, first_compute['id']) - - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - compute_config = self.fuel_web.client.get_node_attributes( - first_compute['id']) - asserts.assert_equal( - compute_config['cpu_pinning']['nova']['value'], - 0, - "CPU wasn't unpinned on '{0}': " - "Expected value 0, actual '{1}'" - .format(first_compute['ip'], - compute_config['cpu_pinning']['nova']['value'])) - - self.show_step(5) - - nova_conf_path = "/etc/nova/nova.conf" - with self.ssh_manager.open_on_remote( - ip=first_compute['ip'], - path=nova_conf_path) as f: - - nova_conf = utils.get_ini_config(f) - utils.check_config(nova_conf, - nova_conf_path, - "DEFAULT", - "vcpu_pin_set", - None) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(7) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - second_compute = self.fuel_web.get_nailgun_node_by_name('slave-02') - meta = {'pinned': 'true'} - self.create_pinned_instance(os_conn=os_conn, - cluster_id=cluster_id, - name='cpu_1_', - vcpus=1, - hostname=second_compute['fqdn'], - meta=meta) - - self.env.make_snapshot('change_pinned_cpu_and_redeploy') diff --git a/fuelweb_test/tests/test_custom_hostname.py b/fuelweb_test/tests/test_custom_hostname.py deleted file mode 100644 index a04f1866f..000000000 --- a/fuelweb_test/tests/test_custom_hostname.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from random import randrange -from re import match - -from keystoneauth1 import exceptions as http_exceptions -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_raises -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test.helpers.checkers import check_ping -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['custom_hostname']) -class CustomHostname(TestBasic): - """CustomHostname.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=['default_hostname']) - @log_snapshot_after_test - def default_hostname(self): - """Verify that the default hostnames (e.g. 'node-1') are applied - - Scenario: - 1. Create a cluster - 2. Add 3 nodes with controller role - 3.
Add 1 node with compute role - 4. Deploy the cluster - 5. Verify network configuration on controller - 6. Run OSTF - 7. Verify that the default hostname is applied on cluster nodes - - Duration: 70m - """ - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - hostname_pattern = r"node-\d{1,2}" - - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - devops_node = self.fuel_web.get_devops_node_by_nailgun_node(node) - - # Get hostname of a node and compare it against - # the default hostname format - assert_true( - match(hostname_pattern, node['hostname']), - "Default host naming format ('node-#') has not been applied " - "to '{0}' node. Current hostname is " - "'{1}'".format(devops_node.name, node['hostname'])) - - # Verify that a node is accessible by the default hostname - assert_true( - check_ping(self.env.get_admin_node_ip(), - node['hostname']), - "{0} node is not accessible by its default " - "hostname {1}".format(devops_node.name, node['hostname'])) - - self.env.make_snapshot("default_hostname") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=['set_custom_hostname']) - @log_snapshot_after_test - def set_custom_hostname(self): - """Verify that a custom hostname can be applied to a node - - Scenario: - 1. Revert the snapshot - 2. Create a cluster - 3. Add 3 nodes with controller role - 4. Add 1 node with compute role - 5. Set custom hostnames for all cluster nodes - 6. Deploy the cluster - 7. Verify network configuration on controller - 8. Run OSTF - 9. Verify that there are no dead compute services - 10.
Verify the new hostnames are applied on the nodes - - Duration: 130m - """ - for method in ('API', 'CLI'): - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - - # Set custom hostnames for cluster nodes - custom_hostnames = [] - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - custom_hostname = "{0}-{1}".format( - node['pending_roles'][0], randrange(0, 0xffff)) - custom_hostnames.append(custom_hostname) - if method == 'API': - self.fuel_web.client.set_hostname(node['id'], - custom_hostname) - elif method == 'CLI': - with self.env.d_env.get_admin_remote() as admin_remote: - admin_remote.execute( - 'fuel node --node-id {0} --hostname ' - '{1}'.format(node['id'], custom_hostname)) - - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf( - cluster_id, test_sets=['ha', 'smoke', 'sanity']) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13) - - # Verify that new hostnames are applied on the nodes - for node, custom_hostname in zip( - self.fuel_web.client.list_cluster_nodes(cluster_id), - custom_hostnames): - devops_node = self.fuel_web.get_devops_node_by_nailgun_node( - node) - with self.env.d_env.get_admin_remote() as admin_remote: - hostname = admin_remote.execute( - "ssh -q {0} hostname " - "-s".format(custom_hostname))['stdout'][0].strip() - assert_equal( - custom_hostname, - hostname, - "Failed to apply the new '{0}' hostname to '{1}' node. " - "Current hostname is '{2}'".format( - custom_hostname, devops_node.name, hostname)) - - self.env.make_snapshot("set_custom_hostname") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=['custom_hostname_validation']) - @log_snapshot_after_test - def custom_hostname_validation(self): - """Verify the hostname format validation - - Scenario: - 1. Revert the snapshot - 2. Verify that the hostname format is validated (only alphanumeric - ASCII symbols are allowed, and the hyphen; the hostname must not - start with or end with the hyphen). - - Duration: 7m - """ - self.env.revert_snapshot("ready_with_5_slaves") - - node = self.fuel_web.client.list_nodes()[0] - - for invalid_hostname in ( - # Boundary values of classes of invalid ASCII symbols - "node ", - "node,", - "node.", - "node/", - "node:", - "node@", - "node[", - "node`", - "node{", - # A hostname must not start or end with the hyphen - "-node", - "node-", - ): - assert_raises( - http_exceptions.BadRequest, - self.fuel_web.client.set_hostname, - node['id'], - invalid_hostname) - - self.env.make_snapshot("custom_hostname_validation") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=['set_duplicate_hostname']) - @log_snapshot_after_test - def set_duplicate_hostname(self): - """Verify that a duplicate hostname is not allowed - - Scenario: - 1. Revert the snapshot - 2. Set a custom hostname for the node - 3. 
Verify that new hostnames are validated to avoid duplicates - - Duration: 7m - """ - self.env.revert_snapshot("ready_with_5_slaves") - - # Set a custom hostname for a node for the 1st time - custom_hostname = 'custom-hostname' - node_list = self.fuel_web.client.list_nodes() - node = node_list[0] - self.fuel_web.client.set_hostname(node['id'], custom_hostname) - - # Try to set duplicate hostname for another node - node = node_list[1] - assert_raises( - http_exceptions.Conflict, - self.fuel_web.client.set_hostname, - node["id"], - custom_hostname - ) - - self.env.make_snapshot("set_duplicate_hostname") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=['set_custom_hostname_for_provisioned_node']) - @log_snapshot_after_test - def set_custom_hostname_for_provisioned_node(self): - """Verify that it is not allowed to change a hostname of a - provisioned node - - Scenario: - 1. Revert the snapshot - 2. Create a cluster - 3. Add a node with controller role - 4. Set a custom hostname for the node - 5. Provision the node - 6. Verify that updating node hostname of the provisioned node - is not allowed - - Duration: 20m - """ - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - - self.fuel_web.update_nodes(cluster_id, {'slave-01': ['controller']}) - - # Set a custom hostname for a node for the 1st time - # and provision the node - node = self.fuel_web.client.list_cluster_nodes(cluster_id)[0] - self.fuel_web.client.set_hostname(node['id'], 'custom-hostname') - self.fuel_web.provisioning_cluster_wait(cluster_id) - - # Try to change the hostname of the provisioned node - assert_raises( - http_exceptions.Forbidden, - self.fuel_web.client.set_hostname, - node['id'], - 'new-custom-hostname') - - self.env.make_snapshot("set_custom_hostname_for_provisioned_node") diff --git a/fuelweb_test/tests/test_dpdk.py b/fuelweb_test/tests/test_dpdk.py deleted file mode 100644 index 5bbd7346f..000000000 --- a/fuelweb_test/tests/test_dpdk.py +++ /dev/null @@ -1,429 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
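Aside on the custom_hostname_validation test above: it probes the boundary characters of the rule (ASCII letters, digits and hyphens only, no leading or trailing hyphen). A compact way to express that rule is a single anchored regex. This is an illustrative sketch of the constraint being tested, not Nailgun's actual validator; details such as the 63-character limit are assumed from RFC 1123 label rules:

import re

# No leading hyphen (lookahead), no trailing hyphen (lookbehind),
# otherwise only ASCII letters, digits and hyphens, 1-63 characters.
HOSTNAME_LABEL = re.compile(r'^(?!-)[A-Za-z0-9-]{1,63}(?<!-)$')

assert HOSTNAME_LABEL.match('node-1')
assert not HOSTNAME_LABEL.match('-node')
assert not HOSTNAME_LABEL.match('node-')
assert not HOSTNAME_LABEL.match('node@')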
- -from copy import deepcopy -import random - -from devops.helpers import helpers as devops_helpers -from proboscis.asserts import assert_raises -from proboscis import test -from keystoneauth1 import exceptions - -from fuelweb_test.helpers.checkers import check_firewall_driver -from fuelweb_test.helpers.checkers import check_settings_requirements -from fuelweb_test.helpers.checkers import enable_feature_group -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.tests.test_bonding_base import BondingTestDPDK -from fuelweb_test import logger -from fuelweb_test import settings - - -class TestDPDK(TestBasic): - """TestDPDK.""" - - tests_requirements = {'KVM_USE': True} - - def __init__(self): - super(TestDPDK, self).__init__() - check_settings_requirements(self.tests_requirements) - - def check_dpdk_instance_connectivity(self, os_conn, cluster_id, - mem_page_size='2048'): - """Boot VM with HugePages and ping it via floating IP - - :param os_conn: an object of connection to openstack services - :param cluster_id: an integer number of cluster id - :param mem_page_size: huge pages size - :return: - """ - - extra_specs = { - 'hw:mem_page_size': mem_page_size - } - - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - flavor_id = random.randint(10, 10000) - name = 'system_test-{}'.format(random.randint(10, 10000)) - flavor = os_conn.create_flavor(name=name, ram=64, - vcpus=1, disk=1, - flavorid=flavor_id, - extra_specs=extra_specs) - - server = os_conn.create_server_for_migration(neutron=True, - label=net_name, - flavor_id=flavor_id) - os_conn.verify_instance_status(server, 'ACTIVE') - - float_ip = os_conn.assign_floating_ip(server) - logger.info("Floating address {0} associated with instance {1}" - .format(float_ip.ip, server.id)) - - logger.info("Wait for ping from instance {} " - "by floating ip".format(server.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(float_ip.ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". - format(server.id, 300))) - - os_conn.delete_instance(server) - os_conn.delete_flavor(flavor) - - -@test(groups=["support_dpdk"]) -class SupportDPDK(TestDPDK): - """SupportDPDK.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_cluster_with_dpdk_vlan"]) - @log_snapshot_after_test - def deploy_cluster_with_dpdk_vlan(self): - """Deploy cluster with DPDK with VLAN segmentation - - Scenario: - 1. Create new environment with VLAN segmentation for Neutron - 2. Set KVM as Hypervisor - 3. Add controller and compute nodes - 4. Configure private network in DPDK mode - 5. Configure HugePages for compute nodes - 6. Run network verification - 7. Deploy environment - 8. Run network verification - 9. Run OSTF - 10. Reboot compute - 11. Run OSTF - 12. Check option "firewall_driver" in config files - 13. 
Run instance on compute with DPDK and check its availability - via floating IP - - Snapshot: deploy_cluster_with_dpdk_vlan - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - enable_feature_group(self.env, 'experimental') - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "vlan" - } - ) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }) - - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status='pending_roles')[0] - - self.show_step(4) - self.fuel_web.enable_dpdk(compute['id']) - - self.show_step(5) - self.fuel_web.setup_hugepages( - compute['id'], hp_2mb=256, hp_dpdk_mb=1024) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(10) - # reboot compute - self.fuel_web.warm_restart_nodes( - [self.fuel_web.get_devops_node_by_nailgun_node(compute)]) - - # Wait until OpenStack services are UP - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(12) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - check_firewall_driver(compute['ip'], compute['roles'][0], 'noop') - - self.show_step(13) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.check_dpdk_instance_connectivity(os_conn, cluster_id) - - self.env.make_snapshot("deploy_cluster_with_dpdk_vlan") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_cluster_with_dpdk_tun"]) - @log_snapshot_after_test - def deploy_cluster_with_dpdk_tun(self): - """Deploy cluster with DPDK with tunneling segmentation - - Scenario: - 1. Create new environment with VXLAN segmentation for Neutron - 2. Set KVM as Hypervisor - 3. Add controller and compute nodes - 4. Configure private network in DPDK mode - 5. Configure HugePages for compute nodes - 6. Run network verification - 7. Deploy environment - 8. Run network verification - 9. Run OSTF - 10. Reboot compute - 11. Run OSTF - 12. Check option "firewall_driver" in config files - 13. 
Run instance on compute with DPDK and check its availability - via floating IP - - Snapshot: deploy_cluster_with_dpdk_tun - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - enable_feature_group(self.env, 'experimental') - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "tun" - } - ) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }) - - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status='pending_roles')[0] - - self.show_step(4) - self.fuel_web.enable_dpdk(compute['id']) - - self.show_step(5) - self.fuel_web.setup_hugepages( - compute['id'], hp_2mb=256, hp_dpdk_mb=1024) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(10) - # reboot compute - self.fuel_web.warm_restart_nodes( - [self.fuel_web.get_devops_node_by_nailgun_node(compute)]) - - # Wait until OpenStack services are UP - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(12) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - check_firewall_driver(compute['ip'], compute['roles'][0], 'noop') - - self.show_step(13) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.check_dpdk_instance_connectivity(os_conn, cluster_id) - - self.env.make_snapshot("deploy_cluster_with_dpdk_tun") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["check_can_not_enable_dpdk_on_non_dedicated_iface"]) - @log_snapshot_after_test - def check_can_not_enable_dpdk_on_non_dedicated_iface(self): - """Check that DPDK cannot be enabled on a non-dedicated interface - - Scenario: - 1. Create new environment with VLAN segmentation for Neutron - 2. Set KVM as Hypervisor - 3. Add controller and compute nodes - 4.
Add private and storage networks to interface - and try to enable DPDK mode - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - enable_feature_group(self.env, 'experimental') - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "vlan" - } - ) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - }) - - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status='pending_roles')[0] - - self.show_step(4) - assigned_networks = { - settings.iface_alias('eth0'): ['fuelweb_admin'], - settings.iface_alias('eth1'): ['public'], - settings.iface_alias('eth2'): ['management'], - settings.iface_alias('eth3'): ['private', 'storage'], - settings.iface_alias('eth4'): [] - } - self.fuel_web.update_node_networks(compute['id'], - interfaces_dict=assigned_networks) - assert_raises( - exceptions.BadRequest, - self.fuel_web.enable_dpdk, compute['id'], - force_enable=True) - - -@test(groups=["support_dpdk_bond"]) -class SupportDPDKBond(BondingTestDPDK, TestDPDK): - """SupportDPDKBond.""" - - def __init__(self): - self.tests_requirements.update({'BONDING': True}) - super(SupportDPDKBond, self).__init__() - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_cluster_with_dpdk_bond"]) - @log_snapshot_after_test - def deploy_cluster_with_dpdk_bond(self): - """Deploy cluster with DPDK, active-backup bonding and Neutron VLAN - - Scenario: - 1. Create cluster with VLAN for Neutron and KVM - 2. Add 1 node with controller role - 3. Add 2 nodes with compute and cinder roles - 4. Setup bonding for all interfaces: 1 for admin, 1 for private - and 1 for public/storage/management networks - 5. Enable DPDK for bond with private network on all computes - 6. Configure HugePages for compute nodes - 7. Run network verification - 8. Deploy the cluster - 9. Run network verification - 10. Run OSTF - 11.
Run instance on compute with DPDK and check its availability - via floating IP - - Duration 90m - Snapshot deploy_cluster_with_dpdk_bond - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - enable_feature_group(self.env, 'experimental') - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings={ - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'], - 'slave-03': ['compute', 'cinder'] - }, - update_interfaces=False - ) - - self.show_step(4) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.fuel_web.update_node_networks( - node['id'], interfaces_dict=deepcopy(self.INTERFACES), - raw_data=deepcopy(self.bond_config) - ) - - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, - roles=['compute'], - role_status='pending_roles') - - self.show_step(5) - for node in computes: - self.fuel_web.enable_dpdk(node['id']) - - self.show_step(6) - for node in computes: - self.fuel_web.setup_hugepages( - node['id'], hp_2mb=256, hp_dpdk_mb=1024) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(11) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.check_dpdk_instance_connectivity(os_conn, cluster_id) - - self.env.make_snapshot("deploy_cluster_with_dpdk_bond") diff --git a/fuelweb_test/tests/test_environment_action.py b/fuelweb_test/tests/test_environment_action.py deleted file mode 100644 index 06b4037e7..000000000 --- a/fuelweb_test/tests/test_environment_action.py +++ /dev/null @@ -1,523 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers.decorators import check_fuel_statistics -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests import base_test_case - - -@test(groups=["cluster_actions"]) -class EnvironmentAction(base_test_case.TestBasic): - """EnvironmentAction.""" # TODO documentation - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_release], - groups=["check_deployment_actions_as_graph"]) - @log_snapshot_after_test - def check_deployment_actions_as_graph(self): - """Check that all cluster actions are using graph engine - - Scenario: - - 1. Revert snapshot "ready" - 2. Get release ID - 3. Get sequence list for this release - 4. Get graphs from release sequence - 5. Check that all graph actions are present in graph list - 6. 
Ensure that there are no additional graphs - - Duration: 1m - """ - - self.show_step(1) - self.env.revert_snapshot("ready") - - self.show_step(self.next_step) - release_id = self.fuel_web.client.get_release_id( - release_name=settings.OPENSTACK_RELEASE_UBUNTU) - - self.show_step(self.next_step) - admin_ip = self.env.get_admin_node_ip() - out = self.ssh_manager.check_call( - ip=admin_ip, - command="fuel2 sequence list -f json -r {}".format(release_id)) - sequence_id = out.stdout_json[0]['id'] - - self.show_step(self.next_step) - out = self.ssh_manager.check_call( - ip=admin_ip, - command="fuel2 sequence show -f json {}".format(sequence_id)) - sequence_graphs = set(out.stdout_json["graphs"].split(", ")) - - self.show_step(self.next_step) - # "default" graph is deployment graph itself - named that for backward - # compatibility - graphs_list = ["net-verification", "deletion", "provision", "default"] - for graph in graphs_list: - asserts.assert_true( - graph in sequence_graphs, - "Graph {!r} is not present in the sequence! {!r}".format( - graph, out.stdout_json)) - sequence_graphs.remove(graph) - - self.show_step(self.next_step) - asserts.assert_false( - sequence_graphs, - "New unexpected graphs were found in release sequence: {!r}! " - "Please check the results and update the test " - "if needed!".format(sequence_graphs)) - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3], - groups=["smoke", "deploy_neutron_stop_reset_on_deploying", - "classic_provisioning"]) - @log_snapshot_after_test - @check_fuel_statistics - def deploy_neutron_stop_on_deploying(self): - """Stop reset cluster in HA mode with neutron - - Scenario: - 1. Create cluster in HA mode with 1 controller - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Verify network - 5. Run provisioning task - 6. Run deployment task - 7. Stop deployment - 8. Add 1 node with cinder role - 9. Re-deploy cluster - 10. Verify network - 11. Run OSTF - - Duration 50m - Snapshot: deploy_neutron_stop_reset_on_deploying - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'tenant': 'stop_deploy', - 'user': 'stop_deploy', - 'password': 'stop_deploy', - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.provisioning_cluster_wait(cluster_id) - self.fuel_web.deploy_task_wait(cluster_id=cluster_id, progress=10) - self.fuel_web.stop_deployment_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:2], timeout=10 * 60) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-03': ['cinder'] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - asserts.assert_equal( - 3, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_stop_reset_on_deploying") - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3], - groups=["smoke", "deploy_neutron_stop_reset_on_provisioning"]) - @log_snapshot_after_test - def deploy_neutron_stop_reset_on_provisioning(self): - """Stop provisioning cluster in HA mode with neutron - - Scenario: - 1. Create cluster in HA mode with 1 controller - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4.
Verify network - 5. Run provisioning task - 6. Stop provisioning - 7. Reset settings - 8. Add 1 node with cinder role - 9. Re-deploy cluster - 10. Verify network - 11. Run OSTF - - Duration 40m - Snapshot: deploy_neutron_stop_reset_on_provisioning - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.provisioning_cluster_wait( - cluster_id=cluster_id, progress=20) - - self.fuel_web.stop_deployment_wait(cluster_id) - - self.fuel_web.stop_reset_env_wait(cluster_id) - - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:2], timeout=10 * 60) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-03': ['cinder'] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - asserts.assert_equal( - 3, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_stop_reset_on_provisioning") - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3], - groups=["smoke", "deploy_reset_on_ready"]) - @log_snapshot_after_test - @check_fuel_statistics - def deploy_reset_on_ready(self): - """Stop reset cluster in HA mode with 1 controller - - Scenario: - 1. Create cluster in Ha mode with 1 controller - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Verify network - 5. Deploy cluster - 6. Reset settings - 7. Update net - 8. Re-deploy cluster - 9. Verify network - 10. Run OSTF - - Duration 40m - Snapshot: deploy_reset_on_ready - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.stop_reset_env_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:2], timeout=10 * 60) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_reset_on_ready") - - -@test(groups=["cluster_actions_ha"]) -class EnvironmentActionOnHA(base_test_case.TestBasic): - """EnvironmentActionOnHA.""" # TODO documentation - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5], - groups=["smoke", "deploy_stop_reset_on_ha"]) - @log_snapshot_after_test - def deploy_stop_reset_on_ha(self): - """Stop reset cluster in ha mode - - Scenario: - 1. Create cluster - 2. Add 3 node with controller role - 3. Verify network - 4. Deploy cluster - 5. Stop deployment - 6. Reset settings - 7. Add 2 nodes with compute role - 8. Re-deploy cluster - 9. Verify network - 10. 
Run OSTF - - Duration 60m - Snapshot: deploy_stop_reset_on_ha - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait_progress(cluster_id, progress=10) - self.fuel_web.stop_deployment_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:3], timeout=10 * 60) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_stop_reset_on_ha") - - -@test(groups=["controller_replacement"]) -class ControllerReplacement(base_test_case.TestBasic): - """ - Test class ControllerReplacement includes the following cases: - - replace controller on ha cluster with neutron tun (VXLAN) provider; - - replace controller on ha cluster with neutron vlan provider; - - replace controller on ha cluster with nova network provider; - """ - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5], - groups=["deploy_ha_neutron_tun_ctrl_replacement"]) - @log_snapshot_after_test - def deploy_ha_neutron_tun_ctrl_replacement(self): - """Replace 1 controller and re-deploy on ha env with neutron vxlan - - Scenario: - 1. Create cluster with Neutron VXLAN - 2. Add 3 nodes with controller role - 3. Add 1 node with compute - 4. Verify network - 5. Deploy cluster - 6. Remove one controller, add a new controller - 7. Deploy changes - 8. Verify network - 9. Run OSTF - - Duration 90m - Snapshot: deploy_ha_neutron_tun_ctrl_replacement - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - data = {"net_provider": "neutron", "net_segment_type": 'tun'} - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings=data - - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.update_nodes( - cluster_id, {'slave-05': ['controller']}, True, False) - self.fuel_web.update_nodes( - cluster_id, {'slave-01': ['controller']}, False, True) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_ha_neutron_tun_ctrl_replacement") - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5], - groups=["deploy_ha_neutron_vlan_ctrl_replacement"]) - @log_snapshot_after_test - def deploy_ha_neutron_vlan_ctrl_replacement(self): - """Replace 1 controller and re-deploy on ha env with neutron vlan - - Scenario: - 1. Create cluster with neutron vlan - 2. Add 3 nodes with controller role - 3. Add 1 node with compute - 4. Verify network - 5. Deploy cluster - 6. Remove one controller, add a new controller - 7. Deploy changes - 8. Verify network - 9.
Run OSTF - - Duration 90m - Snapshot: deploy_ha_neutron_vlan_ctrl_replacement - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - data = {"net_provider": "neutron", "net_segment_type": 'vlan'} - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings=data - - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.update_nodes( - cluster_id, {'slave-05': ['controller']}, True, False) - self.fuel_web.update_nodes( - cluster_id, {'slave-01': ['controller']}, False, True) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_ha_neutron_vlan_ctrl_replacement") - - @test(enabled=False, - depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5], - groups=["deploy_ha_nova_ctrl_replacement"]) - @log_snapshot_after_test - def deploy_ha_nova_ctrl_replacement(self): - # REMOVE THIS NOVA_NETWORK CASE WHEN NEUTRON BECOMES THE DEFAULT - """Replace 1 controller and re-deploy on ha env with nova - - Scenario: - 1. Create cluster with nova - 2. Add 3 nodes with controller role - 3. Add 1 node with compute - 4. Verify network - 5. Deploy cluster - 6. Remove one controller, add a new controller - 7. Deploy changes - 8. Verify network - 9. Run OSTF - - Duration 90m - Snapshot: deploy_ha_nova_ctrl_replacement - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.update_nodes( - cluster_id, {'slave-05': ['controller']}, True, False) - self.fuel_web.update_nodes( - cluster_id, {'slave-01': ['controller']}, False, True) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_ha_nova_ctrl_replacement") diff --git a/fuelweb_test/tests/test_graph_extension.py b/fuelweb_test/tests/test_graph_extension.py deleted file mode 100644 index f91d1b143..000000000 --- a/fuelweb_test/tests/test_graph_extension.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
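The TEST_GRAPH fixture defined below consists of two shell tasks where test_2 declares requires: [test_1], i.e. the graph engine must order test_1 before test_2 on every node. A minimal sketch of how such requires edges induce an execution order, using a plain topological sort (Python 3.9+ graphlib) purely as an illustration, not Fuel's actual scheduler:

from graphlib import TopologicalSorter

# Map each task to its predecessors, mirroring TEST_GRAPH below.
tasks = {
    'test_1': [],
    'test_2': ['test_1'],  # requires: [test_1]
}

# static_order() yields predecessors first.
order = list(TopologicalSorter(tasks).static_order())
assert order == ['test_1', 'test_2']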
-from __future__ import unicode_literals - -from proboscis import test -from proboscis.asserts import assert_equal - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - -TEST_GRAPH = '''- id: test_1 - type: shell - version: 2.1.0 - role: ['/.*/'] - parameters: - cmd: echo "test_1" >> /etc/test_file - timeout: 5 -- id: test_2 - type: shell - version: 2.1.0 - role: ['/.*/'] - requires: [test_1] - parameters: - cmd: echo "test_2" >> /etc/test_file - timeout: 5''' - - -@test(groups=["graph_extension"]) -class GraphExtension(TestBasic): - - def __init__(self): - super(GraphExtension, self).__init__() - self._cluster_id = None - self._admin_ip = self.env.get_admin_node_ip() - - @property - def cluster_id(self): - return self._cluster_id - - @cluster_id.setter - def cluster_id(self, cluster_id): - self._cluster_id = cluster_id - - @property - def admin_ip(self): - return self._admin_ip - - def deploy_custom_graph_wait_cli(self, graph_type): - command = 'fuel2 graph execute --env {} -t {} --format=json'.format( - self.cluster_id, graph_type) - task = self.ssh_manager.check_call(self.admin_ip, command).stdout_json - self.fuel_web.assert_task_success(task) - - def check_created_by_tasks_file(self): - test_file = '/etc/test_file' - cmd = 'egrep "test_1|test_2" {} |wc -l'.format(test_file) - for node in self.fuel_web.client.list_cluster_nodes(self.cluster_id): - res = self.ssh_manager.check_call(node['ip'], cmd).stdout_str - msg = "The file {0} contains the wrong number of matching lines" \ - ": `egrep 'test_1|test_2' {0} |wc -l` should be 2, but it " \ - "is {1} on the node {2}".format(test_file, res, node['name']) - assert_equal(int(res), 2, msg) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["graph_extension_cli"]) - @log_snapshot_after_test - def graph_extension_cli(self): - """Upload and execute graph for env with 3 slaves (CLI) - - Scenario: - 1. Revert snapshot "ready_with_3_slaves" - 2. Create env with 1 controller node and 1 compute+cinder node - 3. Provision nodes - 4. Upload a simple graph with two tasks - 5. Make snapshots for next tests and resume snapshot - 6. Execute the graph - 7. Check that graph tasks were executed and - finished without any errors - 8.
Check the created by graph tasks file - - Duration 10m - """ - self.show_step(1) - self.env.revert_snapshot("ready_with_3_slaves") - self.cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__) - self.show_step(2) - self.fuel_web.update_nodes( - self.cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'] - }) - self.show_step(3) - self.fuel_web.provisioning_cluster_wait(self.cluster_id) - self.show_step(4) - with self.ssh_manager.open_on_remote( - self.admin_ip, '/root/graph.yaml', "w") as f: - f.write(TEST_GRAPH) - cmd = \ - 'fuel2 graph upload -e {} -t my_graph -f /root/graph.yaml'.format( - self.cluster_id) - self.ssh_manager.check_call(self.admin_ip, cmd) - self.show_step(5) - self.env.make_snapshot("extension_graph_prepare_env", is_make=True) - self.env.resume_environment() - self.env.sync_time() - self.show_step(6) - self.deploy_custom_graph_wait_cli('my_graph') - self.show_step(7) - self.fuel_web.assert_all_tasks_completed(self.cluster_id) - self.show_step(8) - self.cluster_id = self.fuel_web.get_last_created_cluster() - self.check_created_by_tasks_file() - - @test(depends_on=[graph_extension_cli], - groups=["graph_extension_api"]) - @log_snapshot_after_test - def graph_extension_api(self): - """Upload and execute graph for env with 4 slaves (API) - - Scenario: - 1. Revert snapshot "extension_graph_prepare_env" - 2. Execute graphs via API - 3. Check that graph tasks was executed and - finished without any errors - 4. Check the created by graph tasks file - - Duration 10m - """ - self.show_step(1) - self.env.revert_snapshot("extension_graph_prepare_env") - self.show_step(2) - self.cluster_id = self.env.fuel_web.get_last_created_cluster() - self.fuel_web.deploy_custom_graph_wait(self.cluster_id, 'my_graph') - self.show_step(3) - self.fuel_web.assert_all_tasks_completed(self.cluster_id) - self.show_step(4) - self.check_created_by_tasks_file() diff --git a/fuelweb_test/tests/test_ha_one_controller.py b/fuelweb_test/tests/test_ha_one_controller.py deleted file mode 100644 index 37e5a2bb7..000000000 --- a/fuelweb_test/tests/test_ha_one_controller.py +++ /dev/null @@ -1,937 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
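# [Editor's note — a minimal sketch, not part of the original file.]
# Much of this suite, including the controller-replacement cases above,
# relies on one scale-out/scale-in idiom: update_nodes() takes the node
# mapping plus two booleans which, judging from the call sites, mean
# pending_addition and pending_deletion. Replacing a controller is then
# "mark the new node for addition, mark the old one for deletion,
# redeploy once". The helper below restates that pattern; the node names
# and the fuel_web object are placeholders for the suite's fixtures.

def replace_controller(fuel_web, cluster_id, old_node, new_node):
    """Swap one controller for another and redeploy (hedged sketch)."""
    # Schedule the new node to join the cluster as a controller...
    fuel_web.update_nodes(
        cluster_id, {new_node: ['controller']}, True, False)
    # ...and the old one to leave it.
    fuel_web.update_nodes(
        cluster_id, {old_node: ['controller']}, False, True)
    # A single deployment applies both pending changes at once.
    fuel_web.deploy_cluster_wait(cluster_id)
    # The real tests then re-verify networking and run OSTF.
    fuel_web.verify_network(cluster_id)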
- -from __future__ import division -from __future__ import unicode_literals - -import re -from warnings import warn - -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test -from proboscis import SkipTest - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.eb_tables import Ebtables -from fuelweb_test.helpers import os_actions -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import MIRROR_UBUNTU -from fuelweb_test.settings import NODE_VOLUME_SIZE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE -from fuelweb_test.settings import iface_alias -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test import logger -from fuelweb_test.tests.test_ha_one_controller_base\ - import HAOneControllerNeutronBase - - -@test() -class OneNodeDeploy(TestBasic): - """OneNodeDeploy. DEPRECATED!""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["deploy_one_node", 'master']) - @log_snapshot_after_test - def deploy_one_node(self): - """Deploy cluster with controller node only - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Deploy the cluster - 4. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - - Duration 20m - - """ - self.env.revert_snapshot("ready") - self.fuel_web.client.list_nodes() - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:1]) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - logger.info('Cluster is {!s}'.format(cluster_id)) - self.fuel_web.update_nodes( - cluster_id, - {'slave-01': ['controller']} - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=4) - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['sanity'], - test_name=('fuel_health.tests.sanity.test_sanity_identity' - '.SanityIdentityTest.test_list_users')) - - -@test(groups=["one_controller_actions"]) -class HAOneControllerNeutron(HAOneControllerNeutronBase): - """HAOneControllerNeutron.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["smoke", "deploy_ha_one_controller_neutron"]) - @log_snapshot_after_test - def deploy_ha_one_controller_neutron(self): - """Deploy cluster in HA mode (one controller) with neutron - - Scenario: - 1. Create cluster in HA mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - 6. Verify networks - 7. Verify network configuration on controller - 8. Run OSTF - - Duration 30m - Snapshot: deploy_ha_one_controller_neutron - """ - super(self.__class__, self).deploy_ha_one_controller_neutron_base( - snapshot_name="deploy_ha_one_controller_neutron") - - @test(depends_on=[deploy_ha_one_controller_neutron], - groups=["ha_one_controller_neutron_node_deletion"]) - @log_snapshot_after_test - def ha_one_controller_neutron_node_deletion(self): - """Remove compute from cluster in ha mode with neutron - - Scenario: - 1. Revert "deploy_ha_one_controller_neutron" environment - 2. Remove compute node - 3. 
Deploy changes - 4. Verify node returns to unallocated pull - - Duration 8m - - """ - self.env.revert_snapshot("deploy_ha_one_controller_neutron") - - cluster_id = self.fuel_web.get_last_created_cluster() - nailgun_nodes = self.fuel_web.update_nodes( - cluster_id, {'slave-02': ['compute']}, False, True) - task = self.fuel_web.deploy_cluster(cluster_id) - self.fuel_web.assert_task_success(task) - nodes = [ - node for node in nailgun_nodes if node["pending_deletion"] is True] - assert_true( - len(nodes) == 1, "Verify 1 node has pending deletion status" - ) - self.fuel_web.wait_node_is_discovered(nodes[0]) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ha_one_controller_neutron_blocked_vlan"]) - @log_snapshot_after_test - def ha_one_controller_neutron_blocked_vlan(self): - """Verify network verification with blocked VLANs - - Scenario: - 1. Create cluster in Ha mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - 6. Block first VLAN - 7. Run Verify network and assert it fails - 8. Restore first VLAN - - Duration 20m - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'] - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - ebtables = self.env.get_ebtables( - cluster_id, self.env.d_env.nodes().slaves[:2]) - ebtables.restore_vlans() - try: - ebtables.block_first_vlan() - self.fuel_web.verify_network(cluster_id, success=False) - finally: - ebtables.restore_first_vlan() - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ha_one_controller_neutron_add_compute"]) - @log_snapshot_after_test - def ha_one_controller_neutron_add_compute(self): - """Add compute node to cluster in ha mode - - Scenario: - 1. Create cluster in HA mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - 6. Add 1 node with role compute - 7. Deploy changes - 8. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - 9. Verify services list on compute nodes - 10. 
Run OSTF - - Duration 40m - Snapshot: ha_one_controller_neutron_add_compute - """ - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'tenant': 'neutronAddCompute', - 'user': 'neutronAddCompute', - 'password': 'neutronAddCompute', - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], data['password'], data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - - self.fuel_web.update_nodes( - cluster_id, {'slave-03': ['compute']}, True, False) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=6) - - assert_equal( - 3, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("ha_one_controller_neutron_add_compute") - - @test(depends_on=[deploy_ha_one_controller_neutron], - groups=["deploy_base_os_node"]) - @log_snapshot_after_test - def deploy_base_os_node(self): - """Add base-os node to cluster in HA mode with one controller - - Scenario: - 1. Revert snapshot "deploy_ha_one_controller_neutron" - 2. Add 1 node with base-os role - 3. Deploy the cluster - 4. Run network verification - 5. Run OSTF - 6. Ssh to the base-os node and check /etc/astute.yaml link source. - 7. Make snapshot. - - Snapshot: deploy_base_os_node - - """ - self.env.revert_snapshot("deploy_ha_one_controller_neutron") - - cluster_id = self.fuel_web.get_last_created_cluster() - - self.fuel_web.update_nodes( - cluster_id, {'slave-03': ['base-os']}, True, False) - self.fuel_web.deploy_cluster_wait(cluster_id) - - assert_equal( - 3, len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - _ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip'] - result = self.ssh_manager.check_call( - command='hiera roles', ip=_ip).stdout_str - assert_equal( - '["base-os"]', - result, - message="Role mismatch. Node slave-03 is not base-os") - - self.env.make_snapshot("deploy_base_os_node") - - @test(depends_on=[deploy_ha_one_controller_neutron], - groups=["delete_environment"]) - @log_snapshot_after_test - def delete_environment(self): - """Delete existing environment - and verify nodes returns to unallocated state - - Scenario: - 1. Revert "deploy_ha_one_controller" environment - 2. Delete environment - 3. 
Verify node returns to unallocated pull - - Duration 15m - """ - self.env.revert_snapshot("deploy_ha_one_controller_neutron") - - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.client.delete_cluster(cluster_id) - nailgun_nodes = self.fuel_web.client.list_nodes() - nodes = [ - node for node in nailgun_nodes if node["pending_deletion"] is True] - assert_true( - len(nodes) == 2, "Verify 2 node has pending deletion status" - ) - self.fuel_web.wait_node_is_discovered(nodes[0]) - self.fuel_web.wait_node_is_discovered(nodes[1]) - - -@test(groups=["multirole"]) -class MultiroleControllerCinder(TestBasic): - """MultiroleControllerCinder.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_multirole_controller_cinder"]) - @log_snapshot_after_test - def deploy_multirole_controller_cinder(self): - """Deploy cluster in HA mode with multi-role controller and cinder - - Scenario: - 1. Create cluster in HA mode - 2. Add 1 node with controller and cinder roles - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 30m - Snapshot: deploy_multirole_controller_cinder - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'tenant': 'multirolecinder', - 'user': 'multirolecinder', - 'password': 'multirolecinder', - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'cinder'], - 'slave-02': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_multirole_controller_cinder") - - -@test(groups=["multirole"]) -class MultiroleComputeCinder(TestBasic): - """MultiroleComputeCinder.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_multirole_compute_cinder", "bvt_3"]) - @log_snapshot_after_test - def deploy_multirole_compute_cinder(self): - """Deploy cluster in HA mode with multi-role compute and cinder - - Scenario: - 1. Create cluster in Ha mode - 2. Add 1 node with controller role - 3. Add 2 node with compute and cinder roles - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 30m - Snapshot: deploy_multirole_compute_cinder - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'], - 'slave-03': ['compute', 'cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_multirole_compute_cinder") - - -@test(groups=["multirole"]) -class MultiroleMultipleServices(TestBasic): - """MultiroleMultipleServices.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_multiple_services_local_mirror"]) - @log_snapshot_after_test - def deploy_multiple_services_local_mirror(self): - """Deploy cluster with multiple services using local mirror - - Scenario: - 1. Revert snapshot 'prepare_slaves_3' with default set of mirrors - 2. Run 'fuel-mirror' to create mirror repositories - 3. 
Create cluster with many components to check as many - packages in local mirrors have correct dependencies - 4. Run 'fuel-mirror' to replace cluster repositories - with local mirrors - 5. Check that repositories are changed - 6. Deploy cluster - 7. Check running services with OSTF - - Duration 140m - """ - self.show_step(1) - self.env.revert_snapshot('ready_with_3_slaves') - - self.show_step(2) - admin_ip = self.ssh_manager.admin_ip - if MIRROR_UBUNTU != '': - ubuntu_url = MIRROR_UBUNTU.split()[1] - replace_cmd = \ - "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \ - " /usr/share/fuel-mirror/ubuntu.yaml".format( - ubuntu_url) - self.ssh_manager.check_call(ip=admin_ip, command=replace_cmd) - - create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu' - self.env.admin_actions.ensure_cmd(create_mirror_cmd) - - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT['tun'], - 'sahara': True, - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': True - } - ) - - self.show_step(4) - apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \ - '--env {0} --replace'.format(cluster_id) - self.ssh_manager.check_call(ip=admin_ip, command=apply_mirror_cmd) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['compute', 'ceph-osd'], - 'slave-03': ['cinder', 'ceph-osd'] - } - ) - - self.show_step(5) - repos_ubuntu = self.fuel_web.get_cluster_repos(cluster_id) - remote_repos = [] - for repo_value in repos_ubuntu['value']: - if (self.fuel_web.admin_node_ip not in repo_value['uri'] and - '{settings.MASTER_IP}' not in repo_value['uri']): - remote_repos.append({repo_value['name']: repo_value['uri']}) - assert_true(not remote_repos, - "Some repositories weren't replaced with local mirrors: " - "{0}".format(remote_repos)) - - self.fuel_web.verify_network(cluster_id) - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke']) - - -@test -class FloatingIPs(TestBasic): - """FloatingIPs.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_floating_ips"]) - @log_snapshot_after_test - def deploy_floating_ips(self): - """Deploy cluster with non-default 1 floating IPs ranges - - Scenario: - 1. Create cluster in HA mode - 2. Add 1 node with controller role - 3. Add 1 node with compute and cinder roles - 4. Update floating IP ranges. Use 1 range - 5. Deploy the cluster - 6. Verify available floating IP list - 7. 
Run OSTF - - Duration 30m - Snapshot: deploy_floating_ips - - """ - # Test should be re-worked for neutron according to LP#1481322 - self.env.revert_snapshot("ready_with_3_slaves") - - csettings = { - 'tenant': 'floatingip', - 'user': 'floatingip', - 'password': 'floatingip', - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT_TYPE, - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings=csettings, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - - floating_list = [self.fuel_web.get_floating_ranges()[0][0]] - networking_parameters = { - "floating_ranges": floating_list} - - self.fuel_web.client.update_network( - cluster_id, - networking_parameters=networking_parameters - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - user=csettings['user'], - passwd=csettings['password'], - tenant=csettings['tenant']) - - # assert ips - expected_ips = self.fuel_web.get_floating_ranges()[1][0] - self.fuel_web.assert_cluster_floating_list( - os_conn, cluster_id, expected_ips) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_floating_ips") - - -@test(enabled=False, groups=["thread_1"]) -class NodeMultipleInterfaces(TestBasic): - """NodeMultipleInterfaces. - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_l2_network_config - """ # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_node_multiple_interfaces"]) - @log_snapshot_after_test - def deploy_node_multiple_interfaces(self): - """Deploy cluster with networks allocated on different interfaces - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_l2_network_config.TestL2NetworkConfig - - Scenario: - 1. Create cluster in Ha mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Add 1 node with cinder role - 5. Split networks on existing physical interfaces - 6. Deploy the cluster - 7. Verify network configuration on each deployed node - 8. Run network verification - - Duration 25m - Snapshot: deploy_node_multiple_interfaces - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready_with_3_slaves") - - interfaces_dict = { - iface_alias('eth0'): ['fuelweb_admin'], - iface_alias('eth1'): ['public'], - iface_alias('eth2'): ['storage'], - iface_alias('eth3'): ['private'], - iface_alias('eth4'): ['management'], - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.fuel_web.update_node_networks(node['id'], interfaces_dict) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.env.make_snapshot("deploy_node_multiple_interfaces", is_make=True) - - -@test(enabled=False, groups=["thread_1"]) -class NodeDiskSizes(TestBasic): - """NodeDiskSizes. 
- - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_discovery_slave - - """ # TODO documentation - - @test(enabled=False, depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["check_nodes_notifications"]) - @log_snapshot_after_test - def check_nodes_notifications(self): - """Verify nailgun notifications for discovered nodes - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_discovery_slave.TestNodeDiskSizes - - Scenario: - 1. Revert snapshot "ready_with_3_slaves" - 2. Verify hard drive sizes for discovered nodes in /api/nodes - 3. Verify hard drive sizes for discovered nodes in notifications - - Duration 5m - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready_with_3_slaves") - - # assert /api/nodes - disk_size = NODE_VOLUME_SIZE * 1024 ** 3 - nailgun_nodes = self.fuel_web.client.list_nodes() - for node in nailgun_nodes: - for disk in node['meta']['disks']: - assert_equal(disk['size'], disk_size, 'Disk size') - - hdd_size = "{0:.3} TB HDD".format((disk_size * 3 / (10 ** 9)) / 1000) - notifications = self.fuel_web.client.get_notifications() - - for node in nailgun_nodes: - # assert /api/notifications - for notification in notifications: - discover = notification['topic'] == 'discover' - current_node = notification['node_id'] == node['id'] - if current_node and discover and \ - "discovered" in notification['message']: - assert_true(hdd_size in notification['message'], - '"{size} not found in notification message ' - '"{note}" for node {node} ' - '(hostname {host})!'.format( - size=hdd_size, - note=notification['message'], - node=node['name'], - host=node['hostname']) - ) - - # assert disks - disks = self.fuel_web.client.get_node_disks(node['id']) - for disk in disks: - assert_equal( - disk['size'], NODE_VOLUME_SIZE * 1024 - 500, - 'Disk size {0} is not equals expected {1}'.format( - disk['size'], NODE_VOLUME_SIZE * 1024 - 500 - )) - - @test(enabled=False, - depends_on=[NodeMultipleInterfaces.deploy_node_multiple_interfaces], - groups=["check_nodes_disks"]) - @log_snapshot_after_test - def check_nodes_disks(self): - """Verify hard drive sizes for deployed nodes - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_discovery_slave.TestNodeDiskSizes - - Scenario: - 1. Revert snapshot "deploy_node_multiple_interfaces" - 2. 
Verify hard drive sizes for deployed nodes - - Duration 15m - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("deploy_node_multiple_interfaces") - - nodes_dict = { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - - # assert node disks after deployment - for node_name in nodes_dict: - str_block_devices = self.fuel_web.get_cluster_block_devices( - node_name) - - logger.debug("Block device:\n{}".format(str_block_devices)) - - expected_regexp = re.compile( - "vda\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(NODE_VOLUME_SIZE)) - assert_true( - expected_regexp.search(str_block_devices), - "Unable to find vda block device for {}G in: {}".format( - NODE_VOLUME_SIZE, str_block_devices - )) - - expected_regexp = re.compile( - "vdb\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(NODE_VOLUME_SIZE)) - assert_true( - expected_regexp.search(str_block_devices), - "Unable to find vdb block device for {}G in: {}".format( - NODE_VOLUME_SIZE, str_block_devices - )) - - expected_regexp = re.compile( - "vdc\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(NODE_VOLUME_SIZE)) - assert_true( - expected_regexp.search(str_block_devices), - "Unable to find vdc block device for {}G in: {}".format( - NODE_VOLUME_SIZE, str_block_devices - )) - - -@test(enabled=False, groups=["thread_1"]) -class MultinicBootstrap(TestBasic): - """MultinicBootstrap. - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_discovery_slave - - """ # TODO documentation - - @test(enabled=False, - depends_on=[SetupEnvironment.prepare_release], - groups=["multinic_bootstrap_booting"]) - @log_snapshot_after_test - def multinic_bootstrap_booting(self): - """Verify slaves booting with blocked mac address - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_discovery_slave.TestMultinicBootstrap - - Scenario: - 1. Revert snapshot "ready" - 2. Block traffic for first slave node (by mac) - 3. Restore mac addresses and boot first slave - 4. Verify slave mac addresses is equal to unblocked - - Duration 2m - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready") - - slave = self.env.d_env.nodes().slaves[0] - mac_addresses = [interface.mac_address for interface in - slave.interfaces.filter(network__name='internal')] - try: - for mac in mac_addresses: - Ebtables.block_mac(mac) - for mac in mac_addresses: - Ebtables.restore_mac(mac) - slave.destroy() - self.env.d_env.nodes().admins[0].revert("ready") - nailgun_slave = self.env.bootstrap_nodes([slave])[0] - assert_equal(mac.upper(), nailgun_slave['mac'].upper()) - Ebtables.block_mac(mac) - finally: - for mac in mac_addresses: - Ebtables.restore_mac(mac) - - -@test(enabled=False, groups=["thread_1"]) -class UntaggedNetworksNegative(TestBasic): - """UntaggedNetworksNegative. 
- - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_l2_network_config.TestL2NetworkConfig - - """ # TODO documentation - - @test( - depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["untagged_networks_negative"], - enabled=False) - @log_snapshot_after_test - def untagged_networks_negative(self): - """Verify network verification fails with untagged network on eth0 - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_l2_network_config.TestL2NetworkConfig - - Scenario: - 1. Create cluster in ha mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Split networks on existing physical interfaces - 5. Remove VLAN tagging from networks which are on eth0 - 6. Run network verification (assert it fails) - 7. Start cluster deployment (assert it fails) - - Duration 30m - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready_with_3_slaves") - - vlan_turn_off = {'vlan_start': None} - interfaces = { - iface_alias('eth0'): ["fixed"], - iface_alias('eth1'): ["public"], - iface_alias('eth2'): ["management", "storage"], - iface_alias('eth3'): [] - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - - nets = self.fuel_web.client.get_networks(cluster_id)['networks'] - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in nailgun_nodes: - self.fuel_web.update_node_networks(node['id'], interfaces) - - # select networks that will be untagged: - for net in nets: - net.update(vlan_turn_off) - - # stop using VLANs: - self.fuel_web.client.update_network(cluster_id, networks=nets) - - # run network check: - self.fuel_web.verify_network(cluster_id, success=False) - - # deploy cluster: - task = self.fuel_web.deploy_cluster(cluster_id) - self.fuel_web.assert_task_failed(task) - - -@test(groups=["thread_usb"]) -class HAOneControllerNeutronUSB(HAOneControllerNeutronBase): - """HAOneControllerNeutronUSB.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3]) - @log_snapshot_after_test - def deploy_ha_one_controller_neutron_usb(self): - """Deploy cluster in HA mode (1 controller) with neutron USB - - Scenario: - 1. Create cluster in HA mode - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - 6. Verify networks - 7. Verify network configuration on controller - 8. Run OSTF - - Duration 30m - Snapshot: deploy_ha_one_controller_neutron - """ - - super(self.__class__, self).deploy_ha_one_controller_neutron_base( - snapshot_name="deploy_ha_one_controller_neutron_usb") diff --git a/fuelweb_test/tests/test_ha_one_controller_base.py b/fuelweb_test/tests/test_ha_one_controller_base.py deleted file mode 100644 index 49ba0b9b3..000000000 --- a/fuelweb_test/tests/test_ha_one_controller_base.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuelweb_test.helpers import os_actions -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.helpers.checkers import check_free_space_admin -from fuelweb_test.helpers.checkers import check_free_space_slave - - -class HAOneControllerNeutronBase(TestBasic): - """HAOneControllerNeutronBase""" # TODO documentation - - def deploy_ha_one_controller_neutron_base( - self, snapshot_name): - - self.env.revert_snapshot("ready_with_3_slaves") - data = { - 'tenant': 'neutronOneController', - 'user': 'neutronOneController', - 'password': 'neutronOneController', - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - check_free_space_admin(self.env) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - check_free_space_slave(self.env) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], data['password'], data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot(snapshot_name, is_make=True) diff --git a/fuelweb_test/tests/test_ironic_base.py b/fuelweb_test/tests/test_ironic_base.py deleted file mode 100644 index 33f96374e..000000000 --- a/fuelweb_test/tests/test_ironic_base.py +++ /dev/null @@ -1,510 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.common import Common -from fuelweb_test.helpers import ironic_actions -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import IRONIC_USER_IMAGE_URL -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ironic"]) -class TestIronicBase(TestBasic): - """TestIronicBase""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ironic_base"]) - @log_snapshot_after_test - def ironic_base( - self): - """Deploy cluster in HA mode with Ironic: - - Scenario: - 1. Create cluster - 2. Add 1 controller node - 3. Add 1 compute node - 4. Add 1 ironic node - 5. Deploy cluster - 6. Verify network - 7. 
Run OSTF - - Snapshot: ironic_base - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'], - "ironic": True, - } - ) - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['ironic'], - } - ) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("ironic_base") - - -class TestIronicDeploy(TestBasic): - """Test ironic provisioning on VM.""" - - def _deploy_ironic_cluster(self, **kwargs): - default_settings = { - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT['vlan'], - 'ironic': True} - default_nodes = { - 'slave-01': ['controller'], - 'slave-02': ['controller', 'ironic'], - 'slave-03': ['controller', 'ironic'], - 'slave-04': ['ironic'], - 'slave-05': ['compute']} - settings = kwargs.get('settings') or default_settings - nodes = kwargs.get('nodes') or default_nodes - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings=settings - ) - - self.fuel_web.update_nodes( - cluster_id, - nodes - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - return cluster_id - - def _create_os_resources(self, ironic_conn): - devops_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node( - devops_node) - - ironic_conn.upload_user_image(nailgun_node, - ssh_manager=self.ssh_manager, - img_url=IRONIC_USER_IMAGE_URL) - - ironic_slaves = self.env.d_env.nodes().ironics - server_ip = self.env.d_env.router('public') - - for ironic_slave in ironic_slaves: - ironic_conn.enroll_ironic_node(ironic_slave, server_ip) - - ironic_conn.wait_for_ironic_hypervisors(ironic_conn, ironic_slaves) - - @staticmethod - def _rand_name(name): - """Randomize the given name.""" - return name + str(random.randint(1, 0x7fffffff)) - - def _boot_nova_instances(self, ironic_conn): - ironic_slaves = self.env.d_env.nodes().ironics - user_image = ironic_conn.get_image_by_name('virtual_trusty_ext4') - network = ironic_conn.nova.networks.find(label='baremetal') - # Randomize name to avoid conflict on repetitive flavor creation. - flavor_name = self._rand_name('baremetal_flavor') - flavor = ironic_conn.create_flavor(flavor_name, 1024, 1, 50) - nics = [{'net-id': network.id}] - - for ironic_slave in ironic_slaves: - ironic_conn.nova.servers.create( - name=ironic_slave.name, - image=user_image.id, - flavor=flavor.id, - nics=nics) - - def _boot_check_delete_vm(self, ironic_conn): - """Boot instance, verify connection, then delete instance.""" - self._boot_nova_instances(ironic_conn) - ironic_conn.wait_for_vms(ironic_conn) - ironic_conn.verify_vms_connection(ironic_conn) - ironic_conn.delete_servers(ironic_conn) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["ironic_deploy_swift"]) - @log_snapshot_after_test - def ironic_deploy_swift(self): - """Deploy ironic with 1 baremetal node - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. 
Add 2 node with controller+ironic role - 4. Add 1 node with compute role - 5. Add 1 nodes with ironic role - 6. Deploy the cluster - 7. Upload image to glance - 8. Enroll Ironic nodes - 9. Boot nova instance - 10. Check Nova instance status - - Duration 90m - Snapshot ironic_deploy_swift - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - cluster_id = self._deploy_ironic_cluster() - - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.show_step(7) - self.show_step(8) - self._create_os_resources(ironic_conn) - - self.show_step(9) - self._boot_nova_instances(ironic_conn) - - self.show_step(10) - ironic_conn.wait_for_vms(ironic_conn) - ironic_conn.verify_vms_connection(ironic_conn) - - self.env.make_snapshot("ironic_deploy_swift") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["ironic_deploy_ceph"]) - @log_snapshot_after_test - def ironic_deploy_ceph(self): - """Deploy ironic with 1 baremetal node - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 2 nodes with ironic+ceph-osd role - 4. Add 1 node with compute+ceph-osd role - 5. Add 1 nodes with ironic role - 6. Deploy the cluster - 7. Upload image to glance - 8. Enroll Ironic nodes - 9. Boot nova instance - 10. Check Nova instance status - - Duration 90m - Snapshot ironic_deploy_ceph - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - data = { - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'volumes_lvm': False, - 'tenant': 'ceph1', - 'user': 'ceph1', - 'password': 'ceph1', - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT['vlan'], - 'ironic': True} - nodes = { - 'slave-01': ['controller'], - 'slave-02': ['ironic', 'ceph-osd'], - 'slave-03': ['ironic', 'ceph-osd'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['ironic']} - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - cluster_id = self._deploy_ironic_cluster(settings=data, nodes=nodes) - - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id), - user='ceph1', - passwd='ceph1', - tenant='ceph1') - - self.show_step(7) - self.show_step(8) - self._create_os_resources(ironic_conn) - - self.show_step(9) - self._boot_nova_instances(ironic_conn) - - self.show_step(10) - ironic_conn.wait_for_vms(ironic_conn) - ironic_conn.verify_vms_connection(ironic_conn) - - self.env.make_snapshot("ironic_deploy_ceph") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ironic_deploy_sahara"]) - @log_snapshot_after_test - def ironic_deploy_sahara(self): - """Deploy Ironic with Sahara - - Scenario: - 1. Create cluster. Set option for Sahara installation - 2. Add 1 node with Controller role - 3. Add 1 node with Compute role - 4. Add 1 node with Ironic conductor role - 5. Deploy the cluster - 6. Upload image to Glance - 7. Enroll Ironic nodes - 8. Boot Nova instance - 9. 
Check Nova instance status - - Duration 90m - Snapshot ironic_deploy_sahara - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT['vlan'], - 'ironic': True, - 'sahara': True, - 'tenant': 'sharaoscomponent', - 'user': 'sharaoscomponent', - 'password': 'sharaoscomponent'} - - nodes = { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['ironic']} - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - cluster_id = self._deploy_ironic_cluster(settings=data, nodes=nodes) - - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], data['password'], data['tenant']) - - self.show_step(6) - self._create_os_resources(ironic_conn) - self.show_step(7) - self._boot_nova_instances(ironic_conn) - self.show_step(8) - ironic_conn.wait_for_vms(ironic_conn) - self.show_step(9) - ironic_conn.verify_vms_connection(ironic_conn) - - self.env.make_snapshot("ironic_deploy_sahara") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ironic_deploy_ceilometer"], - enabled=False) - @log_snapshot_after_test - def ironic_deploy_ceilometer(self): - """Deploy Ironic with Ceilometer - - Scenario: - 1. Create cluster - 2. Add 1 node with Controller role - 3. Add 1 node with Compute role - 4. Add 1 node with Ironic and Mongo roles - 5. Deploy the cluster - 6. Upload image to glance - 7. Enroll Ironic nodes - 8. Boot nova instance - 9. Check Nova instance status - - Duration 90m - Snapshot ironic_deploy_ceilometer - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT['vlan'], - 'ironic': True, - 'ceilometer': True} - - nodes = { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['ironic', 'mongo']} - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - cluster_id = self._deploy_ironic_cluster(settings=data, nodes=nodes) - - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.show_step(6) - self._create_os_resources(ironic_conn) - self.show_step(7) - self._boot_nova_instances(ironic_conn) - self.show_step(8) - ironic_conn.wait_for_vms(ironic_conn) - self.show_step(9) - ironic_conn.verify_vms_connection(ironic_conn) - - self.env.make_snapshot("ironic_deploy_ceilometer") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_scale_controller_ironic"]) - @log_snapshot_after_test - def deploy_scale_controller_ironic(self): - """Test cluster scaling with Controller and Ironic - - Scenario: - 1. Create cluster with 5 slave nodes - 2. Bootstrap 1 additional slave node - 3. Add 2 Controller nodes - 4. Add 1 Compute node - 5. Add 1 Controller+Ironic node - 6. Deploy the cluster - 7. Run OSTF tests - 8. Boot, check connectivity, delete Ironic VM - 9. Rebalance Swift rings - 10. Add 1 Controller node - 11. Add 1 Controller+Ironic node - 12. Redeploy the cluster - 13. Run OSTF tests - 14. Boot, check connectivity, delete Ironic VM - 15. Rebalance Swift rings - 16. Remove 1 Controller node - 17. Remove 1 Controller+Ironic node - 18. Redeploy the cluster - 19. Run OSTF tests - 20. 
Boot, check connectivity, delete Ironic VM - - Duration 90m - Snapshot deploy_scale_controller_ironic - """ - - self.env.revert_snapshot("ready_with_5_slaves") - # Deploy 1st part - data = { - 'net_segment_type': NEUTRON_SEGMENT['vlan'], - 'ironic': True} - - nodes = { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller', 'ironic'], - 'slave-04': ['compute']} - - self.show_step(1) - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6]) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - cluster_id = self._deploy_ironic_cluster(settings=data, nodes=nodes) - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id)) - self._create_os_resources(ironic_conn) - self.show_step(8) - self._boot_check_delete_vm(ironic_conn) - - # Rebalance swift rings, add nodes and redeploy - self.show_step(9) - primary_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - ip = self.fuel_web.get_nailgun_node_by_name(primary_node.name)['ip'] - Common.rebalance_swift_ring(ip) - self.show_step(10) - self.show_step(11) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-05': ['controller'], - 'slave-06': ['controller', 'ironic'] - } - ) - self.show_step(12) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(13) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.show_step(14) - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id)) - self._boot_check_delete_vm(ironic_conn) - - # Rebalance swift rings, remove nodes and redeploy - self.show_step(15) - primary_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - ip = self.fuel_web.get_nailgun_node_by_name(primary_node.name)['ip'] - Common.rebalance_swift_ring(ip) - self.show_step(16) - self.show_step(17) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-05': ['controller'], - 'slave-06': ['controller', 'ironic'] - }, - pending_addition=False, - pending_deletion=True - ) - self.show_step(18) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(19) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.show_step(20) - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id)) - self._boot_check_delete_vm(ironic_conn) - - self.env.make_snapshot("deploy_scale_controller_ironic") diff --git a/fuelweb_test/tests/test_jumbo_frames.py b/fuelweb_test/tests/test_jumbo_frames.py deleted file mode 100644 index ceb25d2d9..000000000 --- a/fuelweb_test/tests/test_jumbo_frames.py +++ /dev/null @@ -1,575 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
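# [Editor's note — a minimal sketch, not part of the original file.]
# The jumbo-frame checks below reduce to one piece of arithmetic: to make
# a ping fill an N-byte packet, the ICMP payload must be N minus 28 bytes
# (20 for the IPv4 header plus 8 for the ICMP header), minus any tunnel
# overhead. The tests pass mtu_offset=0 for Neutron VLAN segmentation and
# mtu_offset=50 for VXLAN, matching VXLAN's usual encapsulation cost.

def ping_payload_size(mtu, mtu_offset=0):
    """Payload for `ping -s` that exactly fills an mtu-byte packet."""
    return mtu - 28 - mtu_offset

# Example, the 9000-byte jumbo-frame check over VXLAN:
#   ping_payload_size(9000, mtu_offset=50) == 8922
#   i.e. the instance runs `ping -s 8922 <fixed_ip>`.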
- -import subprocess - -from devops.helpers import helpers as devops_helpers -from devops.helpers.ssh_client import SSHAuth -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import decorators -from fuelweb_test.helpers import os_actions -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.settings import iface_alias -from fuelweb_test.tests import base_test_case - - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) - - -@test(groups=["jumbo_frames"]) -class TestJumboFrames(base_test_case.TestBasic): - def __init__(self): - self.os_conn = None - super(TestJumboFrames, self).__init__() - - interfaces = { - iface_alias('eth0'): ['fuelweb_admin'], - iface_alias('eth1'): ['public'], - iface_alias('eth2'): ['management'], - iface_alias('eth3'): ['private'], - iface_alias('eth4'): ['storage'], - } - - iface_update = { - 'name': iface_alias('eth3'), - 'interface_properties': { - 'mtu': 9000, - 'disable_offloading': False - } - } - - def check_node_iface_mtu(self, node, iface, mtu): - """Check mtu on environment node network interface.""" - - return "mtu {0}".format(mtu) in self.get_node_iface(node, iface) - - @staticmethod - def get_node_iface(node, iface): - """Get environment node network interface.""" - - command = "sudo ip link show {0}".format(iface) - return ''.join(node.execute(command)['stdout']) - - @staticmethod - def set_host_iface_mtu(iface, mtu): - """Set devops/fuel-qa host network interface mtu.""" - - command = "sudo ip link set {0} mtu {1}".format(iface, mtu).split() - return subprocess.call(command, stderr=subprocess.STDOUT) - - @staticmethod - def get_host_iface(iface): - """Get devops/fuel-qa host network interface.""" - - command = "sudo ip link show {0}".format(iface).split() - return subprocess.check_output(command, stderr=subprocess.STDOUT) - - @staticmethod - def get_host_bridge_ifaces(bridge_name): - """Get list of devops/fuel-qa host network bridge interfaces.""" - - command = "sudo brctl show {0}".format(bridge_name).split() - ifaces = subprocess.check_output(command, stderr=subprocess.STDOUT) - - ifaces = ifaces.splitlines()[1:] - bridge_iface = ifaces[0].split()[-1] - ifaces = [iface.strip() for iface in ifaces[1:]] - ifaces.append(bridge_iface) - - return ifaces - - def boot_instance_on_node(self, hypervisor_name, label, boot_timeout=300, - need_floating_ip=True): - instance = self.os_conn.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), label=label) - logger.info("New instance {0} created on {1}" - .format(instance.id, hypervisor_name)) - ip = self.os_conn.get_nova_instance_ip(instance, net_name=label, - addrtype='fixed') - logger.info("Instance {0} has IP {1}".format(instance.id, ip)) - - if not need_floating_ip: - return self.os_conn.nova.servers.get(instance.id) - - ip = self.os_conn.assign_floating_ip(instance) - logger.info("Floating address {0} associated with instance {1}" - .format(ip.ip, instance.id)) - - logger.info("Wait for ping from instance {}".format(instance.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(ip.ip, 22), - timeout=boot_timeout, - timeout_msg=("Instance {0} is unreachable for {1} seconds". 
- format(instance.id, boot_timeout))) - - return self.os_conn.nova.servers.get(instance.id) - - def ping_instance_from_instance(self, source_instance, - destination_instance, - net_from, net_to, size, count=1): - destination_ip = self.os_conn.get_nova_instance_ip( - destination_instance, net_name=net_to, addrtype='fixed') - source_ip = self.os_conn.get_nova_instance_ip( - source_instance, net_name=net_from, addrtype='floating') - - with self.fuel_web.get_ssh_for_node("slave-01") as ssh: - command = "ping -s {0} {1}".format(size, destination_ip) - if count: - command = "{0} -c {1}".format(command, count) - logger.info( - "Try to ping private address {0} from {1} with {2} {3} bytes " - "packet(s): {4}".format(destination_ip, source_ip, count, size, - command)) - - ping = ssh.execute_through_host( - hostname=source_ip, - cmd=command, - auth=cirros_auth - ) - - logger.info( - "Ping result: \n" - "{0}\n" - "{1}\n" - "exit_code={2}".format( - ping['stdout_str'], ping['stderr_str'], ping['exit_code'])) - - return 0 == ping['exit_code'] - - def check_mtu_size_between_instances(self, mtu_offset, diff_net=False): - """Check private network mtu size - - Scenario: - 1. Boot two instances on different compute hosts - 2. Ping one from another with 1500 bytes packet - 3. Ping one from another with 9000 bytes packet - 4. Delete instances - - """ - cluster_id = self.fuel_web.get_last_created_cluster() - self.os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - net_destination = net_name - need_floating_ip = True - hypervisors = self.os_conn.get_hypervisors() - - if diff_net: - net_destination = 'private1' - need_floating_ip = False - net1 = self.os_conn.create_network(net_destination)['network'] - subnet1 = self.os_conn.create_subnet('private1_subnet', net1['id'], - '192.168.200.0/24') - router = self.os_conn.get_router_by_name('router04') - self.os_conn.add_router_interface(router['id'], subnet1['id']) - - destination_instance = self.boot_instance_on_node( - hypervisors[1].hypervisor_hostname, label=net_destination, - need_floating_ip=need_floating_ip) - source_instance = self.boot_instance_on_node( - hypervisors[0].hypervisor_hostname, label=net_name) - - logger.info("Wait for ping from instance {}".format( - destination_instance)) - devops_helpers.wait( - lambda: self.ping_instance_from_instance( - source_instance, destination_instance, net_name, - net_destination, size=15, count=3), - interval=10, - timeout=600, - timeout_msg=("Instance {0} is unreachable for 600 seconds". - format(destination_instance.id))) - - for mtu in [1500, 9000]: - size = mtu - 28 - mtu_offset - asserts.assert_true( - self.ping_instance_from_instance( - source_instance, destination_instance, net_name, - net_destination, size=size, count=3), - "Ping response was not received for " - "{} bytes package".format(mtu)) - - for instance in [source_instance, destination_instance]: - self.os_conn.delete_instance(instance) - self.os_conn.verify_srv_deleted(instance) - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5], - groups=["prepare_5_slaves_with_jumbo_frames"]) - def prepare_5_slaves_with_jumbo_frames(self): - """Setup jumbo frames on private bridge on host - - Scenario: - 1. Find bridge with name "private" - 2. Set mtu 9000 for all bridge interfaces - 3. 
Make snapshot ready_with_5_slaves_jumbo_frames - - Duration 5m - Snapshot ready_with_5_slaves_jumbo_frames - - """ - self.check_run("ready_with_5_slaves_jumbo_frames") - self.env.revert_snapshot("ready_with_5_slaves") - - group = self.env.d_env.get_group(name='default') - l2_network_device = group.get_l2_network_device(name='private') - private_bridge = l2_network_device.bridge_name() - logger.info( - "Search for {0} interfaces for update".format(private_bridge)) - - bridge_interfaces = self.get_host_bridge_ifaces(private_bridge) - logger.info("Found {0} interfaces for update: {1}".format( - len(bridge_interfaces), bridge_interfaces)) - - for iface in bridge_interfaces: - self.set_host_iface_mtu(iface, 9000) - logger.info("MTU of {0} was changed to 9000".format(iface)) - logger.debug("New {0} interface properties:\n{1}" - .format(iface, self.get_host_iface(iface))) - - self.env.make_snapshot( - "ready_with_5_slaves_jumbo_frames", is_make=True) - - @test(depends_on=[prepare_5_slaves_with_jumbo_frames], - groups=["jumbo_frames_neutron_vlan"]) - @decorators.log_snapshot_after_test - def jumbo_frames_neutron_vlan(self): - """Verify jumbo frames between instances on HA Neutron VLAN - - Scenario: - 1. Revert snapshot ready_with_5_slaves_jumbo_frames - 2. Create cluster with neutron VLAN - 3. Add 3 node with controller role - 4. Add 2 nodes with compute role - 5. Set mtu=9000 on private interface - 6. Deploy the cluster - 7. Run network verification - 8. Check MTU on private interface - 9. Run MTU size check - 10. Run OSTF - - Duration 120m - Snapshot ready_jumbo_frames_neutron_vlan - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames") - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - ) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - } - ) - - self.show_step(5) - slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in slave_nodes: - self.fuel_web.set_mtu(node['id'], - self.iface_update['name'], mtu=9000) - self.fuel_web.disable_offloading(node['id'], - self.iface_update['name']) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - for node_name in ['slave-01', 'slave-02', 'slave-03', - 'slave-04', 'slave-05']: - node = self.fuel_web.get_nailgun_node_by_name(node_name) - with self.env.d_env.get_ssh_to_remote(node['ip']) as remote: - asserts.assert_true( - self.check_node_iface_mtu( - remote, self.iface_update['name'], 9000), - "MTU on {0} is not 9000. Actual value: {1}".format( - remote.host, - self.get_node_iface(remote, self.iface_update['name']) - )) - - self.show_step(9) - self.check_mtu_size_between_instances(mtu_offset=0) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("ready_jumbo_frames_neutron_vlan") - - @test(depends_on=[prepare_5_slaves_with_jumbo_frames], - groups=["jumbo_frames_neutron_vxlan"]) - @decorators.log_snapshot_after_test - def jumbo_frames_neutron_vxlan(self): - """Verify jumbo frames between instances on HA and Neutron VXLAN - - Scenario: - 1. Revert snapshot ready_with_5_slaves_jumbo_frames - 2. 
-            2. Create cluster with Neutron VXLAN
-            3. Add 3 nodes with controller role
-            4. Add 2 nodes with compute role
-            5. Set mtu=9000 on private interface
-            6. Deploy the cluster
-            7. Run network verification
-            8. Check MTU on private interface
-            9. Run MTU size check
-            10. Run OSTF
-
-        Duration 120m
-        Snapshot ready_jumbo_frames_neutron_vxlan
-
-        """
-        self.show_step(1, initialize=True)
-        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")
-
-        self.show_step(2)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE_HA,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
-            }
-        )
-
-        self.show_step(3)
-        self.show_step(4)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['controller'],
-                'slave-03': ['controller'],
-                'slave-04': ['compute'],
-                'slave-05': ['compute'],
-            }
-        )
-
-        self.show_step(5)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        for node in slave_nodes:
-            self.fuel_web.set_mtu(node['id'],
-                                  self.iface_update['name'], mtu=9000)
-            self.fuel_web.disable_offloading(node['id'],
-                                             self.iface_update['name'])
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.show_step(7)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(8)
-        for node_name in ['slave-01', 'slave-02', 'slave-03',
-                          'slave-04', 'slave-05']:
-            node = self.fuel_web.get_nailgun_node_by_name(node_name)
-            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
-                asserts.assert_true(
-                    self.check_node_iface_mtu(
-                        remote, self.iface_update['name'], 9000),
-                    "MTU on {0} is not 9000. Actual value: {1}".format(
-                        remote.host,
-                        self.get_node_iface(remote,
                                            self.iface_update['name'])
-                    ))
-
-        self.show_step(9)
-        self.check_mtu_size_between_instances(mtu_offset=50)
-
-        self.show_step(10)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-        self.env.make_snapshot("ready_jumbo_frames_neutron_vxlan")
-
-    @test(depends_on=[prepare_5_slaves_with_jumbo_frames],
-          groups=["jumbo_frames_neutron_diff_net_vlan"])
-    @decorators.log_snapshot_after_test
-    def jumbo_frames_neutron_diff_net_vlan(self):
-        """Verify jumbo frames between instances in different networks on HA
-        and Neutron VLAN
-
-        Scenario:
-            1. Revert snapshot ready_with_5_slaves_jumbo_frames
-            2. Create cluster with Neutron VLAN
-            3. Add 3 nodes with controller role
-            4. Add 2 nodes with compute role
-            5. Set mtu=9000 on private interface
-            6. Deploy the cluster
-            7. Run network verification
-            8. Check MTU on private interface
-            9. Run MTU size check
-            10. Run OSTF
-
-        Duration 120m
-        Snapshot jumbo_frames_neutron_diff_net_vlan
-
-        """
-        self.show_step(1, initialize=True)
-        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")
-
-        self.show_step(2)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE_HA,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
-            }
-        )
-
-        self.show_step(3)
-        self.show_step(4)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['controller'],
-                'slave-03': ['controller'],
-                'slave-04': ['compute'],
-                'slave-05': ['compute'],
-            }
-        )
-
-        self.show_step(5)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        for node in slave_nodes:
-            self.fuel_web.set_mtu(node['id'],
-                                  self.iface_update['name'], mtu=9000)
-            self.fuel_web.disable_offloading(node['id'],
-                                             self.iface_update['name'])
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.show_step(7)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(8)
-        for node_name in ['slave-01', 'slave-02', 'slave-03',
-                          'slave-04', 'slave-05']:
-            node = self.fuel_web.get_nailgun_node_by_name(node_name)
-            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
-                asserts.assert_true(
-                    self.check_node_iface_mtu(
-                        remote, self.iface_update['name'], 9000),
-                    "MTU on {0} is not 9000. Actual value: {1}".format(
-                        remote.host,
-                        self.get_node_iface(remote,
                                            self.iface_update['name'])
-                    ))
-
-        self.show_step(9)
-        self.check_mtu_size_between_instances(mtu_offset=0, diff_net=True)
-
-        self.show_step(10)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-        self.env.make_snapshot("jumbo_frames_neutron_diff_net_vlan")
-
-    @test(depends_on=[prepare_5_slaves_with_jumbo_frames],
-          groups=["jumbo_frames_neutron_diff_net_vxlan"])
-    @decorators.log_snapshot_after_test
-    def jumbo_frames_neutron_diff_net_vxlan(self):
-        """Verify jumbo frames between instances in different networks on HA
-        and Neutron VXLAN
-
-        Scenario:
-            1. Revert snapshot ready_with_5_slaves_jumbo_frames
-            2. Create cluster with Neutron VXLAN
-            3. Add 3 nodes with controller role
-            4. Add 2 nodes with compute role
-            5. Set mtu=9000 on private interface
-            6. Deploy the cluster
-            7. Run network verification
-            8. Check MTU on private interface
-            9. Run MTU size check
-            10. Run OSTF
-
-        Duration 120m
-        Snapshot jumbo_frames_neutron_diff_net_vxlan
-
-        """
-        self.show_step(1, initialize=True)
-        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")
-
-        self.show_step(2)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE_HA,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
-            }
-        )
-
-        self.show_step(3)
-        self.show_step(4)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['controller'],
-                'slave-03': ['controller'],
-                'slave-04': ['compute'],
-                'slave-05': ['compute'],
-            }
-        )
-
-        self.show_step(5)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        for node in slave_nodes:
-            self.fuel_web.set_mtu(node['id'],
-                                  self.iface_update['name'], mtu=9000)
-            self.fuel_web.disable_offloading(node['id'],
-                                             self.iface_update['name'])
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.show_step(7)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(8)
-        for node_name in ['slave-01', 'slave-02', 'slave-03',
-                          'slave-04', 'slave-05']:
-            node = self.fuel_web.get_nailgun_node_by_name(node_name)
-            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
-                asserts.assert_true(
-                    self.check_node_iface_mtu(
-                        remote, self.iface_update['name'], 9000),
-                    "MTU on {0} is not 9000. Actual value: {1}".format(
-                        remote.host,
-                        self.get_node_iface(remote,
                                            self.iface_update['name'])
-                    ))
-
-        self.show_step(9)
-        self.check_mtu_size_between_instances(mtu_offset=50, diff_net=True)
-
-        self.show_step(10)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-        self.env.make_snapshot("jumbo_frames_neutron_diff_net_vxlan")
diff --git a/fuelweb_test/tests/test_manual_vip_allocation.py b/fuelweb_test/tests/test_manual_vip_allocation.py
deleted file mode 100644
index 0ba1958c7..000000000
--- a/fuelweb_test/tests/test_manual_vip_allocation.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-from proboscis import test
-from proboscis.asserts import assert_equal
-
-from fuelweb_test import logger
-from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.tests.base_test_case import SetupEnvironment
-from fuelweb_test.tests.base_test_case import TestBasic
-
-
-@test(groups=["change_vip_manually"])
-class ChangeVipManually(TestBasic):
-    """ChangeVipManually
-    Contains tests for manual vip allocation
-    """
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=["change_public_vip"])
-    @log_snapshot_after_test
-    def change_public_vip(self):
-        """Deploy cluster with public vip manually set
-
-        Scenario:
-            1. Create cluster
-            2. Add 1 node with controller role
-            3. Add 1 node with compute role and 1 cinder node
-            4. Change public vip value to an ip address from public range
-            5. Verify networks
-            6. Deploy the cluster
-            7. Check that cluster public vip is the same as the one set
               manually
-            8.
Verify networks - 9. Run OSTF - - Duration 180m - Snapshot change_public_vip - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'tenant': 'manualvip', - 'user': 'manualvip', - 'password': 'manualvip' - } - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'], - } - ) - self.show_step(4) - net = self.env.d_env.get_network(name='public').ip - ip_to_set = str(list(net.subnet(net.prefixlen + 1))[0][5]) - logger.debug("public vip is going to be set to {}".format(ip_to_set)) - public_vip_data = {'network': 2, - 'vip_name': 'public', - 'vip_namespace': 'haproxy', - 'ip_addr': ip_to_set} - - # TODO(ddmitriev): remove this 'disable' after moving to fuel-devops3.0 - # pylint: disable=no-member - self.fuel_web.client.update_vip_ip(cluster_id, public_vip_data) - # pylint: enable=no-member - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - public_vip = self.fuel_web.get_public_vip(cluster_id) - self.show_step(7) - assert_equal(public_vip, ip_to_set, - "Public vip doesn't match, actual - {0}," - " expected - {1}".format(public_vip, ip_to_set)) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("change_public_vip") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["change_public_vip_outside_range"]) - @log_snapshot_after_test - def change_public_vip_outside_range(self): - """Deploy cluster with public vip manually set - and picked from floating ips range - - Scenario: - 1. Create cluster - 2. Add 1 node with controller+ceph role - 3. Add 1 node with compute+ceph role and 1 ceph node - 4. Reduce floating ip upper bound on - 10 addresses - 5. Change public vip to first not used public address - 6. Verify networks - 7. Deploy the cluster - 8. Check that cluster public vip is the same we set manually - 9. 
Run OSTF - - Duration 180m - Snapshot change_public_vip_outside_range - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'tenant': 'outsiderangevip', - 'user': 'outsiderangevip', - 'password': 'outsiderangevip', - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ephemeral_ceph': True, - } - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['compute', 'ceph-osd'], - 'slave-03': ['ceph-osd'] - } - ) - self.show_step(4) - ranges = self.fuel_web.get_range( - self.env.d_env.get_network(name='public').ip, 1) - floating_upper_range = netaddr.IPAddress(ranges[0][-1]) - 10 - ranges[0][-1] = str(floating_upper_range) - params = self.fuel_web.client.get_networks( - cluster_id)['networking_parameters'] - params['floating_ranges'] = ranges - self.fuel_web.client.update_network( - cluster_id=cluster_id, - networking_parameters=params - ) - self.show_step(5) - ip_to_set = str(floating_upper_range + 1) - logger.debug('ip to be set is {}'.format(ip_to_set)) - public_vip_data = {'network': 2, - 'vip_name': 'public', - 'vip_namespace': 'haproxy', - 'ip_addr': ip_to_set} - - # TODO(ddmitriev): remove this 'disable' after moving to fuel-devops3.0 - # pylint: disable=no-member - self.fuel_web.client.update_vip_ip(cluster_id, public_vip_data) - # pylint: enable=no-member - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - public_vip = self.fuel_web.get_public_vip(cluster_id) - self.show_step(8) - assert_equal(public_vip, ip_to_set, - "Public vip doesn't match, actual - {0}," - " expected - {1}".format(public_vip, ip_to_set)) - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("change_public_vip_outside_range") diff --git a/fuelweb_test/tests/test_multipath_devices.py b/fuelweb_test/tests/test_multipath_devices.py deleted file mode 100644 index 3fc927fba..000000000 --- a/fuelweb_test/tests/test_multipath_devices.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
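
Stepping back to the two VIP tests above: both derive their candidate address from the environment's public CIDR with netaddr. A standalone sketch of that arithmetic, using a made-up 10.109.1.0/24 network in place of the devops 'public' network (in the real tests the floating upper bound comes from fuel_web.get_range, not from the raw CIDR):

    import netaddr

    net = netaddr.IPNetwork('10.109.1.0/24')

    # change_public_vip: the 6th address of the first half-size subnet
    first_half = list(net.subnet(net.prefixlen + 1))[0]
    vip_inside_range = str(first_half[5])

    # change_public_vip_outside_range: shrink the floating range by 10
    # addresses, then take the first address above the new upper bound
    floating_upper = netaddr.IPAddress(net[-2]) - 10
    vip_outside_range = str(floating_upper + 1)

    print(vip_inside_range, vip_outside_range)  # 10.109.1.5 10.109.1.245
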
-
-import re
-
-from proboscis.asserts import assert_equal
-from proboscis import test
-
-from fuelweb_test.helpers.checkers import ssh_manager
-from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.helpers.utils import TimeStat
-from fuelweb_test.settings import DEPLOYMENT_MODE
-from fuelweb_test.settings import MULTIPATH
-from fuelweb_test.settings import MULTIPATH_TEMPLATE
-from fuelweb_test.settings import NEUTRON_SEGMENT
-from fuelweb_test.settings import SLAVE_MULTIPATH_DISKS_COUNT
-from fuelweb_test.settings import REPLACE_DEFAULT_REPOS
-from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE
-from fuelweb_test.tests import base_test_case
-from gates_tests.helpers import exceptions
-from system_test.core.discover import load_yaml
-
-
-@test
-class TestMultipath(base_test_case.TestBasic):
-    """TestMultipath.
-
-    Required environment variables:
-        * MULTIPATH=true
-        * SLAVE_MULTIPATH_DISKS_COUNT >= 2
-        * MULTIPATH_TEMPLATE=
-          system_test/tests_templates/tests_configs/multipath_3_nodes.yaml
-    """
-
-    @staticmethod
-    def check_multipath_devices(ip, slave_multipath_disks_count):
-        """Check that every multipath device on the node is backed by
-        SLAVE_MULTIPATH_DISKS_COUNT paths and that all paths are healthy.
-        """
-        cmd = "multipath -l -v2"
-
-        ssh_manager.update_connection(ip)
-        ssh_manager.get_remote(ip)
-        result = ssh_manager.execute_on_remote(
-            ip=ip,
-            cmd=cmd,
-            err_msg="Failed to check multipath on node {}".format(ip)
-        )
-        multipath_info = [res.rstrip() for res in result['stdout']]
-        disk = re.compile(r'(?P<disk>^[\d\w]+)\s+(?P<dm>dm-\d+)')
-        status = re.compile(
-            r'\d+:\d+:\d+:\d+\s+(?P<devnode>\w+)'
-            r'\s+\d+:\d+\s+(?P<dm_status>\w+)'
-            r'\s+(?P<path_status>\w+)'
-            r'\s+(?P<online_status>\w+)'
-        )
-        dm = None
-        disks = dict()
-        for line in multipath_info:
-            m = re.match(disk, line)
-            if m:
-                dm = m.group('dm')
-                disks[dm] = []
-                continue
-
-            m = re.search(status, line)
-            if m:
-                disks[dm].append(m.group('devnode'))
-                assert_equal(
-                    m.group('dm_status'),
-                    'active',
-                    "Device {0} is in '{1}' status on {2}".format(
-                        m.group('devnode'), m.group('dm_status'), dm))
-                assert_equal(
-                    m.group('online_status'),
-                    'running',
-                    "Device {0} is in '{1}' status on {2}".format(
-                        m.group('devnode'), m.group('online_status'), dm))
-        for disk in disks:
-            assert_equal(len(disks[disk]),
-                         slave_multipath_disks_count,
-                         "{0}: wrong path count: {1}. Must be {2}".format(
-                             disk, len(disks[disk]),
-                             slave_multipath_disks_count))
-
-    @staticmethod
-    def get_os_root_multipath_count(ip):
-        """Return the count of root partitions on multipath devices.
-
-        :rtype: int
-        """
-        cmd = "lsblk -lo NAME,TYPE,MOUNTPOINT | grep '/$' | grep -c lvm"
-
-        ssh_manager.update_connection(ip)
-        ssh_manager.get_remote(ip)
-        result = ssh_manager.execute_on_remote(
-            ip=ip,
-            cmd=cmd,
-            err_msg="Failed to check lsblk on node {}".format(ip))
-        return int(result['stdout_str'])
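
check_multipath_devices above recovers the device-to-path mapping from `multipath -l -v2` output with two regexes. A self-contained sketch of that parsing against canned output (the sample lines are illustrative only; real output varies with the multipath-tools version):

    import re

    # Canned `multipath -l -v2` output (illustrative).
    SAMPLE = [
        "mpatha dm-0 QEMU,QEMU HARDDISK",
        "size=50G features='0' hwhandler='0' wp=rw",
        "`-+- policy='round-robin 0' prio=0 status=active",
        "  |- 2:0:0:0 sda 8:0  active undef running",
        "  `- 3:0:0:0 sdb 8:16 active undef running",
    ]

    disk = re.compile(r'(?P<disk>^[\d\w]+)\s+(?P<dm>dm-\d+)')
    status = re.compile(r'\d+:\d+:\d+:\d+\s+(?P<devnode>\w+)'
                        r'\s+\d+:\d+\s+(?P<dm_status>\w+)'
                        r'\s+(?P<path_status>\w+)'
                        r'\s+(?P<online_status>\w+)')

    disks, dm = {}, None
    for line in SAMPLE:
        m = disk.match(line)
        if m:                       # header line starts a new dm device
            dm = m.group('dm')
            disks[dm] = []
            continue
        m = status.search(line)
        if m:                       # path line: record it under current dm
            disks[dm].append(m.group('devnode'))

    print(disks)  # {'dm-0': ['sda', 'sdb']} -> two paths per device
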
-
-    @test(groups=["bootstrap_multipath"])
-    @log_snapshot_after_test
-    def bootstrap_multipath(self):
-        """Bootstrap node with multipath devices
-
-        Scenario:
-            1. Setup environment
-            2. Bootstrap slave nodes
-            3. Verify multipath devices on the nodes
-
-        Duration 30m
-
-        """
-        if not MULTIPATH:
-            raise exceptions.FuelQAVariableNotSet(
-                'MULTIPATH', 'true')
-        if not MULTIPATH_TEMPLATE:
-            raise exceptions.FuelQAVariableNotSet(
-                'MULTIPATH_TEMPLATE',
-                'system_test/tests_templates/tests_configs/'
-                'multipath_3_nodes.yaml')
-        if int(SLAVE_MULTIPATH_DISKS_COUNT) < 2:
-            raise exceptions.FuelQAVariableNotSet(
-                'SLAVE_MULTIPATH_DISKS_COUNT', '2')
-
-        self.show_step(1)
-        self._devops_config = load_yaml(MULTIPATH_TEMPLATE)
-        with TimeStat("setup_environment", is_uniq=True):
-            self.env.setup_environment()
-            self.fuel_post_install_actions()
-        if REPLACE_DEFAULT_REPOS and REPLACE_DEFAULT_REPOS_ONLY_ONCE:
-            self.fuel_web.replace_default_repos()
-        self.fuel_web.get_nailgun_version()
-        self.fuel_web.change_default_network_settings()
-
-        self.show_step(2)
-        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3],
-                                 skip_timesync=True)
-
-        self.show_step(3)
-        for ip in [node['ip'] for node in self.fuel_web.client.list_nodes()]:
-            self.check_multipath_devices(ip, SLAVE_MULTIPATH_DISKS_COUNT)
-
-    @test(depends_on_groups=["bootstrap_multipath"],
-          groups=["deploy_multipath"])
-    @log_snapshot_after_test
-    def deploy_multipath(self):
-        """Deploy cluster with multipath devices
-
-        Scenario:
-            1. Create cluster with 1 controller, 1 compute and 1 cinder roles
-            2. Run network verification
-            3. Provision the cluster
-            4. Verify multipath devices on nodes
-            5. Deploy the cluster
-            6. Run OSTF
-
-        Duration 50m
-
-        """
-        self.show_step(1)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                "net_segment_type": NEUTRON_SEGMENT['vlan'],
-            }
-        )
-        self.fuel_web.update_nodes(
-            cluster_id, {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['cinder']
-            }
-        )
-
-        self.show_step(2)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(3)
-        self.fuel_web.provisioning_cluster_wait(cluster_id)
-
-        self.show_step(4)
-        for ip in [node['ip'] for node in self.fuel_web.client.list_nodes()]:
-            self.check_multipath_devices(ip, SLAVE_MULTIPATH_DISKS_COUNT)
-            assert_equal(
-                self.get_os_root_multipath_count(ip),
-                SLAVE_MULTIPATH_DISKS_COUNT,
-                "Wrong lvm structure of multipath device on {}".format(ip))
-
-        self.show_step(5)
-        self.fuel_web.deploy_task_wait(cluster_id)
-
-        self.show_step(6)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
diff --git a/fuelweb_test/tests/test_multiple_networks.py b/fuelweb_test/tests/test_multiple_networks.py
deleted file mode 100644
index 3a353f40f..000000000
--- a/fuelweb_test/tests/test_multiple_networks.py
+++ /dev/null
@@ -1,775 +0,0 @@
-# Copyright 2014 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -from __future__ import division - -import json -import time - -from devops.helpers.helpers import wait -from devops.error import TimeoutError -import netaddr -from proboscis import asserts -from proboscis import SkipTest -from proboscis import test - -from fuelweb_test.helpers.checkers import check_get_network_data_over_cli -from fuelweb_test.helpers.checkers import check_ping -from fuelweb_test.helpers.checkers import check_update_network_data_over_cli -from fuelweb_test.helpers.decorators import check_fuel_statistics -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import utils -from fuelweb_test.settings import DEPLOYMENT_MODE_HA -from fuelweb_test.settings import MULTIPLE_NETWORKS -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import NODEGROUPS -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase -from fuelweb_test import logger - - -@test(groups=["multiple_cluster_networks", "thread_7"]) -class TestMultipleClusterNets(TestNetworkTemplatesBase): - """TestMultipleClusterNets.""" # TODO documentation - - def __init__(self): - self.netconf_all_groups = None - super(TestMultipleClusterNets, self).__init__() - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["deploy_neutron_tun_ha_nodegroups"]) - @log_snapshot_after_test - @check_fuel_statistics - def deploy_neutron_tun_ha_nodegroups(self): - """Deploy HA environment with NeutronVXLAN and 2 nodegroups - - Scenario: - 1. Revert snapshot with ready master node - 2. Bootstrap slaves from default nodegroup - 3. Create cluster with Neutron VXLAN and custom nodegroups - 4. Remove 2nd custom nodegroup which is added automatically - 5. Bootstrap slave nodes from custom nodegroup - 6. Download network configuration - 7. Update network.json with customized ip ranges - 8. Put new json on master node and update network data - 9. Verify that new IP ranges are applied for network config - 10. Add 3 controller nodes from default nodegroup - 11. Add 2 compute nodes from custom nodegroup - 12. Deploy cluster - 13. Run network verification - 14. Verify that excluded ip is not used for nodes or VIP - 15. 
Run health checks (OSTF) - - Duration 110m - Snapshot deploy_neutron_tun_ha_nodegroups - - """ - - if not MULTIPLE_NETWORKS: - raise SkipTest('MULTIPLE_NETWORKS not enabled') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready") - - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3]) - - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'], - 'tenant': 'haVxlan', - 'user': 'haVxlan', - 'password': 'haVxlan' - } - ) - - self.show_step(4) - self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id) - custom_group2 = self.fuel_web.get_nodegroup( - cluster_id, name=NODEGROUPS[2]['name']) - wait(lambda: not self.is_update_dnsmasq_running( - self.fuel_web.client.get_tasks()), timeout=60, - timeout_msg="Timeout exceeded while waiting for task " - "'update_dnsmasq' is finished!") - self.fuel_web.client.delete_nodegroup(custom_group2['id']) - - self.show_step(5) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5]) - - self.show_step(6) - check_get_network_data_over_cli(self.ssh_manager.admin_ip, - cluster_id, '/var/log/') - - management_ranges_default = [] - management_ranges_custom = [] - storage_ranges_default = [] - storage_ranges_custom = [] - default_group_id = self.fuel_web.get_nodegroup(cluster_id)['id'] - custom_group_id = self.fuel_web.get_nodegroup( - cluster_id, name=NODEGROUPS[1]['name'])['id'] - - self.show_step(7) - with self.env.d_env.get_admin_remote() as remote: - current_net = json.loads(remote.open( - '/var/log/network_1.json').read()) - # Get storage ranges for default and custom groups - storage_ranges_default.append(self.get_modified_ranges( - current_net, 'storage', group_id=default_group_id)) - - storage_ranges_custom.append(self.get_modified_ranges( - current_net, 'storage', group_id=custom_group_id)) - - management_ranges_default.append(self.get_modified_ranges( - current_net, 'management', group_id=default_group_id)) - - management_ranges_custom.append(self.get_modified_ranges( - current_net, 'management', group_id=custom_group_id)) - - update_data = { - default_group_id: {'storage': storage_ranges_default, - 'management': management_ranges_default}, - custom_group_id: {'storage': storage_ranges_custom, - 'management': management_ranges_custom}} - - updated_network = self.update_network_ranges( - current_net, update_data) - - logger.debug( - 'Plan to update ranges for default group to {0} for storage ' - 'and {1} for management and for custom group storage {2},' - ' management {3}'.format(storage_ranges_default, - management_ranges_default, - storage_ranges_custom, - management_ranges_custom)) - - # need to push to remote - self.show_step(8) - with remote.open( - '/var/log/network_{0}.json'.format(cluster_id), - mode='w') as file_obj: - json.dump(updated_network, file_obj) - - check_update_network_data_over_cli(self.ssh_manager.admin_ip, - cluster_id, - '/var/log/') - - self.show_step(9) - with self.env.d_env.get_admin_remote() as remote: - check_get_network_data_over_cli(self.ssh_manager.admin_ip, - cluster_id, '/var/log/') - latest_net = json.loads(remote.open( - '/var/log/network_1.json').read()) - updated_storage_default = self.get_ranges(latest_net, 'storage', - default_group_id) - - updated_storage_custom = self.get_ranges(latest_net, 'storage', - custom_group_id) - updated_mgmt_default = self.get_ranges(latest_net, 'management', - 
default_group_id) - updated_mgmt_custom = self.get_ranges(latest_net, 'management', - custom_group_id) - - asserts.assert_equal( - updated_storage_default, storage_ranges_default, - 'Looks like storage range for default nodegroup ' - 'was not updated. Expected {0}, Actual: {1}'.format( - storage_ranges_default, updated_storage_default)) - - asserts.assert_equal( - updated_storage_custom, storage_ranges_custom, - 'Looks like storage range for custom nodegroup ' - 'was not updated. Expected {0}, Actual: {1}'.format( - storage_ranges_custom, updated_storage_custom)) - - asserts.assert_equal( - updated_mgmt_default, management_ranges_default, - 'Looks like management range for default nodegroup was ' - 'not updated. Expected {0}, Actual: {1}'.format( - management_ranges_default, updated_mgmt_default)) - - asserts.assert_equal( - updated_mgmt_custom, management_ranges_custom, - 'Looks like management range for custom nodegroup was ' - 'not updated. Expected {0}, Actual: {1}'.format( - management_ranges_custom, updated_mgmt_custom)) - - self.show_step(10) - self.show_step(11) - nodegroup_default = NODEGROUPS[0]['name'] - nodegroup_custom1 = NODEGROUPS[1]['name'] - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': [['controller'], nodegroup_default], - 'slave-02': [['controller'], nodegroup_default], - 'slave-03': [['controller'], nodegroup_default], - 'slave-04': [['compute', 'cinder'], nodegroup_custom1], - 'slave-05': [['compute', 'cinder'], nodegroup_custom1], - } - ) - - self.show_step(12) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(13) - self.fuel_web.verify_network(cluster_id) - - self.show_step(14) - net_data_default_group = [ - data['network_data'] for data - in self.fuel_web.client.list_cluster_nodes( - cluster_id) if data['group_id'] == default_group_id] - - for net_node in net_data_default_group: - for net in net_node: - if 'storage' in net['name']: - asserts.assert_true( - self.is_ip_in_range( - net['ip'].split('/')[0], - updated_storage_default[0][0], - updated_storage_default[0][-1])) - if 'management' in net['name']: - asserts.assert_true( - self.is_ip_in_range( - net['ip'].split('/')[0], - updated_mgmt_default[0][0], - updated_mgmt_default[0][-1])) - - net_data_custom_group = [ - data['network_data'] for data - in self.fuel_web.client.list_cluster_nodes( - cluster_id) if data['group_id'] == custom_group_id] - - for net_node in net_data_custom_group: - for net in net_node: - if 'storage' in net['name']: - asserts.assert_true( - self.is_ip_in_range( - net['ip'].split('/')[0], - updated_storage_custom[0][0], - updated_storage_custom[0][-1])) - if 'management' in net['name']: - asserts.assert_true( - self.is_ip_in_range( - net['ip'].split('/')[0], - updated_mgmt_custom[0][0], - updated_mgmt_custom[0][-1])) - - mgmt_vrouter_vip = self.fuel_web.get_management_vrouter_vip( - cluster_id) - logger.debug('Management vrouter vips is {0}'.format( - mgmt_vrouter_vip)) - mgmt_vip = self.fuel_web.get_mgmt_vip(cluster_id) - logger.debug('Management vips is {0}'.format(mgmt_vip)) - # check for defaults - asserts.assert_true(self.is_ip_in_range(mgmt_vrouter_vip.split('/')[0], - updated_mgmt_default[0][0], - updated_mgmt_default[0][-1])) - asserts.assert_true(self.is_ip_in_range(mgmt_vip.split('/')[0], - updated_mgmt_default[0][0], - updated_mgmt_default[0][-1])) - self.show_step(15) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_tun_ha_nodegroups", - is_make=True) - - 
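
Step 14 above and the later nodegroup tests all reduce to checking whether an allocated address falls inside an updated range; with netaddr that membership test is a one-liner. A minimal sketch (is_ip_in_range here is a stand-in for the equivalent helper the base class provides, and the addresses are made up):

    import netaddr

    def is_ip_in_range(ip, range_start, range_end):
        """True if `ip` lies inside the inclusive [range_start, range_end]."""
        return netaddr.IPAddress(ip) in netaddr.IPRange(range_start, range_end)

    assert is_ip_in_range('10.109.2.10', '10.109.2.2', '10.109.2.127')
    assert not is_ip_in_range('10.109.2.200', '10.109.2.2', '10.109.2.127')
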
@test(depends_on_groups=['deploy_neutron_tun_ha_nodegroups'], - groups=["add_custom_nodegroup"]) - @log_snapshot_after_test - def add_custom_nodegroup(self): - """Add new nodegroup to operational environment - - Scenario: - 1. Revert snapshot with operational cluster - 2. Create new nodegroup for the environment and configure - it's networks - 3. Bootstrap slave node from custom-2 nodegroup - 4. Add node from new nodegroup to the environment with compute role - 5. Run network verification - 6. Deploy changes - 7. Run network verification - 8. Run OSTF - 9. Check that nodes from 'default' nodegroup can reach nodes - from new nodegroup via management and storage networks - - Duration 50m - Snapshot add_custom_nodegroup - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_neutron_tun_ha_nodegroups') - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.assert_nodes_in_ready_state(cluster_id) - asserts.assert_true(not any(ng['name'] == NODEGROUPS[2]['name'] for ng - in self.fuel_web.client.get_nodegroups()), - 'Custom nodegroup {0} already ' - 'exists!'.format(NODEGROUPS[2]['name'])) - - self.show_step(2) - new_nodegroup = self.fuel_web.client.create_nodegroup( - cluster_id, NODEGROUPS[2]['name']) - logger.debug('Updating custom nodegroup ID in network configuration..') - network_config_new = self.fuel_web.client.get_networks(cluster_id) - asserts.assert_true(self.netconf_all_groups is not None, - 'Network configuration for nodegroups is empty!') - - for network in self.netconf_all_groups['networks']: - if network['group_id'] is not None and \ - not any(network['group_id'] == ng['id'] - for ng in self.fuel_web.client.get_nodegroups()): - network['group_id'] = new_nodegroup['id'] - for new_network in network_config_new['networks']: - if new_network['name'] == network['name'] and \ - new_network['group_id'] == network['group_id']: - network['id'] = new_network['id'] - - self.fuel_web.client.update_network( - cluster_id, - self.netconf_all_groups['networking_parameters'], - self.netconf_all_groups['networks']) - - self.show_step(3) - self.env.bootstrap_nodes([self.env.d_env.nodes().slaves[6]]) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - {'slave-07': [['compute'], new_nodegroup['name']]}, - True, False - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - primary_ctrl = self.fuel_web.get_nailgun_node_by_devops_node( - self.fuel_web.get_nailgun_primary_node( - slave=self.env.d_env.nodes().slaves[0])) - - with self.fuel_web.get_ssh_for_node('slave-07') as remote: - new_node_networks = utils.get_net_settings(remote) - - for interface in ('br-storage', 'br-mgmt'): - if interface in new_node_networks: - logger.info("Checking new node is accessible from primary " - "controller via {0} interface.".format(interface)) - for ip in new_node_networks[interface]['ip_addresses']: - address = ip.split('/')[0] - result = check_ping(primary_ctrl['ip'], - address, - timeout=3) - asserts.assert_true(result, - "New node isn't accessible from " - "primary controller via {0} interface" - ": {1}.".format(interface, result)) - - self.env.make_snapshot("add_custom_nodegroup") - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["deploy_ceph_ha_nodegroups"]) - @log_snapshot_after_test - def 
deploy_ceph_ha_nodegroups(self): - """Deploy HA environment with Neutron VXLAN, Ceph and 2 nodegroups - - Scenario: - 1. Revert snapshot with ready master node - 2. Create cluster with Neutron VXLAN, Ceph and custom nodegroup - 3. Exclude 10 first IPs from range for default admin/pxe network - 4. Bootstrap slave nodes from both default and custom nodegroups - 5. Check that excluded IPs aren't allocated to discovered nodes - 6. Add 3 controller + ceph nodes from default nodegroup - 7. Add 2 compute + ceph nodes from custom nodegroup - 8. Deploy cluster - 9. Run network verification - 10. Run health checks (OSTF) - 11. Check that excluded IPs aren't allocated to deployed nodes - 12. Check Ceph health - - Duration 110m - Snapshot deploy_ceph_ha_nodegroups - - """ - - if not MULTIPLE_NETWORKS: - raise SkipTest('MULTIPLE_NETWORKS not enabled') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready") - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - settings={ - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'volumes_lvm': False, - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'], - 'tenant': 'haVxlanCeph', - 'user': 'haVxlanCeph', - 'password': 'haVxlanCeph' - } - ) - - self.show_step(3) - networks = self.fuel_web.client.get_networks(cluster_id)["networks"] - new_admin_range = self.change_default_admin_range( - networks, number_excluded_ips=10) - wait(lambda: not self.is_update_dnsmasq_running( - self.fuel_web.client.get_tasks()), timeout=60, - timeout_msg="Timeout exceeded while waiting for task " - "'update_dnsmasq' is finished!") - self.fuel_web.client.update_network(cluster_id, networks=networks) - logger.info("New addresses range for default admin network:" - " {0}".format(new_admin_range)) - - self.show_step(4) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:5]) - - self.show_step(5) - default_ng_nodes = [self.fuel_web.get_nailgun_node_by_devops_node(node) - for node in self.env.d_env.nodes().slaves[0:3]] - for node in default_ng_nodes: - asserts.assert_true( - self.is_ip_in_range(node['ip'], *new_admin_range), - "Node '{0}' has IP address '{1}' which " - "is not from defined IP addresses range:" - " {2}!".format(node['fqdn'], node['ip'], new_admin_range)) - - self.show_step(6) - self.show_step(7) - nodegroup_default = NODEGROUPS[0]['name'] - nodegroup_custom = NODEGROUPS[1]['name'] - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': [['controller', 'ceph-osd'], nodegroup_default], - 'slave-02': [['controller', 'ceph-osd'], nodegroup_default], - 'slave-03': [['controller', 'ceph-osd'], nodegroup_default], - 'slave-04': [['compute', 'ceph-osd'], nodegroup_custom], - 'slave-05': [['compute', 'ceph-osd'], nodegroup_custom], - } - ) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=150 * 60) - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.show_step(11) - group_id = self.fuel_web.get_nodegroup(cluster_id, - name=nodegroup_default)['id'] - default_ng_nodes = [node for node in - self.fuel_web.client.list_cluster_nodes(cluster_id) - if node['group_id'] == group_id] - for node in default_ng_nodes: - asserts.assert_true( - self.is_ip_in_range(node['ip'], *new_admin_range), - "Node '{0}' has IP address '{1}' which " - "is not from defined IP addresses range:" - " {2}!".format(node['fqdn'], node['ip'], new_admin_range)) 
- - self.show_step(12) - self.fuel_web.check_ceph_status(cluster_id) - - self.env.make_snapshot("deploy_ceph_ha_nodegroups") - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["deploy_controllers_from_custom_nodegroup", - "multiple_cluster_networks"]) - @log_snapshot_after_test - def deploy_controllers_from_custom_nodegroup(self): - """Assigning controllers to non-default nodegroup - - Scenario: - 1. Revert snapshot with ready master node - 2. Create environment with Neutron VXLAN and custom nodegroup - 3. Configure network floating ranges to use public network - from custom nodegroup - 4. Bootstrap slaves from custom nodegroup - 5. Bootstrap slave nodes from default nodegroup - 6. Add 3 nodes from 'custom' nodegroup as controllers - Add 2 nodes from 'default' nodegroup as compute and cinder - 7. Run network verification - 8. Deploy environment - 9. Run network verification - 10. Run OSTF - 11. Check addresses allocated for VIPs belong to networks - from custom nodegroup - - Duration 120m - Snapshot deploy_controllers_from_custom_nodegroup - - """ - - if not MULTIPLE_NETWORKS: - raise SkipTest('MULTIPLE_NETWORKS not enabled') - - self.show_step(1, initialize=True) - self.check_run("deploy_controllers_from_custom_nodegroup") - self.env.revert_snapshot("ready") - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'] - }, - configure_ssl=False - ) - - self.show_step(3) - # floating range - public2_cidr = self.env.d_env.get_network(name='public2').ip - new_settings_float = { - 'floating_ranges': [[str(public2_cidr[len(public2_cidr) // 2]), - str(public2_cidr[-2])]] - } - self.fuel_web.client.update_network(cluster_id, new_settings_float) - - self.show_step(4) - custom_nodes = self.env.d_env.nodes().slaves[3:6] - self.env.bootstrap_nodes(custom_nodes) # nodes 4, 5 and 6 - - self.show_step(5) - default_nodes = self.env.d_env.nodes().slaves[0:2] - self.env.bootstrap_nodes(default_nodes) # nodes 1 and 2 - - self.show_step(6) - - default_nodegroup = NODEGROUPS[0]['name'] - custom_nodegroup = NODEGROUPS[1]['name'] - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-04': [['controller'], custom_nodegroup], - 'slave-05': [['controller'], custom_nodegroup], - 'slave-06': [['controller'], custom_nodegroup], - 'slave-01': [['compute'], default_nodegroup], - 'slave-02': [['cinder'], default_nodegroup] - } - ) - - # configuring ssl after nodes added to cluster due to vips in custom ng - self.fuel_web.ssl_configure(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=150 * 60) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(11) - current_settings = self.fuel_web.client.get_networks(cluster_id) - check = { - 'vrouter_pub': 'public2', - 'management': 'management2', - 'public': 'public2', - 'vrouter': 'management2' - } - - for k in check: - vip = netaddr.IPAddress(str(current_settings['vips'][k]['ipaddr'])) - custom_net = netaddr.IPNetwork( - str(self.env.d_env.get_network(name=check[k]).ip)) - asserts.assert_true( - vip in custom_net, - '{0} is not from {1} network'.format(k, check[k])) - logger.info('{0} is from {1} network'.format(k, check[k])) - - self.env.make_snapshot("deploy_controllers_from_custom_nodegroup", - 
is_make=True) - - @test(depends_on=[deploy_controllers_from_custom_nodegroup], - groups=["delete_cluster_with_custom_nodegroup"], - # TODO: enable this test when bug #1521682 is fixed - enabled=False) - @log_snapshot_after_test - def delete_cluster_with_custom_nodegroup(self): - """Delete env, check nodes from custom nodegroup can't bootstrap - - Scenario: - 1. Revert snapshot with cluster with nodes in custom nodegroup - 2. Delete cluster - 3. Check nodes from custom nodegroup can't bootstrap - 4. Reset nodes from custom nodegroup - 5. Check nodes from custom nodegroup can't bootstrap - - Duration 15m - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_controllers_from_custom_nodegroup') - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.assert_nodes_in_ready_state(cluster_id) - - self.show_step(2) - custom_nodes = self.env.d_env.nodes().slaves[3:6] - - self.fuel_web.delete_env_wait(cluster_id) - - self.show_step(3) - logger.info('Wait five nodes online for 900 seconds..') - wait(lambda: len(self.fuel_web.client.list_nodes()) == 5, - timeout=15 * 60, - timeout_msg='Timeout while waiting five nodes ' - 'to become online') - - logger.info('Wait all nodes from custom nodegroup become ' - 'in error state..') - # check all custom in error state - for slave in custom_nodes: - wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node( - slave)['status'] == 'error', timeout=15 * 60, - timeout_msg='Node {} not changed state to ' - 'error'.format(slave.name)) - logger.info( - 'Node {} changed state to error'.format(slave.name)) - - self.show_step(4) - logger.info('Rebooting nodes from custom nodegroup..') - self.fuel_web.cold_restart_nodes(custom_nodes, wait_online=False) - - self.show_step(5) - logger.info('Wait custom nodes are not online for 600 seconds..') - try: - wait( - lambda: any(self.fuel_web. - get_nailgun_node_by_devops_node(slave)['online'] - for slave in custom_nodes), - timeout=10 * 60) - raise AssertionError('Some nodes online') - except TimeoutError: - logger.info('Nodes are offline') - - self.env.make_snapshot("delete_cluster_with_custom_nodegroup") - - @test(depends_on=[deploy_controllers_from_custom_nodegroup], - groups=["delete_custom_nodegroup"]) - @log_snapshot_after_test - def delete_custom_nodegroup(self): - """Delete nodegroup, check its nodes are marked as 'error' - - Scenario: - 1. Revert snapshot with cluster with nodes in custom nodegroup - 2. Save cluster network configuration - 3. Reset cluster - 4. Remove custom nodegroup - 5. Check nodes from custom nodegroup have 'error' status - 6. Re-create custom nodegroup and upload saved network configuration - 7. Assign 'error' nodes to new nodegroup - 8. 
Check nodes from custom nodegroup are in 'discover' state - - Duration 30m - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_controllers_from_custom_nodegroup') - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.assert_nodes_in_ready_state(cluster_id) - - self.show_step(2) - network_config = self.fuel_web.client.get_networks(cluster_id) - - self.show_step(3) - custom_nodes = self.env.d_env.nodes().slaves[3:6] - self.fuel_web.stop_reset_env_wait(cluster_id) - # TODO(apanchenko): remove sleep(181) workaround when the issue with - # TODO(apanchenko): cluster reset is fixed (see LP#1588193) - # Nailgun waits 180 seconds before marking slave node as offline - time.sleep(181) - logger.info('Waiting for all nodes online for 900 seconds...') - wait(lambda: all(n['online'] for n in - self.fuel_web.client.list_cluster_nodes(cluster_id)), - timeout=15 * 60, - timeout_msg='Timeout while waiting nodes to become online ' - 'after reset') - - self.show_step(4) - custom_nodegroup = [ng for ng in self.fuel_web.client.get_nodegroups() - if ng['name'] == NODEGROUPS[1]['name']][0] - self.fuel_web.client.delete_nodegroup(custom_nodegroup['id']) - - self.show_step(5) - logger.info('Wait all nodes from custom nodegroup become ' - 'in error state..') - for slave in custom_nodes: - # pylint: disable=undefined-loop-variable - wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node( - slave)['status'] == 'error', timeout=60 * 5, - timeout_msg='Node {} status wasn\'t changed ' - 'to "error"!'.format(slave.name)) - # pylint: enable=undefined-loop-variable - logger.info('Node {} is in "error" state'.format(slave.name)) - - self.show_step(6) - new_nodegroup = self.fuel_web.client.create_nodegroup( - cluster_id, NODEGROUPS[1]['name']) - logger.debug('Updating custom nodegroup ID in network configuration..') - network_config_new = self.fuel_web.client.get_networks(cluster_id) - for network in network_config['networks']: - if network['group_id'] == custom_nodegroup['id']: - network['group_id'] = new_nodegroup['id'] - for new_network in network_config_new['networks']: - if new_network['name'] == network['name'] and \ - new_network['group_id'] == network['group_id']: - network['id'] = new_network['id'] - - self.fuel_web.client.update_network( - cluster_id, - network_config['networking_parameters'], - network_config['networks']) - - self.show_step(7) - self.fuel_web.client.assign_nodegroup( - new_nodegroup['id'], - [self.fuel_web.get_nailgun_node_by_devops_node(node) - for node in custom_nodes]) - - self.show_step(8) - logger.info('Wait all nodes from custom nodegroup become ' - 'in discover state..') - for slave in custom_nodes: - wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node( - slave)['status'] == 'discover', timeout=60 * 5, - timeout_msg='Node {} status wasn\'t changed ' - 'to "discover"!'.format(slave.name)) - logger.info('Node {} is in "discover" state'.format(slave.name)) - - self.env.make_snapshot("delete_custom_nodegroup") diff --git a/fuelweb_test/tests/test_multiqueue.py b/fuelweb_test/tests/test_multiqueue.py deleted file mode 100644 index 4e93199be..000000000 --- a/fuelweb_test/tests/test_multiqueue.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import division
-from __future__ import unicode_literals
-
-import random
-
-from devops.helpers import helpers as devops_helpers
-from devops.helpers.ssh_client import SSHAuth
-from proboscis import test
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_true
-
-from fuelweb_test import logger
-from fuelweb_test import settings
-from fuelweb_test.helpers import os_actions
-from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.tests.base_test_case import SetupEnvironment
-from fuelweb_test.tests.base_test_case import TestBasic
-
-
-@test
-class TestMultiqueue(TestBasic):
-
-    def __init__(self):
-        super(TestMultiqueue, self).__init__()
-        assert_true(settings.KVM_USE, "Multiqueue feature requires "
-                                      "KVM_USE=true env variable!")
-        assert_true(settings.HARDWARE["slave_node_cpu"] > 1,
-                    "Multiqueue feature requires more than 1 CPU for "
-                    "enabling queues!")
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=["multiqueue_base_check"])
-    @log_snapshot_after_test
-    def multiqueue_base_check(self):
-        """Deploy non-HA cluster for base multiqueue check
-
-        Scenario:
-            1. Create cluster
-            2. Add 1 node with controller role and
-               1 node with compute+cinder role
-            3. Deploy the cluster
-            4. Run network verification
-            5. Run OSTF
-            6. Edit TestVM metadata - add hw_vif_multiqueue_enabled=true
-            7. Create flavor with all available VCPUs
-            8. Boot instance from TestVM image and new flavor
-            9. Assign floating IP
-            10. Enable queues in instance
-            11. Check that queues were created
-            12.
Check instance availability - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - ) - - self.show_step(self.next_step) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'], - } - ) - - self.show_step(self.next_step) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(self.next_step) - self.fuel_web.verify_network(cluster_id) - - self.show_step(self.next_step) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - # update image's metadata - self.show_step(self.next_step) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - test_vm_image = os_conn.glance.images.find(name="TestVM") - test_vm_image.update(properties={'hw_vif_multiqueue_enabled': True}) - - nova_compute = os_conn.nova.hypervisors.list().pop() - vcpus = nova_compute.vcpus - # create flavor - self.show_step(self.next_step) - flavor_id = random.randint(10, 10000) - name = 'system_test-{}'.format(random.randint(10, 10000)) - - os_conn.create_flavor(name=name, ram=64, - vcpus=vcpus, disk=1, - flavorid=flavor_id) - - self.show_step(self.next_step) - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - server = os_conn.create_server_for_migration(neutron=True, - label=net_name, - flavor=flavor_id) - os_conn.verify_instance_status(server, 'ACTIVE') - - self.show_step(self.next_step) - floating_ip = os_conn.assign_floating_ip(server) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip.ip, server.id)) - - logger.info("Wait for ping from instance {} " - "by floating ip".format(server.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(floating_ip.ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". - format(server.id, 300))) - - cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) - slave_01_ssh = self.fuel_web.get_ssh_for_node("slave-01") - - self.show_step(self.next_step) - result = slave_01_ssh.execute_through_host( - hostname=floating_ip.ip, - cmd="sudo /sbin/ethtool -L eth0 combined {}".format(vcpus), - auth=cirros_auth) - - assert_equal( - result.exit_code, 0, - "Enabling queues using ethtool failed!\n{}".format(result)) - - self.show_step(self.next_step) - result = slave_01_ssh.execute_through_host( - hostname=floating_ip.ip, - cmd="ls /sys/class/net/eth0/queues", - auth=cirros_auth - ) - assert_equal(result.stdout_str.count("rx"), vcpus, - "RX queues count is not equal to vcpus count") - assert_equal(result.stdout_str.count("tx"), vcpus, - "TX queues count is not equal to vcpus count") diff --git a/fuelweb_test/tests/test_net_templates.py b/fuelweb_test/tests/test_net_templates.py deleted file mode 100644 index 0112c4778..000000000 --- a/fuelweb_test/tests/test_net_templates.py +++ /dev/null @@ -1,459 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
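
The multiqueue test above enables the queues with `ethtool -L eth0 combined <vcpus>` inside the guest and then counts the rx-*/tx-* entries the kernel exposes in sysfs. Run locally inside the guest, the same check reduces to the sketch below (interface name illustrative; the test performs the equivalent count over SSH):

    import glob

    def queue_counts(iface='eth0'):
        """Count RX/TX queue directories exposed in sysfs for `iface`."""
        base = '/sys/class/net/{0}/queues'.format(iface)
        return len(glob.glob(base + '/rx-*')), len(glob.glob(base + '/tx-*'))

    # After `ethtool -L eth0 combined N`, both counts should equal N;
    # for a multiqueue virtio NIC, N can be at most the guest vCPU count.
    print(queue_counts())
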
- -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import get_network_template -from fuelweb_test.settings import DEPLOYMENT_MODE_HA -from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase - - -@test(groups=["network_templates"]) -class TestNetworkTemplates(TestNetworkTemplatesBase): - """TestNetworkTemplates.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_cinder_net_tmpl"]) - @log_snapshot_after_test - def deploy_cinder_net_tmpl(self): - """Deploy HA environment with Cinder, Neutron and network template - - Scenario: - 1. Revert snapshot with 3 slaves - 2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE - 3. Add 1 controller + cinder nodes - 4. Add 2 compute + cinder nodes - 5. Upload 'cinder' network template' - 6. Create custom network groups basing - on template endpoints assignments - 7. Run network verification - 8. Deploy cluster - 9. Run network verification - 10. Run health checks (OSTF) - 11. Check L3 network configuration on slaves - 12. Check that services are listening on their networks only - - Duration 180m - Snapshot deploy_cinder_net_tmpl - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - settings={ - 'tenant': 'netTemplate', - 'user': 'netTemplate', - 'password': 'netTemplate', - } - ) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'cinder'], - 'slave-02': ['compute', 'cinder'], - 'slave-03': ['compute', 'cinder'], - }, - update_interfaces=False - ) - - network_template = get_network_template('cinder') - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - networks = self.generate_networks_for_template( - template=network_template, - ip_nets={'default': '10.200.0.0/16'}, - ip_prefixlen='24') - existing_networks = self.fuel_web.client.get_network_groups() - networks = self.create_custom_networks(networks, existing_networks) - - logger.debug('Networks: {0}'.format( - self.fuel_web.client.get_network_groups())) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60) - - self.fuel_web.verify_network(cluster_id) - - self.check_ipconfig_for_template(cluster_id, network_template, - networks) - self.check_services_networks(cluster_id, network_template) - - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', - 'ha', 'tests_platform']) - self.check_ipconfig_for_template(cluster_id, network_template, - networks) - - self.check_services_networks(cluster_id, network_template) - - self.env.make_snapshot("deploy_cinder_net_tmpl", - is_make=self.is_make_snapshot()) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_ceph_net_tmpl"]) - @log_snapshot_after_test - def deploy_ceph_net_tmpl(self): - """Deploy HA environment with Ceph, Neutron and network template - - Scenario: - 1. Revert snapshot with 5 slaves - 2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE - 3. Add 3 controller + ceph nodes - 4. Add 2 compute + ceph nodes - 5. Upload 'ceph' network template - 6. 
Create custom network groups basing - on template endpoints assignments - 7. Run network verification - 8. Deploy cluster - 9. Run network verification - 10. Run health checks (OSTF) - 11. Check L3 network configuration on slaves - 12. Check that services are listening on their networks only - - Duration 180m - Snapshot deploy_ceph_net_tmpl - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - settings={ - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'tenant': 'netTemplate', - 'user': 'netTemplate', - 'password': 'netTemplate', - } - ) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - }, - update_interfaces=False - ) - - network_template = get_network_template('ceph') - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - networks = self.generate_networks_for_template( - template=network_template, - ip_nets={'default': '10.200.0.0/16'}, - ip_prefixlen='24') - existing_networks = self.fuel_web.client.get_network_groups() - networks = self.create_custom_networks(networks, existing_networks) - - logger.debug('Networks: {0}'.format( - self.fuel_web.client.get_network_groups())) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', - 'ha', 'tests_platform']) - - self.check_ipconfig_for_template(cluster_id, network_template, - networks) - - self.check_services_networks(cluster_id, network_template) - - self.env.make_snapshot("deploy_ceph_net_tmpl") - - @test(depends_on_groups=["deploy_cinder_net_tmpl"], - groups=["add_nodes_net_tmpl"]) - @log_snapshot_after_test - def add_nodes_net_tmpl(self): - """Add nodes to operational environment with network template - - Scenario: - 1. Revert snapshot with deployed environment - 2. Bootstrap 2 more slave nodes - 3. Add 1 controller + cinder and 1 compute + cinder nodes - 4. Upload 'cinder_add_nodes' network template with broken - network mapping for new nodes - 5. Run network verification. Check it failed. - 6. Upload 'cinder' network template' - 7. Run network verification - 8. Deploy cluster - 9. Run network verification - 10. Run health checks (OSTF) - 11. Check L3 network configuration on slaves - 12. 
Check that services are listening on their networks only - - Duration 60m - Snapshot add_nodes_net_tmpl - """ - - self.env.revert_snapshot("deploy_cinder_net_tmpl") - - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5]) - - cluster_id = self.fuel_web.get_last_created_cluster() - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-04': ['controller', 'cinder'], - 'slave-05': ['compute', 'cinder'], - }, - update_interfaces=False - ) - - network_template = get_network_template('cinder_add_nodes') - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - self.fuel_web.verify_network(cluster_id, success=False) - - network_template = get_network_template('cinder') - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - networks = self.generate_networks_for_template( - template=network_template, - ip_nets={'default': '10.200.0.0/16'}, - ip_prefixlen='24') - existing_networks = self.fuel_web.client.get_network_groups() - networks = self.create_custom_networks(networks, existing_networks) - - logger.debug('Networks: {0}'.format( - self.fuel_web.client.get_network_groups())) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.check_ipconfig_for_template(cluster_id, network_template, - networks) - self.check_services_networks(cluster_id, network_template) - - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', - 'ha', 'tests_platform']) - self.check_ipconfig_for_template(cluster_id, network_template, - networks) - - self.check_services_networks(cluster_id, network_template) - - self.env.make_snapshot("add_nodes_net_tmpl") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["network_config_consistency_on_reboot"]) - @log_snapshot_after_test - def network_config_consistency_on_reboot(self): - """Deploy HA environment with Cinder, Neutron and network template - - Scenario: - 1. Revert snapshot with 5 slaves - 2. Create cluster (HA) with Neutron VLAN - 3. Add 3 controller and 1 compute + cinder nodes - 4. Upload 'default_ovs' network template - 5. Create custom network groups basing - on template endpoints assignments - 6. Run network verification - 7. Deploy cluster and run basic health checks - 8. Run network verification - 9. Check L3 network configuration on slaves - 10. Check that services are listening on their networks only - 11. Reboot a node - 12. Run network verification - 13. Check L3 network configuration on slaves - 14. Check that services are listening on their networks only - 15. 
Run OSTF - - Duration 180m - Snapshot network_config_consistency_on_reboot - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT_TYPE, - 'tenant': 'netTemplate', - 'user': 'netTemplate', - 'password': 'netTemplate', - } - ) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'cinder'], - }, - update_interfaces=False - ) - - self.show_step(4) - network_template = get_network_template('default_ovs') - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - - self.show_step(5) - networks = self.generate_networks_for_template( - template=network_template, - ip_nets={'default': '10.200.0.0/16'}, - ip_prefixlen='24') - existing_networks = self.fuel_web.client.get_network_groups() - networks = self.create_custom_networks(networks, existing_networks) - - logger.debug('Networks: {0}'.format( - self.fuel_web.client.get_network_groups())) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.check_ipconfig_for_template( - cluster_id, network_template, networks) - self.show_step(10) - self.check_services_networks(cluster_id, network_template) - - self.show_step(11) - self.fuel_web.warm_restart_nodes([self.env.d_env.nodes().slaves[0]]) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - - self.show_step(13) - self.check_ipconfig_for_template( - cluster_id, network_template, networks) - self.show_step(14) - self.check_services_networks(cluster_id, network_template) - - self.show_step(15) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha']) - - self.env.make_snapshot("network_config_consistency_on_reboot", - is_make=self.is_make_snapshot()) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_cluster_without_management_net"]) - @log_snapshot_after_test - def deploy_cluster_without_management_net(self): - """Deploy HA environment with network template: no dedicated management network - - Scenario: - 1. Revert snapshot with 3 slaves - 2. Create cluster (HA) with Neutron VLAN - 3. Add 1 controller + cinder nodes - 4. Add 2 compute + cinder nodes - 5. Upload network template - 6. Delete 'management' network-group - 7. Run network verification - 8. Deploy cluster - 9. Run network verification - 10. 
Run health checks (OSTF) - - Duration 180m - Snapshot deploy_cluster_without_management_net - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - ) - - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'cinder'], - 'slave-02': ['compute', 'cinder'], - 'slave-03': ['compute', 'cinder'], - }, - update_interfaces=False - ) - - self.show_step(5) - template = 'default_no_mgmt_nwk' - logger.info('using template: {!s}'.format(template)) - network_template = get_network_template(template) - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - - self.show_step(6) - mgmt_net = [ - grp for grp in self.fuel_web.client.get_network_groups() - if grp['name'] == 'management'].pop() - - assert_true( - self.fuel_web.client.del_network_group(mgmt_net['id']).status_code - in {200, 204}, - 'Network group delete failed' - ) - - assert_true( - mgmt_net not in self.fuel_web.client.get_network_groups(), - 'Network group has not been deleted' - ) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha', 'tests_platform']) - - self.env.make_snapshot('deploy_cluster_without_management_net') diff --git a/fuelweb_test/tests/test_net_templates_base.py b/fuelweb_test/tests/test_net_templates_base.py deleted file mode 100644 index 748b466bf..000000000 --- a/fuelweb_test/tests/test_net_templates_base.py +++ /dev/null @@ -1,499 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import netaddr - -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis.asserts import fail - -from core.helpers.log_helpers import logwrap - -from fuelweb_test import logger -from fuelweb_test.helpers.utils import get_ip_listen_stats -from fuelweb_test.tests.base_test_case import TestBasic - - -class TestNetworkTemplatesBase(TestBasic): - """Base class to store all utility methods for network templates tests.""" - - @logwrap - def generate_networks_for_template(self, template, ip_nets, - ip_prefixlen): - """Slice network to subnets for template. - - Generate networks from network template and ip_nets descriptions - for node groups and value to slice that descriptions. ip_nets is a - dict with key named as nodegroup and strings values for with - description of network for that nodegroup in format '127.0.0.1/24' - to be sliced in pieces for networks. ip_prefixlen - the amount the - network prefix length should be sliced by. 24 will create networks - '127.0.0.1/24' from network '127.0.0.1/16'. 
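A minimal worked sketch of the slicing described above (illustrative only, not part of the deleted module; it assumes the netaddr package and the '10.200.0.0/16' input these tests pass in):

import netaddr

# Carve the /16 into /24 subnets the way generate_networks_for_template()
# does; pop() hands subnets out starting from the end of the list.
ip_subnets = list(netaddr.IPNetwork('10.200.0.0/16').subnet(24))
ip_subnet = ip_subnets.pop()
print(ip_subnet)                                # 10.200.255.0/24
print([str(ip_subnet[1]), str(ip_subnet[-2])])  # ip_range skips the network
                                                # and broadcast addresses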
- - :param template: Yaml template with network assignments on interfaces. - :param ip_nets: Dict with network descriptions. - :param ip_prefixlen: Integer for slicing network prefix. - :return: Data to be used to assign networks to nodes - """ - networks_data = [] - nodegroups = self.fuel_web.client.get_nodegroups() - for nodegroup, section in template['adv_net_template'].items(): - assert_true(any(n['name'] == nodegroup for n in nodegroups), - 'Network template contains settings for Node Group ' - '"{0}", which does not exist!'.format(nodegroup)) - group_id = [n['id'] for n in nodegroups if - n['name'] == nodegroup][0] - ip_network = netaddr.IPNetwork(str(ip_nets[nodegroup])) - ip_subnets = list(ip_network.subnet(int(ip_prefixlen))) - for network in section['network_assignments']: - ip_subnet = ip_subnets.pop() - networks_data.append( - { - 'name': network, - 'cidr': str(ip_subnet), - 'group_id': group_id, - 'gateway': None, - 'meta': { - "notation": "ip_ranges", - "render_type": None, - "map_priority": 0, - "configurable": True, - "unmovable": False, - "use_gateway": False, - "render_addr_mask": None, - 'ip_range': [str(ip_subnet[1]), str(ip_subnet[-2])] - } - } - ) - return networks_data - - @logwrap - def map_group_by_iface_and_network(self, template): - """ Map group id, iface name and network name - - :param template: Yaml template with network assignments on interfaces. - :return: Data to be used for checking IP assignment - """ - mapped_data = {} - nodegroups = self.fuel_web.client.get_nodegroups() - for nodegroup, section in template['adv_net_template'].items(): - networks = [(n, section['network_assignments'][n]['ep']) - for n in section['network_assignments']] - assert_true(any(n['name'] == nodegroup for n in nodegroups), - 'Network template contains settings for Node Group ' - '"{0}", which does not exist!'.format(nodegroup)) - group_id = [n['id'] for n in nodegroups if - n['name'] == nodegroup][0] - mapped_data[group_id] = dict(networks) - return mapped_data - - @staticmethod - @logwrap - def get_template_ep_for_role(template, role, nodegroup='default', - skip_net_roles=None): - if skip_net_roles is None: - skip_net_roles = set() - tmpl = template['adv_net_template'][nodegroup] - endpoints = set() - networks = set() - network_types = tmpl['templates_for_node_role'][role] - for network_type in network_types: - endpoints.update(tmpl['network_scheme'][network_type]['endpoints']) - for scheme_type in tmpl['network_scheme']: - for net_role in tmpl['network_scheme'][scheme_type]['roles']: - if net_role in skip_net_roles: - endpoints.discard( - tmpl['network_scheme'][scheme_type]['roles'][net_role]) - for net in tmpl['network_assignments']: - if tmpl['network_assignments'][net]['ep'] in endpoints: - networks.add(net) - return networks - - @staticmethod - @logwrap - def get_template_netroles_for_role(template, role, nodegroup='default'): - tmpl = template['adv_net_template'][nodegroup] - netroles = dict() - network_types = tmpl['templates_for_node_role'][role] - for network_type in network_types: - netroles.update(tmpl['network_scheme'][network_type]['roles']) - return netroles - - @logwrap - def create_custom_networks(self, networks, existing_networks): - for custom_net in networks: - if not any([custom_net['name'] == n['name'] and - # ID of 'fuelweb_admin' default network group is None - custom_net['group_id'] == (n['group_id'] or 1) - for n in existing_networks]): - self.fuel_web.client.add_network_group(custom_net) - else: - # Copying settings from existing network - net = [n 
for n in existing_networks if - custom_net['name'] == n['name'] and - custom_net['group_id'] == (n['group_id'] or 1)][0] - custom_net['cidr'] = net['cidr'] - custom_net['meta'] = net['meta'] - custom_net['gateway'] = net['gateway'] - return networks - - @staticmethod - @logwrap - def get_interface_ips(remote, iface_name): - cmd = ("set -o pipefail; " - "ip -o -4 address show dev {0} | sed -rn " - "'s/^.*\sinet\s+([0-9\.]+\/[0-9]{{1,2}})\s.*$/\\1/p'").format( - iface_name) - result = remote.execute(cmd) - logger.debug("Checking interface IP result: {0}".format(result)) - assert_equal(result['exit_code'], 0, - "Device {0} not found on remote node!".format(iface_name)) - return [line.strip() for line in result['stdout']] - - @logwrap - def check_interface_ip_exists(self, remote, iface_name, cidr): - raw_addresses = self.get_interface_ips(remote, iface_name) - raw_ips = [raw_addr.split('/')[0] for raw_addr in raw_addresses] - try: - ips = [netaddr.IPAddress(str(raw_ip)) for raw_ip in raw_ips] - except ValueError: - fail('Device {0} on remote node does not have a valid ' - 'IPv4 address assigned!'.format(iface_name)) - return - actual_networks = [netaddr.IPNetwork(str(raw_addr)) for - raw_addr in raw_addresses] - network = netaddr.IPNetwork(str(cidr)) - assert_true(network in actual_networks, - 'Network(s) on {0} device differ from {1}: {2}'.format( - iface_name, cidr, raw_addresses)) - assert_true(any(ip in network for ip in ips), - 'IP address on {0} device is not from {1} network!'.format( - iface_name, cidr)) - - @logwrap - def check_ipconfig_for_template(self, cluster_id, network_template, - networks): - logger.info("Checking that IP address configuration on nodes " - "corresponds to the networking template in use...") - # Network for Neutron is configured in namespaces (l3/dhcp agents) - # and a bridge for it doesn't have IP, so skipping it for now - skip_roles = {'neutron/private'} - mapped_data = self.map_group_by_iface_and_network(network_template) - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - node_networks = set() - node_group_name = [ng['name'] for ng in - self.fuel_web.client.get_nodegroups() - if ng['id'] == node['group_id']][0] - for role in node['roles']: - node_networks.update( - self.get_template_ep_for_role(template=network_template, - role=role, - nodegroup=node_group_name, - skip_net_roles=skip_roles)) - with self.env.d_env.get_ssh_to_remote(node['ip']) as remote: - for network in networks: - if network['name'] not in node_networks or \ - network['group_id'] != node['group_id']: - continue - logger.debug( - 'Checking interface "{0}" for IP network ' - '"{1}" on "{2}"'.format( - mapped_data[node['group_id']][network['name']], - network['cidr'], - node['hostname'])) - self.check_interface_ip_exists( - remote, - mapped_data[node['group_id']][network['name']], - network['cidr']) - - @staticmethod - @logwrap - def get_port_listen_ips(listen_stats, port): - ips = set() - for socket in listen_stats: - hexip, hexport = socket.split(':') - if int(port) == int(hexport, 16): - ips.add('.'.join([str(int(hexip[n:n + 2], 16)) - for n in range(0, len(hexip), 2)][::-1])) - return ips - - @logwrap - def check_services_networks(self, cluster_id, net_template): - logger.info("Checking that OpenStack services on nodes are listening " - "on IP networks according to the networking template in use...") - services = [ - { - 'name': 'keystone_api', - 'network_roles': ['keystone/api'], - 'tcp_ports': [5000, 35357], - 'udp_ports': [], - # check is disabled because access to API is 
restricted - # using firewall (see LP#1489057, - # https://review.openstack.org/#/c/218853/) - 'enabled': False - }, - { - 'name': 'nova-api', - 'network_roles': ['nova/api'], - 'tcp_ports': [8773, 8774], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'neutron-api', - 'network_roles': ['neutron/api'], - 'tcp_ports': [9696], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'swift-api', - 'network_roles': ['swift/api'], - 'tcp_ports': [8080], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'swift-replication', - 'network_roles': ['swift/replication'], - 'tcp_ports': [6000, 6001, 6002], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'sahara-api', - 'network_roles': ['sahara/api'], - 'tcp_ports': [8386], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'ceilometer-api', - 'network_roles': ['ceilometer/api'], - 'tcp_ports': [8777], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'cinder-api', - 'network_roles': ['cinder/api'], - 'tcp_ports': [8776], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'glance-api', - 'network_roles': ['glance/api'], - 'tcp_ports': [5509], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'heat-api', - 'network_roles': ['heat/api'], - 'tcp_ports': [8000, 8003, 8004], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'murano-api', - 'network_roles': ['murano/api'], - 'tcp_ports': [8082], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'ceph', - 'network_roles': ['ceph/replication', 'ceph/public'], - 'tcp_ports': [6804, 6805, 6806, 6807], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'ceph-radosgw', - 'network_roles': ['ceph/radosgw'], - 'tcp_ports': [7480], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'mongo-db', - 'network_roles': ['mongo/db'], - 'tcp_ports': [27017], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'mgmt-messaging', - 'network_roles': ['mgmt/messaging'], - 'tcp_ports': [5673], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'mgmt-corosync', - 'network_roles': ['mgmt/corosync'], - 'tcp_ports': [], - 'udp_ports': [5405], - 'enabled': True - }, - { - 'name': 'mgmt-memcache', - 'network_roles': ['mgmt/memcache'], - 'tcp_ports': [11211], - 'udp_ports': [11211], - 'enabled': True - }, - { - 'name': 'mgmt-database', - 'network_roles': ['mgmt/database'], - 'tcp_ports': [3307, 4567], - 'udp_ports': [], - 'enabled': True - }, - { - 'name': 'cinder-iscsi', - 'network_roles': ['cinder/iscsi'], - 'tcp_ports': [3260], - 'udp_ports': [], - # ISCSI daemon is started automatically because cinder-volume - # package installs it by dependencies (LP#1491518) - 'enabled': False - }, - ] - - check_passed = True - - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - node_netroles = dict() - node_group_name = [ng['name'] for ng in - self.fuel_web.client.get_nodegroups() - if ng['id'] == node['group_id']][0] - for role in node['roles']: - node_netroles.update(self.get_template_netroles_for_role( - template=net_template, - role=role, - nodegroup=node_group_name)) - - with self.env.d_env.get_ssh_to_remote(node['ip']) as remote: - tcp_listen_stats = get_ip_listen_stats(remote, 'tcp') - udp_listen_stats = get_ip_listen_stats(remote, 'udp') - for service in services: - if any(net_role not in node_netroles.keys() - for net_role in service['network_roles']) \ - or not service['enabled']: - continue - ips = set() - for service_net_role in service['network_roles']: - iface_name = node_netroles[service_net_role] - ips.update([cidr.split('/')[0] for cidr in - 
self.get_interface_ips(remote, - iface_name)]) - - for port in service['tcp_ports']: - listen_ips = self.get_port_listen_ips(tcp_listen_stats, - port) - if not listen_ips: - logger.debug('Service "{0}" is not found on ' - '"{1}".'.format(service['name'], - node['hostname'])) - continue - if any(lip not in ips for lip in listen_ips): - check_passed = False - logger.error('Service "{0}" (port {4}/tcp) is ' - 'listening on wrong IP address(es) ' - 'on "{1}": expected "{2}", got ' - '"{3}"!'.format(service['name'], - node['hostname'], - ips, - listen_ips, - port)) - for port in service['udp_ports']: - listen_ips = self.get_port_listen_ips(udp_listen_stats, - port) - if not listen_ips: - logger.debug('Service "{0}" is not found on ' - '"{1}".'.format(service['name'], - node['hostname'])) - continue - if any(lip not in ips for lip in listen_ips): - check_passed = False - logger.error('Service "{0}" (port {4}/udp) is ' - 'listening on wrong IP address(es) ' - 'on "{1}": expected "{2}", got ' - '"{3}"!'.format(service['name'], - node['hostname'], - ips, - listen_ips, - port)) - assert_true(check_passed, - 'Some services are listening on wrong IPs! ' - 'Please check logs for details!') - - @staticmethod - def get_modified_ranges(net_dict, net_name, group_id): - for net in net_dict['networks']: - if net_name in net['name'] and net['group_id'] == group_id: - cidr = net['cidr'] - sliced_list = list(netaddr.IPNetwork(str(cidr)))[5:-5] - return [str(sliced_list[0]), str(sliced_list[-1])] - - @staticmethod - def change_default_admin_range(networks, number_excluded_ips): - """Change IP range for admin network by excluding N of first addresses - from default range - :param networks: list, environment networks configuration - :param number_excluded_ips: int, number of IPs to remove from range - """ - default_admin_network = [n for n in networks - if (n['name'] == "fuelweb_admin" and - n['group_id'] is None)] - assert_true(len(default_admin_network) == 1, - "Default 'admin/pxe' network not found " - "in cluster network configuration!") - default_admin_range = [netaddr.IPAddress(str(ip)) for ip - in default_admin_network[0]["ip_ranges"][0]] - new_admin_range = [default_admin_range[0] + number_excluded_ips, - default_admin_range[1]] - default_admin_network[0]["ip_ranges"][0] = [str(ip) - for ip in new_admin_range] - return default_admin_network[0]["ip_ranges"][0] - - @staticmethod - def is_ip_in_range(ip_addr, ip_range_start, ip_range_end): - return netaddr.IPAddress(str(ip_addr)) in netaddr.iter_iprange( - str(ip_range_start), str(ip_range_end)) - - @staticmethod - def is_update_dnsmasq_running(tasks): - for task in tasks: - if task['name'] == "update_dnsmasq" and \ - task["status"] == "running": - return True - return False - - @staticmethod - def update_network_ranges(net_data, update_data): - for net in net_data['networks']: - for group in update_data: - for net_name in update_data[group]: - if net_name in net['name'] and net['group_id'] == group: - net['ip_ranges'] = update_data[group][net_name] - net['meta']['notation'] = 'ip_ranges' - return net_data - - @staticmethod - def get_ranges(net_data, net_name, group_id): - return [net['ip_ranges'] for net in net_data['networks'] if - net_name in net['name'] and group_id == net['group_id']][0] diff --git a/fuelweb_test/tests/test_net_templates_multiple_networks.py b/fuelweb_test/tests/test_net_templates_multiple_networks.py deleted file mode 100644 index e171a8abc..000000000 --- a/fuelweb_test/tests/test_net_templates_multiple_networks.py +++ /dev/null @@ 
-1,127 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import asserts -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import utils -from fuelweb_test.settings import DEPLOYMENT_MODE_HA -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import NODEGROUPS -from fuelweb_test.settings import MULTIPLE_NETWORKS -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase - - -@test(groups=["network_templates_multiple_networks", "multiracks_2"]) -class TestNetworkTemplatesMultipleNetworks(TestNetworkTemplatesBase): - """TestNetworkTemplatesMultipleNetworks.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=['two_nodegroups_network_templates']) - @log_snapshot_after_test - def two_nodegroups_network_templates(self): - """Deploy HA environment with Cinder, Neutron and network template on - two nodegroups. - - Scenario: - 1. Revert snapshot with ready master node - 2. Bootstrap 3 slaves from default nodegroup - 3. Create cluster with Neutron VXLAN and custom nodegroups - 4. Bootstrap 2 slaves nodes from custom nodegroup - 5. Add 3 controller nodes from default nodegroup - 6. Add 2 compute+cinder nodes from custom nodegroup - 7. Upload 'two_nodegroups' network template - 8. Verify networks - 9. Deploy cluster - 10. Run health checks (OSTF) - 11. Check L3 network configuration on slaves - 12. 
Check that services are listening on their networks only - - Duration 120m - Snapshot two_nodegroups_network_templates - """ - - asserts.assert_true(MULTIPLE_NETWORKS, "MULTIPLE_NETWORKS variable" - " wasn't exported") - self.show_step(1, initialize=True) - self.env.revert_snapshot('ready') - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3]) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE_HA, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': NEUTRON_SEGMENT['tun'], - 'tenant': 'netTemplate', - 'user': 'netTemplate', - 'password': 'netTemplate', - } - ) - - self.show_step(4) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5]) - - self.show_step(5) - self.show_step(6) - nodegroup1 = NODEGROUPS[0]['name'] - nodegroup2 = NODEGROUPS[1]['name'] - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': [['controller'], nodegroup1], - 'slave-02': [['controller'], nodegroup1], - 'slave-03': [['controller'], nodegroup1], - 'slave-04': [['compute', 'cinder'], nodegroup2], - 'slave-05': [['compute', 'cinder'], nodegroup2], - } - ) - network_template = utils.get_network_template('two_nodegroups') - self.show_step(7) - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, - network_template=network_template) - networks = self.generate_networks_for_template( - template=network_template, - ip_nets={nodegroup1: '10.200.0.0/16', nodegroup2: '10.210.0.0/16'}, - ip_prefixlen='24') - existing_networks = self.fuel_web.client.get_network_groups() - networks = self.create_custom_networks(networks, existing_networks) - - logger.debug('Networks: {0}'.format( - self.fuel_web.client.get_network_groups())) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60) - - self.show_step(11) - self.check_ipconfig_for_template(cluster_id, - network_template, - networks) - self.show_step(12) - self.check_services_networks(cluster_id, network_template) - - # TODO(akostrikov) ostf may fail, need further investigation. - ostf_tmpl_set = ['smoke', 'sanity', 'ha', 'tests_platform'] - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=ostf_tmpl_set) - - self.env.make_snapshot('two_nodegroups_network_templates') diff --git a/fuelweb_test/tests/test_neutron.py b/fuelweb_test/tests/test_neutron.py deleted file mode 100644 index 1cca1cf02..000000000 --- a/fuelweb_test/tests/test_neutron.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
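One clarifying sketch for the base-class helper shown earlier: get_port_listen_ips() parses socket entries in /proc/net/{tcp,udp} format, where the kernel encodes IPv4 endpoints as little-endian hex. A hedged, self-contained example with a hypothetical entry ('0200A8C0:1F90' is invented, not taken from the deleted code):

# Decode one hypothetical /proc/net/tcp local-address field.
hexip, hexport = '0200A8C0:1F90'.split(':')
port = int(hexport, 16)  # 8080
octets = [str(int(hexip[n:n + 2], 16)) for n in range(0, len(hexip), 2)]
ip = '.'.join(octets[::-1])  # byte order is reversed -> '192.168.0.2'
print(ip, port)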
-from warnings import warn - -from proboscis.asserts import assert_equal -from proboscis import test -from proboscis import SkipTest - -from fuelweb_test.helpers.common import Common -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test import logger -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(enabled=False, groups=["thread_1", "neutron"]) -class NeutronVlan(TestBasic): - """NeutronVlan. - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron - - """ # TODO documentation - - @test(enabled=False, - depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_neutron_vlan", "ha_one_controller_neutron_vlan", - "deployment", "nova", "nova-compute"]) - @log_snapshot_after_test - def deploy_neutron_vlan(self): - """Deploy cluster in ha mode with 1 controller and Neutron VLAN - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron.TestNeutronVlan - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 35m - Snapshot deploy_neutron_vlan - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'], - 'tenant': 'simpleVlan', - 'user': 'simpleVlan', - 'password': 'simpleVlan' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_vlan", is_make=True) - - -@test(enabled=False, - groups=["neutron", "ha", "ha_neutron", "classic_provisioning"]) -class NeutronGreHa(TestBasic): - """NeutronGreHa. - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron - - """ # TODO documentation - - @test(enabled=False, - depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_gre_ha", "ha_neutron_gre"]) - @log_snapshot_after_test - def deploy_neutron_gre_ha(self): - """Deploy cluster in HA mode with Neutron GRE (DEPRECATED) - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron.TestNeutronTunHa - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Check Swift ring and rebalance it if needed - 7. 
Run OSTF - - Duration 80m - Snapshot deploy_neutron_gre_ha - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['gre'], - 'tenant': 'haGre', - 'user': 'haGre', - 'password': 'haGre' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - - self.fuel_web.verify_network(cluster_id) - devops_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - logger.debug("devops node name is {0}".format(devops_node.name)) - ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip'] - Common.rebalance_swift_ring(ip) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_neutron_gre_ha") - - -@test(enabled=False, groups=["neutron", "ha", "ha_neutron"]) -class NeutronVlanHa(TestBasic): - """NeutronVlanHa. - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron - - """ # TODO documentation - - @test(enabled=False, - depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_vlan_ha", "neutron_vlan_ha"]) - @log_snapshot_after_test - def deploy_neutron_vlan_ha(self): - """Deploy cluster in HA mode with Neutron VLAN - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron.TestNeutronVlanHa - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Check Swift ring and rebalance it if needed - 7. 
Run OSTF - - Duration 80m - Snapshot deploy_neutron_vlan_ha - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'] - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/22', - '192.168.196.1') - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.check_fixed_network_cidr( - cluster_id, os_conn) - - self.fuel_web.verify_network(cluster_id) - devops_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - logger.debug("devops node name is {0}".format(devops_node.name)) - ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip'] - Common.rebalance_swift_ring(ip) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_neutron_vlan_ha") diff --git a/fuelweb_test/tests/test_neutron_ipv6.py b/fuelweb_test/tests/test_neutron_ipv6.py deleted file mode 100644 index 56f56eaba..000000000 --- a/fuelweb_test/tests/test_neutron_ipv6.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -from warnings import warn - -from devops.helpers import helpers as devops_helpers -from proboscis import SkipTest -from proboscis import test - -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.checkers import ping6_from_instance -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.helpers.utils import get_instance_ipv6 -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test import logger - -ssh_manager = SSHManager() - - -@test(enabled=False, groups=["thread_1", "neutron"]) -class TestNeutronIPv6(TestBasic): - """NeutronIPv6. - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron_ipv6 - - """ - - @test(enabled=False, - depends_on_groups=['deploy_neutron_vlan'], - groups=['deploy_neutron_ip_v6', - "nova", "nova-compute", "neutron_ipv6"]) - @log_snapshot_after_test - def deploy_neutron_ip_v6(self): - """Check IPv6 only functionality for Neutron VLAN - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_neutron_ipv6.TestNeutronIPv6 - - Scenario: - 1. 
Revert deploy_neutron_vlan snapshot - 2. Create network resources: two dualstack network IPv6 subnets - (should be in SLAAC mode, - address space should not intersect), - virtual router and set gateway. - 3. Create a Security Group, - that allows SSH and ICMP for both IPv4 and IPv6. - 4. Launch two instances, one for each network. - 5. Attach Floating IP for both instances. - 6. SSH to the main instance and ping6 another instance. - - Duration 10m - Snapshot deploy_neutron_ip_v6 - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.show_step(1, initialize=True) - self.env.revert_snapshot("deploy_neutron_vlan") - - cluster_id = self.fuel_web.get_last_created_cluster() - public_vip = self.fuel_web.get_public_vip(cluster_id) - logger.info('Public vip is %s', public_vip) - - os_conn = os_actions.OpenStackActions( - controller_ip=public_vip, - user='simpleVlan', - passwd='simpleVlan', - tenant='simpleVlan' - ) - - tenant = os_conn.get_tenant('simpleVlan') - - self.show_step(2) - net1, net2 = os_conn.create_network_resources_for_ipv6_test(tenant) - - self.show_step(3) - security_group = os_conn.create_sec_group_for_ssh() - - self.show_step(4) - instance1 = os_conn.create_server( - name='instance1', - security_groups=[security_group], - net_id=net1['id'], - ) - - instance2 = os_conn.create_server( - name='instance2', - security_groups=[security_group], - net_id=net2['id'], - ) - - self.show_step(5) - floating_ip = os_conn.assign_floating_ip(instance1) - floating_ip2 = os_conn.assign_floating_ip(instance2) - - self.show_step(6) - get_instance_ipv6(instance1, net1) - instance2_ipv6 = get_instance_ipv6(instance2, net2) - - node_ip = self.fuel_web.get_node_ip_by_devops_name("slave-01") - remote = ssh_manager.get_remote(node_ip) - for instance_ip, instance in ( - (floating_ip.ip, instance1), - (floating_ip2.ip, instance2) - ): - logger.info("Wait for ping from instance {} " - "by floating ip".format(instance.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(instance_ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". - format(instance.id, 300))) - - ping6_from_instance(remote, floating_ip.ip, instance2_ipv6) - - self.env.make_snapshot('deploy_neutron_ip_v6') diff --git a/fuelweb_test/tests/test_neutron_public.py b/fuelweb_test/tests/test_neutron_public.py deleted file mode 100644 index 994d605ad..000000000 --- a/fuelweb_test/tests/test_neutron_public.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
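The reachability wait in the IPv6 test above follows the generic devops polling pattern: wait() re-invokes the callable until it returns a truthy value or the timeout expires. A minimal sketch, assuming a hypothetical floating IP (the address below is illustrative, not from the original test):

from devops.helpers import helpers as devops_helpers

floating_ip = '172.16.0.130'  # hypothetical floating IP
devops_helpers.wait(
    lambda: devops_helpers.tcp_ping(floating_ip, 22),
    timeout=300,
    timeout_msg='Instance at {0} is unreachable for '
                '300 seconds'.format(floating_ip))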
-from proboscis.asserts import assert_equal -from proboscis import test - -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["neutron", "ha", "ha_neutron_public"]) -class NeutronTunHaPublicNetwork(TestBasic): - """NeutronTunHaPublicNetwork.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_tun_ha_public_network"]) - @log_snapshot_after_test - def deploy_neutron_tun_ha_with_public_network(self): - """Deploy cluster in HA mode with Neutron VXLAN and public network - assigned to all nodes - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Enable assign public networks to all nodes option - 5. Deploy the cluster - 6. Check that public network was assigned to all nodes - 7. Run network verification - 8. Run OSTF - - Duration 80m - Snapshot deploy_neutron_tun_ha_public_network - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'], - 'tenant': 'haTun', - 'user': 'haTun', - 'password': 'haTun', - 'assign_to_all_nodes': True - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.security.verify_firewall(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_neutron_tun_ha_public_network") - - -@test(groups=["neutron", "ha", "ha_neutron_public"]) -class NeutronVlanHaPublicNetwork(TestBasic): - """NeutronVlanHaPublicNetwork.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_vlan_ha_public_network"]) - @log_snapshot_after_test - def deploy_neutron_vlan_ha_with_public_network(self): - """Deploy cluster in HA mode with Neutron VLAN and public network - assigned to all nodes - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Enable assign public networks to all nodes option - 5. Deploy the cluster - 6. Check that public network was assigned to all nodes - 7. Run network verification - 8. 
Run OSTF - - Duration 80m - Snapshot deploy_neutron_vlan_ha_public_network - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'], - 'assign_to_all_nodes': True - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/22', - '192.168.196.1') - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - # assert_equal(str(cluster['net_segment_type']), segment_type) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.check_fixed_network_cidr( - cluster_id, os_conn) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.security.verify_firewall(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_neutron_vlan_ha_public_network") diff --git a/fuelweb_test/tests/test_neutron_tun.py b/fuelweb_test/tests/test_neutron_tun.py deleted file mode 100644 index fad45b5a6..000000000 --- a/fuelweb_test/tests/test_neutron_tun.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import re - -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test - -from core.helpers.log_helpers import QuietLogger - -from fuelweb_test.helpers.common import Common -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.tests.test_neutron_tun_base import NeutronTunHaBase -from fuelweb_test import logger - - -@test(groups=["ha_neutron_tun", "neutron", "smoke_neutron", "deployment"]) -class NeutronTun(TestBasic): - """NeutronTun.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_neutron_tun", "ha_one_controller_neutron_tun", - "cinder", "swift", "glance", "neutron", "deployment"]) - @log_snapshot_after_test - def deploy_neutron_tun(self): - """Deploy cluster in ha mode with 1 controller and Neutron VXLAN - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 2 nodes with compute role - 4. Run network verification - 5. Deploy the cluster - 6. Run network verification - 7. 
Run OSTF - - Duration 35m - Snapshot deploy_neutron_tun - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'], - 'tenant': 'simpleTun', - 'user': 'simpleTun', - 'password': 'simpleTun' - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'], - 'slave-03': ['compute', 'cinder'] - } - ) - self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/26', - '192.168.196.1') - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], data['password'], data['tenant']) - - cluster = self.fuel_web.client.get_cluster(cluster_id) - assert_equal(str(cluster['net_provider']), 'neutron') - # assert_equal(str(cluster['net_segment_type']), segment_type) - self.fuel_web.check_fixed_network_cidr( - cluster_id, os_conn) - - checkers.check_client_smoke(self.ssh_manager.admin_ip) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("deploy_neutron_tun") - - -@test(groups=["neutron", "ha", "ha_neutron_tun"]) -class NeutronTunHa(NeutronTunHaBase): - """NeutronTunHa.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_tun_ha", "ha_neutron_tun"]) - @log_snapshot_after_test - def deploy_neutron_tun_ha(self): - """Deploy cluster in HA mode with Neutron VXLAN - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 80m - Snapshot deploy_neutron_tun_ha - """ - super(self.__class__, self).deploy_neutron_tun_ha_base( - snapshot_name="deploy_neutron_tun_ha") - - -@test(groups=["ha", "ha_neutron_tun"]) -class TestHaNeutronAddCompute(TestBasic): - """TestHaNeutronAddCompute.""" # TODO documentation - - @test(depends_on_groups=['deploy_neutron_tun_ha'], - groups=["neutron_tun_ha_add_compute"]) - @log_snapshot_after_test - def neutron_tun_ha_add_compute(self): - """Add compute node to cluster in HA mode with Neutron VXLAN network - - Scenario: - 1. Revert snapshot deploy_neutron_tun_ha with 3 controller - and 2 compute nodes - 2. Add 1 node with compute role - 3. Deploy the cluster - 4. Run network verification - 5. Run OSTF - - Duration 10m - Snapshot neutron_tun_ha_add_compute - - """ - self.env.revert_snapshot("deploy_neutron_tun_ha") - cluster_id = self.fuel_web.get_last_created_cluster() - - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[5:6]) - self.fuel_web.update_nodes( - cluster_id, {'slave-06': ['compute']}, True, False - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("neutron_tun_ha_add_compute") - - @test(depends_on_groups=['deploy_neutron_tun_ha'], - groups=["neutron_tun_ha_addremove"]) - @log_snapshot_after_test - def neutron_tun_ha_addremove(self): - """Add and re-add cinder / compute + cinder to HA cluster - - Scenario: - 1. Revert snapshot deploy_neutron_tun_ha with 3 controller - and 2 compute nodes - 2. Add 'cinder' role to a new slave - 3. Deploy changes - 4. 
Remove the 'cinder' node - Remove a 'controller' node - Add 'controller'+'cinder' multirole to a new slave - 5. Deploy changes - 6. Run verify networks - 7. Run OSTF - - Duration 50m - """ - - self.env.revert_snapshot("deploy_neutron_tun_ha") - cluster_id = self.fuel_web.get_last_created_cluster() - - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[5:7]) - - self.fuel_web.update_nodes( - cluster_id, - {'slave-06': ['cinder']}) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.update_nodes( - cluster_id, - {'slave-06': ['cinder']}, False, True,) - - self.fuel_web.update_nodes( - cluster_id, - {'slave-03': ['controller']}, False, True,) - - self.fuel_web.update_nodes( - cluster_id, - {'slave-07': ['controller', 'cinder']}) - - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - -@test(groups=["ha", "ha_neutron_tun_scale"]) -class TestHaNeutronScalability(TestBasic): - """TestHaNeutronScalability.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["neutron_tun_scalability"]) - @log_snapshot_after_test - def neutron_tun_scalability(self): - """Check HA mode on scalability - - Scenario: - 1. Create cluster - 2. Add 1 controller node - 3. Deploy the cluster - 4. Check swift, and invoke swift-rings-rebalance.sh - on primary controller if check failed - 5. Add 2 controller nodes - 6. Deploy changes - 7. Check swift, and invoke swift-rings-rebalance.sh - on primary controller if check failed - 8. Run OSTF - 9. Add 2 controller 1 compute nodes - 10. Deploy changes - 11. Check swift, and invoke swift-rings-rebalance.sh - on all the controllers - 12. Run OSTF - 13. Delete the primary and the last added controller. - 14. Deploy changes - 15. Check swift, and invoke swift-rings-rebalance.sh - on all the controllers - 16. Run OSTF - - Duration 160m - Snapshot neutron_tun_scalability - - """ - - def _check_pacemaker(devops_nodes): - for devops_node in devops_nodes: - with QuietLogger(): - self.fuel_web.assert_pacemaker( - devops_node.name, - devops_nodes, []) - ret = self.fuel_web.get_pacemaker_status(devops_node.name) - assert_true( - re.search('vip__management\s+\(ocf::fuel:ns_IPaddr2\):' - '\s+Started node', ret), - 'vip management started') - assert_true( - re.search('vip__public\s+\(ocf::fuel:ns_IPaddr2\):' - '\s+Started node', ret), - 'vip public started') - - self.env.revert_snapshot("ready_with_9_slaves") - # Step 1 Create cluster with 1 controller - logger.info("STEP1: Create new cluster {0}".format( - self.__class__.__name__)) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'] - } - ) - - nodes = {'slave-01': ['controller']} - logger.info("Adding new node to the cluster: {0}".format(nodes)) - self.fuel_web.update_nodes( - cluster_id, nodes) - self.fuel_web.deploy_cluster_wait(cluster_id) - - logger.info("STEP3: Deploy 1 node cluster finishes") - primary_node = self.env.d_env.get_node(name='slave-01') - - # Step 4. 
Check swift - logger.info("STEP4: Check swift on primary controller {0}".format( - primary_node)) - ip = self.fuel_web.get_nailgun_node_by_name(primary_node.name)['ip'] - Common.rebalance_swift_ring(ip) - - nodes = {'slave-02': ['controller'], - 'slave-03': ['controller']} - logger.info("STEP 4: Adding new nodes " - "to the cluster: {0}".format(nodes)) - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - logger.info("STEP6: Deploy 3 ctrl node cluster has finished") - controllers = ['slave-01', 'slave-02', 'slave-03'] - _check_pacemaker(self.env.d_env.get_nodes(name__in=controllers)) - - primary_node_s3 = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - logger.info("Primary controller after STEP6 is {0}".format( - primary_node_s3.name)) - logger.info("STEP7: Check swift on primary controller {0}".format( - primary_node_s3)) - ip = self.fuel_web.get_nailgun_node_by_name(primary_node_s3 - .name)['ip'] - Common.rebalance_swift_ring(ip) - - # Run smoke tests only according to ha and - # sanity executed in scope of deploy_cluster_wait() - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke']) - - nodes = {'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['compute']} - logger.info("Adding new nodes to the cluster: {0}".format(nodes)) - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - logger.info("STEP10: Deploy 5 ctrl node cluster has finished") - controllers = ['slave-01', 'slave-02', 'slave-03', 'slave-04', - 'slave-05'] - _check_pacemaker(self.env.d_env.get_nodes(name__in=controllers)) - - primary_node_s9 = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - logger.info("Primary controller after STEP10 is {0}".format( - primary_node_s9.name)) - - logger.info("STEP11: Check swift on primary controller {0}".format( - primary_node_s9)) - - ip = self.fuel_web.get_nailgun_node_by_name(primary_node_s9 - .name)['ip'] - Common.rebalance_swift_ring(ip) - - # Run smoke tests only according to ha and - # sanity executed in scope of deploy_cluster_wait() - - # Step 12. Run OSTF - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke']) - - nodes = {primary_node_s9.name: ['controller'], - 'slave-05': ['controller']} - logger.info("STEP13: Deleting nodes from the cluster: {0}".format( - nodes)) - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - - # Step 14. Deploy changes - self.fuel_web.deploy_cluster_wait(cluster_id) - - nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - devops_nodes = [self.fuel_web.get_devops_node_by_nailgun_node(node) - for node in nodes] - _check_pacemaker(devops_nodes) - - logger.info("STEP13-14: Scale down happened. " - "3 controller should be now") - primary_node_s14 = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.get_node(name=devops_nodes[0].name)) - - logger.info("Primary controller after STEP15 is {0}".format( - primary_node_s14.name)) - - logger.info("STEP15: Check swift on primary controller {0}".format( - primary_node_s14)) - - ip = self.fuel_web.get_nailgun_node_by_name(primary_node_s14 - .name)['ip'] - Common.rebalance_swift_ring(ip) - - # Step 16. 
-        self.fuel_web.run_ostf(
-            cluster_id=cluster_id,
-            test_sets=['sanity', 'smoke', 'ha'])
-        self.env.make_snapshot("neutron_tun_scalability")
diff --git a/fuelweb_test/tests/test_neutron_tun_base.py b/fuelweb_test/tests/test_neutron_tun_base.py
deleted file mode 100644
index 900a337ab..000000000
--- a/fuelweb_test/tests/test_neutron_tun_base.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis.asserts import assert_equal
-
-from fuelweb_test.helpers.common import Common
-from fuelweb_test import logger
-from fuelweb_test.settings import DEPLOYMENT_MODE
-from fuelweb_test.settings import NEUTRON_SEGMENT
-from fuelweb_test.tests.base_test_case import TestBasic
-
-
-class NeutronTunHaBase(TestBasic):
-    """NeutronTunHaBase."""  # TODO documentation
-
-    def deploy_neutron_tun_ha_base(self, snapshot_name):
-        self.check_run(snapshot_name)
-        self.env.revert_snapshot("ready_with_5_slaves")
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": NEUTRON_SEGMENT['tun'],
-                'tenant': 'haTun',
-                'user': 'haTun',
-                'password': 'haTun'
-            }
-        )
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['controller'],
-                'slave-03': ['controller'],
-                'slave-04': ['compute'],
-                'slave-05': ['compute']
-            }
-        )
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        cluster = self.fuel_web.client.get_cluster(cluster_id)
-        assert_equal(str(cluster['net_provider']), 'neutron')
-
-        self.fuel_web.verify_network(cluster_id)
-        devops_node = self.fuel_web.get_nailgun_primary_node(
-            self.env.d_env.nodes().slaves[0])
-        logger.debug("devops node name is {0}".format(devops_node.name))
-        ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
-        Common.rebalance_swift_ring(ip)
-
-        self.fuel_web.run_ostf(
-            cluster_id=cluster_id,
-            test_sets=['ha', 'smoke', 'sanity'])
-
-        self.env.make_snapshot(snapshot_name, is_make=True)
diff --git a/fuelweb_test/tests/test_node_reinstallation.py b/fuelweb_test/tests/test_node_reinstallation.py
deleted file mode 100644
index 9dcd3ef0f..000000000
--- a/fuelweb_test/tests/test_node_reinstallation.py
+++ /dev/null
@@ -1,782 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
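# Editorial sketch, not part of the removed tree: the HA tests above call
# `Common.rebalance_swift_ring(ip)` after every topology change. Judging by
# the scenario text, the helper runs the swift-rings-rebalance.sh script on
# the given controller; a minimal stand-in along those lines (the script
# path is an assumption, SSHManager.check_call is the suite's own helper):

from fuelweb_test.helpers.ssh_manager import SSHManager


def rebalance_swift_ring_sketch(controller_ip):
    """Trigger the swift ring rebalance script on a controller node."""
    SSHManager().check_call(
        controller_ip, '/usr/local/bin/swift-rings-rebalance.sh')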
- -from cinderclient.exceptions import NotFound -from devops.helpers import helpers as devops_helpers -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.helpers.utils import preserve_partition -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test -class NodeReinstallationEnv(TestBasic): - """NodeReinstallationEnv.""" # TODO documentation - - @staticmethod - def reinstall_nodes(fuel_web_client, cluster_id, nodes=None): - """Provision and deploy the given cluster nodes.""" - task = fuel_web_client.client.provision_nodes(cluster_id, nodes) - fuel_web_client.assert_task_success(task) - task = fuel_web_client.client.deploy_nodes(cluster_id, nodes) - fuel_web_client.assert_task_success(task) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["node_reinstallation_env"]) - @log_snapshot_after_test - def node_reinstallation_env(self): - """Deploy a cluster for nodes reinstallation. - - Scenario: - 1. Create a cluster - 2. Add 3 nodes with controller role - 3. Add a node with compute and cinder roles - 4. Deploy the cluster - 5. Verify that the deployment is completed successfully - - Duration 190m - """ - self.check_run("node_reinstallation_env") - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'cinder'], - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot("node_reinstallation_env", is_make=True) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["failed_node_reinstallation_env"]) - @log_snapshot_after_test - def failed_node_reinstallation_env(self): - """Prepare a cluster for 'failed node reinstallation' tests. - - Scenario: - 1. Revert the snapshot - 2. Create a cluster - 3. Add 3 nodes with controller role - 4. Add a node with compute and cinder roles - 5. 
Provision nodes - - Duration 25m - """ - self.check_run("failed_node_reinstallation_env") - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'cinder'], - } - ) - - # Provision nodes - task = self.fuel_web.client.provision_nodes(cluster_id) - self.fuel_web.assert_task_success(task) - - self.env.make_snapshot("failed_node_reinstallation_env", is_make=True) - - -@test(groups=["ready_node_reinstallation"]) -class ReadyNodeReinstallation(TestBasic): - """ReadyNodeReinstallation.""" # TODO documentation - - @staticmethod - def _check_hostname(old_node_nailgun, reinstalled_node_nailgun): - """Check that the hostname is the same on both given nodes.""" - assert_equal(old_node_nailgun['hostname'], - reinstalled_node_nailgun['hostname'], - "Hostname of the reinstalled controller {0} has been " - "automatically changed to {1} one". format( - reinstalled_node_nailgun['hostname'], - old_node_nailgun['hostname'])) - - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["reinstall_single_regular_controller_node"]) - @log_snapshot_after_test - def reinstall_single_regular_controller_node(self): - """Verify reinstallation of a regular (non-primary) controller node. - - Scenario: - 1. Revert snapshot - 2. Select a non-primary controller - 3. Reinstall the controller - 4. Run network verification - 5. Run OSTF - 6. Verify that the hostname is not changed on reinstallation - of the node - - Duration: 100m - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select a non-primary controller - regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02") - - # Reinstall the controller - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(regular_ctrl['id'])]) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - # Verify that the hostname isn't changed on reinstallation of the node - self._check_hostname( - regular_ctrl, self.fuel_web.get_nailgun_node_by_name("slave-02")) - - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["reinstall_single_primary_controller_node"]) - @log_snapshot_after_test - def reinstall_single_primary_controller_node(self): - """Verify reinstallation of the primary controller node. - - Scenario: - 1. Revert snapshot - 2. Select the primary controller - 3. Reinstall the controller - 4. Run network verification - 5. Run OSTF - 6. Verify that the hostname is not changed on reinstallation - of the node - 7. 
Verify that the primary-controller role is not migrated on - reinstallation of the node - - Duration: 100m - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select the primary controller - primary_ctrl_devops = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - primary_ctrl_nailgun = self.fuel_web.get_nailgun_node_by_devops_node( - primary_ctrl_devops) - - # Reinstall the controller - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(primary_ctrl_nailgun['id'])]) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - # Verify that the hostname isn't changed on reinstallation of the node - self._check_hostname( - primary_ctrl_nailgun, - self.fuel_web.get_nailgun_node_by_devops_node( - primary_ctrl_devops)) - - # Verify that the primary-controller role is not migrated on - # reinstallation of the node - reinstalled_primary_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - assert_equal( - reinstalled_primary_ctrl.name, - primary_ctrl_devops.name, - "The primary-controller was migrated from {0} slave to {1} " - "one.".format(primary_ctrl_devops.name, - reinstalled_primary_ctrl.name) - ) - - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["reinstall_single_compute_node"]) - @log_snapshot_after_test - def reinstall_single_compute_node(self): - """Verify reinstallation of a compute node. - - Scenario: - 1. Revert snapshot - 2. Select a compute node - 3. Reinstall the compute - 4. Run network verification - 5. Run OSTF - 6. Verify that the hostname is not changed on reinstallation - of the node - - Duration: 55m - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select a compute - cmp_nailgun = self.fuel_web.get_nailgun_node_by_name('slave-04') - - # Reinstall the compute - logger.info('Reinstall') - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(cmp_nailgun['id'])]) - - logger.info('Verify network') - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - # Verify that the hostname isn't changed on reinstallation of the node - self._check_hostname( - cmp_nailgun, self.fuel_web.get_nailgun_node_by_name('slave-04')) - - -@test(groups=["full_cluster_reinstallation"]) -class FullClusterReinstallation(TestBasic): - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["full_cluster_reinstallation"]) - @log_snapshot_after_test - def full_cluster_reinstallation(self): - """Verify full cluster reinstallation. - - Scenario: - 1. Revert snapshot - 2. Create an empty sample file on each node to check that it is not - available after cluster reinstallation - 3. Reinstall all cluster nodes - 4. Verify that all nodes are reinstalled (not just rebooted), - i.e. there is no sample file on a node - 5. Run network verification - 6. 
Run OSTF - - Duration: 145m - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Create a sample file on each node to check that it is not - # available after nodes' reinstallation - file_name = "node_reinstallation.test" - for slave in self.env.d_env.nodes().slaves[0:4]: - with self.fuel_web.get_ssh_for_node(slave.name) as remote: - remote.execute("touch {0}".format(file_name)) - node = self.fuel_web.get_nailgun_node_by_name(slave.name) - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(node['id'])]) - - # Verify that all node are reinstalled (not just rebooted), - # i.e. there is no sample file on a node - for slave in self.env.d_env.nodes().slaves[0:4]: - with self.fuel_web.get_ssh_for_node(slave.name) as remote: - res = remote.execute("test -e {0}".format(file_name)) - assert_equal(1, res['exit_code'], - "{0} node was not reinstalled.".format(slave.name)) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - -@test(groups=["error_node_reinstallation"]) -class ErrorNodeReinstallation(TestBasic): - """ErrorNodeReinstallation.""" # TODO documentation - - @staticmethod - def _turnoff_executable_ruby(node): - """Set mode -x for /usr/bin/ruby - - :param node: dict, node attributes - """ - ssh = SSHManager() - cmd = 'chmod -x /usr/bin/ruby' - ssh.execute_on_remote(node['ip'], cmd) - - @staticmethod - def _turnon_executable_ruby(node): - """Set mode +x for /usr/bin/ruby - - :param node: dict, node attributes - """ - ssh = SSHManager() - cmd = 'chmod +x /usr/bin/ruby' - ssh.execute_on_remote(node['ip'], cmd) - - def _put_cluster_in_error_state(self, cluster_id, node): - """Put cluster in error state - - :param cluster_id: int, number of cluster id - :param node: dict, node attributes - :return: - """ - - # Start deployment for corresponding node - task = self.fuel_web.client.deploy_nodes( - cluster_id, - [str(node['id'])]) - # disable ruby and wait for cluster will be in error state - self._turnoff_executable_ruby(node) - self.fuel_web.assert_task_failed(task) - # enable ruby - self._turnon_executable_ruby(node) - - @test(depends_on=[NodeReinstallationEnv.failed_node_reinstallation_env], - groups=["reinstall_failed_primary_controller_deployment"]) - @log_snapshot_after_test - def reinstall_failed_primary_controller_deployment(self): - """Verify reinstallation of a failed controller. - - Scenario: - 1. Revert the snapshot - 2. Start deployment; fail deployment on primary controller - 3. Reinstall the cluster - 4. Run network verification - 5. Run OSTF - - Duration: 145m - """ - self.show_step(1) - self.env.revert_snapshot("failed_node_reinstallation_env") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - # Get the primary controller - pr_controller = self.fuel_web.get_nailgun_node_by_name('slave-01') - self._put_cluster_in_error_state(cluster_id, pr_controller) - - self.show_step(3) - NodeReinstallationEnv.reinstall_nodes(self.fuel_web, cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - @test(depends_on=[NodeReinstallationEnv.failed_node_reinstallation_env], - groups=["reinstall_failed_regular_controller_deployment"]) - @log_snapshot_after_test - def reinstall_failed_regular_controller_deployment(self): - """Verify reinstallation of a failed controller. - - Scenario: - 1. 
Revert the snapshot - 2. Start deployment; fail deployment on regular controller - 3. Reinstall the cluster - 4. Run network verification - 5. Run OSTF - - Duration: 145m - """ - self.show_step(1) - self.env.revert_snapshot("failed_node_reinstallation_env") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - # Get a regular controller - regular_ctrl = self.fuel_web.get_nailgun_node_by_name('slave-02') - self._put_cluster_in_error_state(cluster_id, regular_ctrl) - - self.show_step(3) - NodeReinstallationEnv.reinstall_nodes(self.fuel_web, cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - @test(depends_on=[NodeReinstallationEnv.failed_node_reinstallation_env], - groups=["reinstall_failed_compute_deployment"]) - @log_snapshot_after_test - def reinstall_failed_compute_deployment(self): - """Verify reinstallation of a failed compute. - - Scenario: - 1. Revert the snapshot - 2. Start deployment; fail deployment on one of computes - 3. Reinstall the cluster - 4. Run network verification - 5. Run OSTF - - Duration: 45m - """ - self.show_step(1) - self.env.revert_snapshot("failed_node_reinstallation_env") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - # Get nailgun nodes - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - cmps_nailgun = [n for n in nailgun_nodes - if 'compute' in n['pending_roles']] - self._put_cluster_in_error_state(cluster_id, cmps_nailgun[0]) - - self.show_step(3) - NodeReinstallationEnv.reinstall_nodes(self.fuel_web, cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - -@test(groups=["partition_preservation"], - enabled=False) -class PartitionPreservation(TestBasic): - """PartitionPreservation.""" # TODO documentation - - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["cinder_nova_partition_preservation"]) - @log_snapshot_after_test - def cinder_nova_partition_preservation(self): - """Verify partition preservation of Cinder and Nova instances data. - - Scenario: - 1. Revert the snapshot - 2. Create an OS volume and OS instance - 3. Mark 'cinder' partition to be preserved - 4. Mark 'vm' partition to be preserved - 5. Reinstall the compute node - 6. Run network verification - 7. Run OSTF - 8. Verify that the volume is present and has 'available' status - after the node reinstallation - 9. 
Verify that the VM is available and pingable - after the node reinstallation - - Duration: 115m - - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Create an OS volume - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - volume = os_conn.create_volume() - - # Create an OS instance - cmp_host = os_conn.get_hypervisors()[0] - - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - - vm = os_conn.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format( - cmp_host.hypervisor_hostname), label=net_label) - vm_floating_ip = os_conn.assign_floating_ip(vm) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(vm_floating_ip.ip, 22), - timeout=120) - - cmp_nailgun = self.fuel_web.get_nailgun_node_by_fqdn( - cmp_host.hypervisor_hostname) - - # Mark 'cinder' and 'vm' partitions to be preserved - with self.env.d_env.get_admin_remote() as remote: - preserve_partition(remote, cmp_nailgun['id'], "cinder") - preserve_partition(remote, cmp_nailgun['id'], "vm") - - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(cmp_nailgun['id'])]) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - # Verify that the created volume is still available - try: - volume = os_conn.cinder.volumes.get(volume.id) - except NotFound: - raise AssertionError( - "{0} volume is not available after its {1} hosting node " - "reinstallation".format(volume.id, cmp_nailgun['fqdn'])) - expected_status = "available" - assert_equal( - expected_status, - volume.status, - "{0} volume status is {1} after its {2} hosting node " - "reinstallation. Expected status is {3}.".format( - volume.id, volume.status, cmp_nailgun['fqdn'], expected_status) - ) - - # Verify that the VM is still available - try: - os_conn.verify_instance_status(vm, 'ACTIVE') - except AssertionError: - raise AssertionError( - "{0} VM is not available after its {1} hosting node " - "reinstallation".format(vm.name, - cmp_host.hypervisor_hostname)) - assert_true(devops_helpers.tcp_ping(vm_floating_ip.ip, 22), - "{0} VM is not accessible via its {1} floating " - "ip".format(vm.name, vm_floating_ip)) - - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["mongo_mysql_partition_preservation"], - enabled=False) - @log_snapshot_after_test - def mongo_mysql_partition_preservation(self): - """Verify partition preservation of Ceilometer and mysql data. - - Scenario: - 1. Revert the snapshot - 2. Create a ceilometer alarm - 3. Mark 'mongo' and 'mysql' partitions to be - preserved on one of controllers - 4. Reinstall the controller - 5. Verify that the alarm is present after the node reinstallation - 6. Verify that the reinstalled node joined the Galera cluster - and synced its state - 7. Run network verification - 8. 
Run OSTF - - Duration: 110m - - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Create a ceilometer alarm - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - alarm_name = "test_alarm" - res = remote.execute( - "source openrc; " - "ceilometer alarm-threshold-create " - "--name {0} " - "-m {1} " - "--threshold {2}".format(alarm_name, "cpu_util", "80.0") - ) - assert_equal(0, res['exit_code'], - "Creating alarm via ceilometer CLI failed.") - initial_alarms = remote.execute( - "source openrc; ceilometer alarm-list") - - mongo_nailgun = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['mongo'])[0] - - # Mark 'mongo' and 'mysql' partitions to be preserved - with self.env.d_env.get_admin_remote() as remote: - preserve_partition(remote, mongo_nailgun['id'], "mongo") - preserve_partition(remote, mongo_nailgun['id'], "mysql") - - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(mongo_nailgun['id'])]) - - with self.fuel_web.get_ssh_for_nailgun_node(mongo_nailgun) as rmt: - alarms = rmt.execute("source openrc; ceilometer alarm-list") - assert_equal( - initial_alarms['stdout'], - alarms['stdout'], - "{0} alarm is not available in mongo after reinstallation " - "of the controllers".format(alarm_name)) - - cmd = ("mysql --connect_timeout=5 -sse " - "\"SHOW STATUS LIKE 'wsrep%';\"") - err_msg = ("Galera isn't ready on {0} " - "node".format(mongo_nailgun['hostname'])) - devops_helpers.wait( - lambda: rmt.execute(cmd)['exit_code'] == 0, - timeout=10 * 60, - timeout_msg=err_msg) - - cmd = ("mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE " - "'wsrep_local_state_comment';\"") - err_msg = ("The reinstalled node {0} is not synced with the " - "Galera cluster".format(mongo_nailgun['hostname'])) - devops_helpers.wait( - lambda: rmt.execute(cmd)['stdout'][0].split()[1] == "Synced", - timeout=10 * 60, - timeout_msg=err_msg) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - -@test(groups=["known_issues"]) -class StopReinstallation(TestBasic): - """StopReinstallation.""" # TODO documentation - - @staticmethod - def _stop_reinstallation(fuel_web_client, cluster_id, node, slave_nodes): - - logger.info('Start reinstall') - - task = fuel_web_client.client.provision_nodes(cluster_id, node) - fuel_web_client.assert_task_success(task) - task = fuel_web_client.client.deploy_nodes(cluster_id, node) - fuel_web_client.assert_task_success(task, progress=60) - - logger.info('Stop reinstall') - fuel_web_client.stop_deployment_wait(cluster_id) - fuel_web_client.wait_nodes_get_online_state( - slave_nodes, - timeout=8 * 60) - - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["compute_stop_reinstallation"]) - @log_snapshot_after_test - def compute_stop_reinstallation(self): - """Verify stop reinstallation of compute. - - Scenario: - 1. Revert the snapshot - 2. Create an OS volume and OS instance - 3. Mark 'cinder' and 'vm' partitions to be preserved - 4. Stop reinstallation process of compute - 5. Start the reinstallation process again - 6. Run network verification - 7. Run OSTF - 8. Verify that the volume is present and has 'available' status - after the node reinstallation - 9. 
Verify that the VM is available and pingable - after the node reinstallation - - Duration: 115m - - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Create an OS volume - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - volume = os_conn.create_volume() - - # Create an OS instance - cmp_host = os_conn.get_hypervisors()[0] - - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - - vm = os_conn.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format( - cmp_host.hypervisor_hostname), label=net_label) - vm_floating_ip = os_conn.assign_floating_ip(vm) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(vm_floating_ip.ip, 22), - timeout=120) - - cmp_nailgun = self.fuel_web.get_nailgun_node_by_fqdn( - cmp_host.hypervisor_hostname) - - # Mark 'cinder' and 'vm' partitions to be preserved - with self.env.d_env.get_admin_remote() as remote: - preserve_partition(remote, cmp_nailgun['id'], "cinder") - preserve_partition(remote, cmp_nailgun['id'], "vm") - - slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - slave_nodes) - - logger.info('Stop reinstallation process') - self._stop_reinstallation(self.fuel_web, cluster_id, - [str(cmp_nailgun['id'])], devops_nodes) - - self.fuel_web.verify_network(cluster_id) - logger.info('Start the reinstallation process again') - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(cmp_nailgun['id'])]) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - # Verify that the created volume is still available - try: - volume = os_conn.cinder.volumes.get(volume.id) - except NotFound: - raise AssertionError( - "{0} volume is not available after its {1} hosting node " - "reinstallation".format(volume.id, cmp_nailgun['fqdn'])) - expected_status = "available" - assert_equal( - expected_status, - volume.status, - "{0} volume status is {1} after its {2} hosting node " - "reinstallation. Expected status is {3}.".format( - volume.id, volume.status, cmp_nailgun['fqdn'], expected_status) - ) - - # Verify that the VM is still available - try: - os_conn.verify_instance_status(vm, 'ACTIVE') - except AssertionError: - raise AssertionError( - "{0} VM is not available after its {1} hosting node " - "reinstallation".format(vm.name, - cmp_host.hypervisor_hostname)) - assert_true(devops_helpers.tcp_ping(vm_floating_ip.ip, 22), - "{0} VM is not accessible via its {1} floating " - "ip".format(vm.name, vm_floating_ip)) - - @test(depends_on=[NodeReinstallationEnv.node_reinstallation_env], - groups=["node_stop_reinstallation"]) - @log_snapshot_after_test - def node_stop_reinstallation(self): - """Verify stop reinstallation of node. - - Scenario: - 1. Revert the snapshot - 2. Stop reinstallation process of node - 3. Start the reinstallation process again - 4. Run network verification - 5. 
Run OSTF - - Duration: 115m - - """ - self.env.revert_snapshot("node_reinstallation_env") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select a node - ctrl_nailgun = self.fuel_web.get_nailgun_node_by_name('slave-01') - - slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - slave_nodes) - - logger.info('Stop reinstallation process of node') - self._stop_reinstallation(self.fuel_web, cluster_id, - [str(ctrl_nailgun['id'])], devops_nodes) - - logger.info('Start the reinstallation process again') - NodeReinstallationEnv.reinstall_nodes( - self.fuel_web, cluster_id, [str(ctrl_nailgun['id'])]) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) diff --git a/fuelweb_test/tests/test_offloading_types.py b/fuelweb_test/tests/test_offloading_types.py deleted file mode 100644 index b5429fcfb..000000000 --- a/fuelweb_test/tests/test_offloading_types.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from copy import deepcopy - -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test.helpers.checkers import check_offload -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["offloading"]) -class TestOffloading(TestBasic): - - interfaces = { - settings.iface_alias('eth0'): ['fuelweb_admin'], - settings.iface_alias('eth1'): ['public'], - settings.iface_alias('eth2'): ['management'], - settings.iface_alias('eth3'): ['private'], - settings.iface_alias('eth4'): ['storage'], - } - - offloadings_1 = {'generic-receive-offload': False, - 'generic-segmentation-offload': False, - 'tcp-segmentation-offload': False, - 'large-receive-offload': False} - - offloadings_2 = {'rx-all': True, - 'rx-vlan-offload': True, - 'tx-vlan-offload': True} - - @staticmethod - def check_offloading_modes(nodes, offloadings, iface, state): - for node in nodes: - for name in offloadings: - result = check_offload(node['ip'], iface, name) - assert_equal(result, state, - "Offload type {0} is {1} on {2}".format( - name, result, node['name'])) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["offloading_neutron_vlan", "offloading"]) - @log_snapshot_after_test - def offloading_neutron_vlan(self): - """Deploy cluster with specific offload modes and neutron VLAN - - Scenario: - 1. Create cluster with neutron VLAN - 2. Add 1 node with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Setup offloading types - 5. Run network verification - 6. Deploy the cluster - 7. Verify offloading modes on nodes - 8. Run network verification - 9. 
Run OSTF - - Duration 30m - Snapshot offloading_neutron_vlan - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - - iface1 = settings.iface_alias('eth3') - iface2 = settings.iface_alias('eth2') - - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - - self.show_step(4) - offloadings_1 = {} - offloadings_2 = {} - for node in nodes: - modes = self.fuel_web.get_offloading_modes(node['id'], [iface1]) - for name in self.offloadings_1: - if name in modes and name not in offloadings_1: - offloadings_1[name] = self.offloadings_1[name] - modes = self.fuel_web.get_offloading_modes(node['id'], [iface2]) - for name in self.offloadings_2: - if name in modes and name not in offloadings_2: - offloadings_2[name] = self.offloadings_2[name] - - assert_true(len(offloadings_1) > 0, "No types for disable offloading") - assert_true(len(offloadings_2) > 0, "No types for enable offloading") - - offloadings = { - iface1: offloadings_1, - iface2: offloadings_2 - } - for node in nodes: - self.fuel_web.update_node_networks( - node['id'], - interfaces_dict=deepcopy(self.interfaces)) - for offloading in offloadings: - self.fuel_web.update_offloads( - node['id'], offloadings[offloading], offloading) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.check_offloading_modes(nodes, offloadings_1, iface1, 'off') - self.check_offloading_modes(nodes, offloadings_2, iface2, 'on') - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("offloading_neutron_vlan") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["offloading_neutron_vxlan", "offloading"]) - @log_snapshot_after_test - def offloading_neutron_vxlan(self): - """Deploy cluster with specific offload modes and neutron VXLAN - - Scenario: - 1. Create cluster with neutron VXLAN - 2. Add 1 node with controller role - 3. Add 1 node with compute role and 1 node with cinder role - 4. Setup offloading types - 5. Run network verification - 6. Deploy the cluster - 7. Verify offloading modes on nodes - 8. Run network verification - 9. 
Run OSTF
-
-        Duration 30m
-        Snapshot offloading_neutron_vxlan
-
-        """
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        self.show_step(1, initialize=True)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE,
-            settings={
-                "net_provider": 'neutron',
-                "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
-            }
-        )
-
-        self.show_step(2)
-        self.show_step(3)
-        self.fuel_web.update_nodes(
-            cluster_id, {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['cinder']
-            }
-        )
-
-        iface1 = settings.iface_alias('eth3')
-        iface2 = settings.iface_alias('eth2')
-
-        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-
-        self.show_step(4)
-        offloadings_1 = {}
-        offloadings_2 = {}
-        for node in nodes:
-            modes = self.fuel_web.get_offloading_modes(node['id'], [iface1])
-            for name in self.offloadings_1:
-                if name in modes and name not in offloadings_1:
-                    offloadings_1[name] = self.offloadings_1[name]
-            modes = self.fuel_web.get_offloading_modes(node['id'], [iface2])
-            for name in self.offloadings_2:
-                if name in modes and name not in offloadings_2:
-                    offloadings_2[name] = self.offloadings_2[name]
-
-        assert_true(len(offloadings_1) > 0, "No offload types to disable")
-        assert_true(len(offloadings_2) > 0, "No offload types to enable")
-
-        offloadings = {
-            iface1: offloadings_1,
-            iface2: offloadings_2
-        }
-        for node in nodes:
-            self.fuel_web.update_node_networks(
-                node['id'],
-                interfaces_dict=deepcopy(self.interfaces))
-            for offloading in offloadings:
-                self.fuel_web.update_offloads(
-                    node['id'], offloadings[offloading], offloading)
-
-        self.show_step(5)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.show_step(7)
-        self.check_offloading_modes(nodes, offloadings_1, iface1, 'off')
-        self.check_offloading_modes(nodes, offloadings_2, iface2, 'on')
-
-        self.show_step(8)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(9)
-        self.fuel_web.run_ostf(cluster_id=cluster_id)
-
-        self.env.make_snapshot("offloading_neutron_vxlan")
diff --git a/fuelweb_test/tests/test_ovs_firewall.py b/fuelweb_test/tests/test_ovs_firewall.py
deleted file mode 100644
index 2b09d778f..000000000
--- a/fuelweb_test/tests/test_ovs_firewall.py
+++ /dev/null
@@ -1,740 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
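# Editorial sketch, not part of the removed tree: `check_offload(ip, iface,
# name)` in the offloading tests above is expected to return 'on' or 'off'
# for a single NIC offload mode. A minimal ethtool-based equivalent, reusing
# the suite's SSHManager helper (the parsing one-liner is an assumption):

from fuelweb_test.helpers.ssh_manager import SSHManager


def check_offload_sketch(ip, interface, offload_type):
    """Return the state ('on'/'off') of one offload mode on a remote NIC."""
    cmd = ("ethtool --show-offload {0} | "
           "awk -F ': ' '$1 == \"{1}\" {{print $2}}'".format(
               interface, offload_type))
    result = SSHManager().check_call(ip, cmd)
    # ethtool prints e.g. "tcp-segmentation-offload: on"; fixed modes are
    # shown as "off [fixed]", so keep only the first token of the value.
    return result.stdout[0].split()[0] if result.stdout else 'unknown'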
- -from __future__ import unicode_literals -import random - -from devops.helpers import helpers as devops_helpers -from devops.helpers.ssh_client import SSHAuth -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_not_equal -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.checkers import check_firewall_driver -from fuelweb_test.helpers.checkers import check_settings_requirements -from fuelweb_test.helpers.checkers import enable_feature_group -from fuelweb_test.helpers.checkers import ping6_from_instance -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.helpers.utils import get_instance_ipv6 -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) -ssh_manager = SSHManager() - - -class CheckOVSFirewall(TestBasic): - """Basic class for the check of OVS firewall deployments""" - - @staticmethod - def get_flows(ip): - cmd = 'ovs-ofctl dump-flows br-int' - return ssh_manager.check_call(ip, cmd) - - @staticmethod - def get_ifaces(ip): - cmd = 'ip -o link show' - return ssh_manager.check_call(ip, cmd) - - @staticmethod - def get_ovs_bridge_ifaces(ip): - cmd = 'ovs-vsctl list-ifaces br-int' - return ssh_manager.check_call(ip, cmd) - - def check_ovs_firewall_functionality(self, cluster_id, compute_ip, - dpdk=False): - """Check firewall functionality - - :param cluster_id: int, cluster id - :param compute_ip: str, compute ip - :param dpdk: bool, is DPDK enabled - """ - flows = self.get_flows(compute_ip) - if dpdk: - ifaces = self.get_ovs_bridge_ifaces(compute_ip) - else: - ifaces = self.get_ifaces(compute_ip) - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - if dpdk: - server = self.boot_dpdk_instance(os_conn, cluster_id) - current_ifaces = self.get_ovs_bridge_ifaces(compute_ip) - else: - server = os_conn.create_server_for_migration(label=net_name) - current_ifaces = self.get_ifaces(compute_ip) - current_flows = self.get_flows(compute_ip) - assert_equal(len(current_ifaces.stdout) - len(ifaces.stdout), 1, - "Check is failed:" - " {}\n\n{}".format(ifaces, current_ifaces)) - assert_not_equal(set(flows.stdout), set(current_flows.stdout), - "Check is failed. Passed data is equal:" - " {}\n\n{}".format(flows, current_flows)) - float_ip = os_conn.assign_floating_ip(server) - logger.info("Floating address {0} associated with instance {1}" - .format(float_ip.ip, server.id)) - - logger.info("Wait for ping from instance {} " - "by floating ip".format(server.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(float_ip.ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". 
- format(server.id, 300))) - os_conn.delete_instance(server) - - def boot_dpdk_instance(self, os_conn, cluster_id, - mem_page_size='2048', net_name=None): - """Boot VM with HugePages with enabled DPDK for private network - - :param os_conn: an object of connection to openstack services - :param cluster_id: an integer number of cluster id - :param mem_page_size: huge pages size - :param net_name: str, network name - :return: obj, object of booted instance - """ - - extra_specs = { - 'hw:mem_page_size': mem_page_size - } - if net_name is None: - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - flavor_id = random.randint(10, 10000) - name = 'system_test-{}'.format(random.randint(10, 10000)) - flavor = os_conn.create_flavor(name=name, ram=64, - vcpus=1, disk=1, - flavorid=flavor_id, - extra_specs=extra_specs) - - server = os_conn.create_server_for_migration(neutron=True, - label=net_name, - flavor_id=flavor.id) - os_conn.verify_instance_status(server, 'ACTIVE') - return server - - -@test(groups=["ovs_firewall"]) -class TestOVSFirewall(CheckOVSFirewall): - """The current test suite checks deployment of clusters - with OVS firewall for neutron security groups - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_non_ha_cluster_with_ovs_firewall_vlan"]) - @log_snapshot_after_test - def deploy_non_ha_cluster_with_ovs_firewall_vlan(self): - """Deploy non-HA cluster with VLAN, OVS firewall driver - - Scenario: - 1. Create new environment with VLAN segmentation for Neutron - 2. Add controller and compute nodes - 3. Enable OVS firewall driver for neutron security groups - 4. Run network verification - 5. Deploy environment - 6. Run OSTF - 7. Check option "firewall_driver" in config files - 8. Boot instance with custom security group - - Snapshot: deploy_non_ha_cluster_with_ovs_firewall_vlan - - """ - self.check_run("deploy_non_ha_cluster_with_ovs_firewall_vlan") - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "vlan" - } - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - }) - - self.show_step(3) - self.fuel_web.set_ovs_firewall_driver(cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(7) - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id) - for node in nodes: - check_firewall_driver(node['ip'], node['roles'][0], 'openvswitch') - - self.show_step(8) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - self.check_ovs_firewall_functionality(cluster_id, compute['ip']) - self.env.make_snapshot( - "deploy_non_ha_cluster_with_ovs_firewall_vlan", is_make=True) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_non_ha_cluster_with_ovs_firewall_vxlan"]) - @log_snapshot_after_test - def deploy_non_ha_cluster_with_ovs_firewall_vxlan(self): - """Deploy non-HA cluster with VXLAN, OVS firewall driver - - Scenario: - 1. Create new environment with VXLAN segmentation for Neutron - 2. Add controller and compute nodes - 3. Enable OVS firewall driver for neutron security groups - 4. 
Run network verification - 5. Deploy environment - 6. Run OSTF - 7. Check option "firewall_driver" in config files - 8. Boot instance with custom security group - - Snapshot: deploy_non_ha_cluster_with_ovs_firewall_vxlan - - """ - self.check_run("deploy_non_ha_cluster_with_ovs_firewall_vxlan") - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "tun" - } - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - }) - - self.show_step(3) - self.fuel_web.set_ovs_firewall_driver(cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(7) - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id) - for node in nodes: - check_firewall_driver(node['ip'], node['roles'][0], 'openvswitch') - - self.show_step(8) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - self.check_ovs_firewall_functionality(cluster_id, compute['ip']) - self.env.make_snapshot( - "deploy_non_ha_cluster_with_ovs_firewall_vxlan", is_make=True) - - @test(depends_on_groups=["deploy_non_ha_cluster_with_ovs_firewall_vlan"], - groups=["deploy_non_ha_cluster_with_ovs_firewall_ipv6_vlan"]) - @log_snapshot_after_test - def deploy_non_ha_cluster_with_ovs_firewall_ipv6_vlan(self): - """Deploy non-HA cluster with VLAN, OVS firewall driver with the - check of IPv6 functionality - - Scenario: - 1. Revert deploy_non_ha_cluster_with_ovs_firewall_vlan snapshot - 2. Create network resources: two dualstack network IPv6 subnets - (should be in SLAAC mode, - address space should not intersect), - virtual router and set gateway. - 3. Create a Security Group, - that allows SSH and ICMP for both IPv4 and IPv6. - 4. Launch two instances, one for each network. - 5. Attach Floating IP for both instances. - 6. SSH to the main instance and ping6 another instance. 
- - Duration 10m - Snapshot deploy_non_ha_cluster_with_ovs_firewall_ipv6_vlan - - """ - self.show_step(1) - self.env.revert_snapshot( - "deploy_non_ha_cluster_with_ovs_firewall_vlan") - - cluster_id = self.fuel_web.get_last_created_cluster() - public_vip = self.fuel_web.get_public_vip(cluster_id) - logger.info('Public vip is %s', public_vip) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - tenant = os_conn.get_tenant('admin') - - self.show_step(2) - net1, net2 = os_conn.create_network_resources_for_ipv6_test(tenant) - - self.show_step(3) - security_group = os_conn.create_sec_group_for_ssh() - - self.show_step(4) - instance1 = os_conn.create_server( - name='instance1', - security_groups=[security_group], - net_id=net1['id'], - ) - - instance2 = os_conn.create_server( - name='instance2', - security_groups=[security_group], - net_id=net2['id'], - ) - - self.show_step(5) - floating_ip = os_conn.assign_floating_ip(instance1) - floating_ip2 = os_conn.assign_floating_ip(instance2) - - self.show_step(6) - get_instance_ipv6(instance1, net1) - instance2_ipv6 = get_instance_ipv6(instance2, net2) - - node_ip = self.fuel_web.get_node_ip_by_devops_name("slave-01") - remote = ssh_manager.get_remote(node_ip) - for instance_ip, instance in ( - (floating_ip.ip, instance1), - (floating_ip2.ip, instance2) - ): - logger.info("Wait for ping from instance {} " - "by floating ip".format(instance.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(instance_ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". - format(instance.id, 300))) - - ping6_from_instance(remote, floating_ip.ip, instance2_ipv6) - - self.env.make_snapshot( - 'deploy_non_ha_cluster_with_ovs_firewall_ipv6_vlan') - - @test(depends_on_groups=["deploy_non_ha_cluster_with_ovs_firewall_vxlan"], - groups=["deploy_non_ha_cluster_with_ovs_firewall_ipv6_vxlan"]) - @log_snapshot_after_test - def deploy_non_ha_cluster_with_ovs_firewall_ipv6_vxlan(self): - """Deploy non-HA cluster with VXLAN, OVS firewall driver with the - check of IPv6 functionality - - Scenario: - 1. Revert deploy_non_ha_cluster_with_ovs_firewall_vxlan snapshot - 2. Create network resources: two dualstack network IPv6 subnets - (should be in SLAAC mode, - address space should not intersect), - virtual router and set gateway. - 3. Create a Security Group, - that allows SSH and ICMP for both IPv4 and IPv6. - 4. Launch two instances, one for each network. - 5. Attach Floating IP for both instances. - 6. SSH to the main instance and ping6 another instance. 
- - Duration 10m - Snapshot deploy_non_ha_cluster_with_ovs_firewall_ipv6_vlan - - """ - self.show_step(1) - self.env.revert_snapshot( - "deploy_non_ha_cluster_with_ovs_firewall_vxlan") - - cluster_id = self.fuel_web.get_last_created_cluster() - public_vip = self.fuel_web.get_public_vip(cluster_id) - logger.info('Public vip is %s', public_vip) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - tenant = os_conn.get_tenant('admin') - - self.show_step(2) - net1, net2 = os_conn.create_network_resources_for_ipv6_test(tenant) - - self.show_step(3) - security_group = os_conn.create_sec_group_for_ssh() - - self.show_step(4) - instance1 = os_conn.create_server( - name='instance1', - security_groups=[security_group], - net_id=net1['id'], - ) - - instance2 = os_conn.create_server( - name='instance2', - security_groups=[security_group], - net_id=net2['id'], - ) - - self.show_step(5) - floating_ip = os_conn.assign_floating_ip(instance1) - floating_ip2 = os_conn.assign_floating_ip(instance2) - - self.show_step(6) - get_instance_ipv6(instance1, net1) - instance2_ipv6 = get_instance_ipv6(instance2, net2) - - node_ip = self.fuel_web.get_node_ip_by_devops_name("slave-01") - remote = ssh_manager.get_remote(node_ip) - for instance_ip, instance in ( - (floating_ip.ip, instance1), - (floating_ip2.ip, instance2) - ): - logger.info("Wait for ping from instance {} " - "by floating ip".format(instance.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(instance_ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". - format(instance.id, 300))) - - ping6_from_instance(remote, floating_ip.ip, instance2_ipv6) - - self.env.make_snapshot( - 'deploy_non_ha_cluster_with_ovs_firewall_ipv6_vxlan') - - -@test(groups=["ovs_firewall_with_dpdk"]) -class TestOVSFirewallDPDK(CheckOVSFirewall): - """The current test suite checks deployment of clusters - with OVS firewall for neutron security groups with enabled DPDK - """ - - tests_requirements = {'KVM_USE': True} - - def __init__(self): - super(TestOVSFirewallDPDK, self).__init__() - check_settings_requirements(self.tests_requirements) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_ovs_firewall_and_dpdk_vlan"]) - @log_snapshot_after_test - def deploy_ovs_firewall_and_dpdk_vlan(self): - """Deploy non-HA cluster with VLAN, OVS firewall driver and DPDK - - Scenario: - 1. Create new environment with VLAN segmentation for Neutron - 2. Add controller and compute nodes - 3. Enable OVS firewall driver for neutron security groups - 4. Configure private network in DPDK mode - 5. Configure HugePages for compute nodes - 6. Run network verification - 7. Deploy environment - 8. Run OSTF - 9. Check option "firewall_driver" in config files - 10. 
Boot instance with custom security group - - Snapshot: deploy_ovs_firewall_and_dpdk_vlan - - """ - self.check_run("deploy_ovs_firewall_and_dpdk_vlan") - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - enable_feature_group(self.env, 'experimental') - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "vlan" - } - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - }) - - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status='pending_roles')[0] - - self.show_step(3) - self.fuel_web.set_ovs_firewall_driver(cluster_id) - - self.show_step(4) - self.fuel_web.enable_dpdk(compute['id']) - - self.show_step(5) - self.fuel_web.setup_hugepages( - compute['id'], hp_2mb=256, hp_dpdk_mb=1024) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id) - for node in nodes: - check_firewall_driver(node['ip'], node['roles'][0], 'openvswitch') - - self.show_step(10) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - self.check_ovs_firewall_functionality(cluster_id, compute['ip'], - dpdk=True) - self.env.make_snapshot( - "deploy_ovs_firewall_and_dpdk_vlan", is_make=True) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_ovs_firewall_and_dpdk_vxlan"]) - @log_snapshot_after_test - def deploy_ovs_firewall_and_dpdk_vxlan(self): - """Deploy non-HA cluster with VXLAN, OVS firewall driver and DPDK - - Scenario: - 1. Create new environment with VLAN segmentation for Neutron - 2. Add controller and compute nodes - 3. Enable OVS firewall driver for neutron security groups - 4. Configure private network in DPDK mode - 5. Configure HugePages for compute nodes - 6. Run network verification - 7. Deploy environment - 8. Run OSTF - 9. Check option "firewall_driver" in config files - 10. 
Boot instance with custom security group - - Snapshot: deploy_ovs_firewall_and_dpdk_vxlan - - """ - self.check_run("deploy_ovs_firewall_and_dpdk_vxlan") - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - enable_feature_group(self.env, 'experimental') - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "tun" - } - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - }) - - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status='pending_roles')[0] - - self.show_step(3) - self.fuel_web.set_ovs_firewall_driver(cluster_id) - - self.show_step(4) - self.fuel_web.enable_dpdk(compute['id']) - - self.show_step(5) - self.fuel_web.setup_hugepages( - compute['id'], hp_2mb=256, hp_dpdk_mb=1024) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id) - for node in nodes: - check_firewall_driver(node['ip'], node['roles'][0], 'openvswitch') - - self.show_step(10) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - self.check_ovs_firewall_functionality(cluster_id, compute['ip'], - dpdk=True) - self.env.make_snapshot( - "deploy_ovs_firewall_and_dpdk_vxlan", is_make=True) - - @test(depends_on_groups=["deploy_ovs_firewall_and_dpdk_vlan"], - groups=["deploy_ovs_firewall_and_dpdk_vlan_ipv6"]) - @log_snapshot_after_test - def deploy_ovs_firewall_and_dpdk_vlan_ipv6(self): - """Deploy non-HA cluster with DPDK, VLAN, OVS firewall driver with the - check of IPv6 functionality - - Scenario: - 1. Revert deploy_ovs_firewall_and_dpdk_vlan snapshot - 2. Create network resources: two dualstack network IPv6 subnets - (should be in SLAAC mode, - address space should not intersect), - virtual router and set gateway. - 3. Create a Security Group, - that allows SSH and ICMP for both IPv4 and IPv6. - 4. Launch two instances, one for each network. - 5. Attach Floating IP for both instances. - 6. SSH to the main instance and ping6 another instance. 
- - Duration 10m - Snapshot deploy_ovs_firewall_and_dpdk_vlan_ipv6 - - """ - self.show_step(1) - self.env.revert_snapshot("deploy_ovs_firewall_and_dpdk_vlan") - - cluster_id = self.fuel_web.get_last_created_cluster() - public_vip = self.fuel_web.get_public_vip(cluster_id) - logger.info('Public vip is %s', public_vip) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - tenant = os_conn.get_tenant('admin') - - self.show_step(2) - net1, net2 = os_conn.create_network_resources_for_ipv6_test(tenant) - - self.show_step(3) - instance1 = self.boot_dpdk_instance(os_conn, cluster_id, - net_name=net1['name']) - - instance2 = self.boot_dpdk_instance(os_conn, cluster_id, - net_name=net2['name']) - - self.show_step(5) - floating_ip = os_conn.assign_floating_ip(instance1) - floating_ip2 = os_conn.assign_floating_ip(instance2) - - self.show_step(6) - get_instance_ipv6(instance1, net1) - instance2_ipv6 = get_instance_ipv6(instance2, net2) - - node_ip = self.fuel_web.get_node_ip_by_devops_name("slave-01") - remote = ssh_manager.get_remote(node_ip) - for instance_ip, instance in ( - (floating_ip.ip, instance1), - (floating_ip2.ip, instance2) - ): - logger.info("Wait for ping from instance {} " - "by floating ip".format(instance.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(instance_ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". - format(instance.id, 300))) - - ping6_from_instance(remote, floating_ip.ip, instance2_ipv6) - - self.env.make_snapshot('deploy_ovs_firewall_and_dpdk_vlan_ipv6') - - @test(depends_on_groups=["deploy_ovs_firewall_and_dpdk_vxlan"], - groups=["deploy_ovs_firewall_and_dpdk_vxlan_ipv6"]) - @log_snapshot_after_test - def deploy_ovs_firewall_and_dpdk_vxlan_ipv6(self): - """Deploy non-HA cluster with DPDK, VXLAN, OVS firewall driver with - the check of IPv6 functionality - - Scenario: - 1. Revert deploy_ovs_firewall_and_dpdk_vlan snapshot - 2. Create network resources: two dualstack network IPv6 subnets - (should be in SLAAC mode, - address space should not intersect), - virtual router and set gateway. - 3. Create a Security Group, - that allows SSH and ICMP for both IPv4 and IPv6. - 4. Launch two instances, one for each network. - 5. Attach Floating IP for both instances. - 6. SSH to the main instance and ping6 another instance. 
- - Duration 10m - Snapshot deploy_ovs_firewall_and_dpdk_vxlan_ipv6 - - """ - self.show_step(1) - self.env.revert_snapshot("deploy_ovs_firewall_and_dpdk_vxlan") - - cluster_id = self.fuel_web.get_last_created_cluster() - public_vip = self.fuel_web.get_public_vip(cluster_id) - logger.info('Public vip is %s', public_vip) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - tenant = os_conn.get_tenant('admin') - - self.show_step(2) - net1, net2 = os_conn.create_network_resources_for_ipv6_test(tenant) - - self.show_step(3) - instance1 = self.boot_dpdk_instance(os_conn, cluster_id, - net_name=net1['name']) - - instance2 = self.boot_dpdk_instance(os_conn, cluster_id, - net_name=net2['name']) - - self.show_step(5) - floating_ip = os_conn.assign_floating_ip(instance1) - floating_ip2 = os_conn.assign_floating_ip(instance2) - - self.show_step(6) - get_instance_ipv6(instance1, net1) - instance2_ipv6 = get_instance_ipv6(instance2, net2) - - node_ip = self.fuel_web.get_node_ip_by_devops_name("slave-01") - remote = ssh_manager.get_remote(node_ip) - for instance_ip, instance in ( - (floating_ip.ip, instance1), - (floating_ip2.ip, instance2) - ): - logger.info("Wait for ping from instance {} " - "by floating ip".format(instance.id)) - devops_helpers.wait( - lambda: devops_helpers.tcp_ping(instance_ip, 22), - timeout=300, - timeout_msg=("Instance {0} is unreachable for {1} seconds". - format(instance.id, 300))) - - ping6_from_instance(remote, floating_ip.ip, instance2_ipv6) - - self.env.make_snapshot('deploy_ovs_firewall_and_dpdk_vxlan_ipv6') diff --git a/fuelweb_test/tests/test_public_api.py b/fuelweb_test/tests/test_public_api.py deleted file mode 100644 index ee65124ff..000000000 --- a/fuelweb_test/tests/test_public_api.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from devops.error import TimeoutError -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from devops.helpers.helpers import wait_pass -from devops.helpers.ssh_client import SSHAuth -from proboscis import asserts -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import utils -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) - - -@test(groups=["public_api"]) -class TestPublicApi(TestNetworkTemplatesBase): - """TestPublicApi.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=['deploy_env_with_public_api']) - @log_snapshot_after_test - def deploy_env_with_public_api(self): - """Deploy environment with enabled DMZ network for API. - - Scenario: - 1. Revert snapshot with ready master node - 2. Create new environment - 3. Run network verification - 4. 
Deploy the environment - 5. Run network verification - 6. Run OSTF - 7. Reboot cluster nodes - 8. Run OSTF - 9. Create environment snapshot deploy_env_with_public_api - - Duration 120m - Snapshot deploy_env_with_public_api - """ - - asserts.assert_true(settings.ENABLE_DMZ, - "ENABLE_DMZ variable wasn't exported") - self.check_run('deploy_env_with_public_api') - - self.show_step(1) - self.env.revert_snapshot('ready_with_5_slaves') - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - ) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'], - }, - update_interfaces=False - ) - - network_template = utils.get_network_template('public_api') - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - - net = self.fuel_web.get_network_pool('os-api') - nodegroup = self.fuel_web.get_nodegroup(cluster_id) - os_api_template = { - "group_id": nodegroup['id'], - "name": 'os-api', - "cidr": net['network'], - "gateway": net['gateway'], - "meta": { - 'notation': 'cidr', - 'render_type': None, - 'map_priority': 2, - 'configurable': True, - 'use_gateway': True, - 'name': 'os-api', - 'cidr': net['network'], - 'vlan_start': None, - 'vips': ['haproxy'] - } - } - self.fuel_web.client.add_network_group(os_api_template) - - logger.debug('Networks: {0}'.format( - self.fuel_web.client.get_network_groups())) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(7) - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - self.fuel_web.warm_restart_nodes( - self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes)) - - controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=cluster_id, - roles=['controller'] - )[0] - controller_devops = \ - self.fuel_web.get_devops_node_by_nailgun_node(controller) - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up([controller_devops.name]) - - # Wait until Cinder services UP on a controller - self.fuel_web.wait_cinder_is_up([controller_devops.name]) - - wait_pass( - lambda: self.fuel_web.run_ostf(cluster_id, - test_sets=['sanity', 'smoke']), - interval=10, - timeout=12 * 60 - ) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - self.env.make_snapshot('deploy_env_with_public_api', is_make=True) - - @test(depends_on=[deploy_env_with_public_api], - groups=['public_api_check_security_rules']) - @log_snapshot_after_test - def public_api_check_security_rules(self): - """Check that security rules are properly applied for DMZ network - - Scenario: - 1. Revert snapshot from previous test - 2. Run instance - 3. Try to access horizon from instance - 4. 
Remove instance - """ - - self.show_step(1) - self.env.revert_snapshot('deploy_env_with_public_api') - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - controller_ip = self.fuel_web.get_public_vip(cluster_id) - - os_conn = os_actions.OpenStackActions( - controller_ip, - user='admin', - passwd='admin', - tenant='admin') - - # create instance - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - vm = os_conn.create_server_for_migration(neutron=True, label=net_name) - - # Check if instance active - os_conn.verify_instance_status(vm, 'ACTIVE') - - vm_floating_ip = os_conn.assign_floating_ip(vm) - logger.info('Trying to get vm via tcp.') - try: - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120) - except TimeoutError: - raise TimeoutError('Can not ping instance' - ' by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip)) - - self.show_step(3) - attributes = self.fuel_web.client.get_cluster_attributes(cluster_id) - protocol = 'https' if attributes['editable']['public_ssl']['horizon'][ - 'value'] is True else 'http' - - cmd = 'curl -I ' \ - '{proto}://{ip}/horizon --insecure'.format(proto=protocol, - ip=controller_ip) - logger.info('Trying to access horizon from instance: {}'.format(cmd)) - - controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=cluster_id, - roles=['controller'] - )[0] - ssh = self.fuel_web.get_ssh_for_nailgun_node(controller) - res = ssh.execute_through_host(hostname=vm_floating_ip.ip, - cmd=cmd, - auth=cirros_auth) - logger.info(res.stdout) - asserts.assert_equal(res.exit_code, 0, - "Instance can't access " - "horizon via DMZ network") - - self.show_step(4) - # delete instance - os_conn.delete_instance(vm) - os_conn.verify_srv_deleted(vm) diff --git a/fuelweb_test/tests/test_pullrequest.py b/fuelweb_test/tests/test_pullrequest.py deleted file mode 100644 index f069c307a..000000000 --- a/fuelweb_test/tests/test_pullrequest.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["test_pullrequest"]) -class TestPullRequest(TestBasic): - """TestPullRequest.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_pr_ha"]) - @log_snapshot_after_test - def deploy_pr_ha(self): - """Deploy one-controller cluster in HA mode with Neutron GRE - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. 
Validate cluster network - - Snapshot: deploy_pr_ha - - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id, - is_feature=True, - timeout=50 * 60) - self.fuel_web.run_ostf( - cluster_id=self.fuel_web.get_last_created_cluster() - ) diff --git a/fuelweb_test/tests/test_reduced_footprint.py b/fuelweb_test/tests/test_reduced_footprint.py deleted file mode 100644 index 16e6a55eb..000000000 --- a/fuelweb_test/tests/test_reduced_footprint.py +++ /dev/null @@ -1,728 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import yaml - -from devops.helpers.helpers import wait -from devops.helpers.ssh_client import SSHAuth -from paramiko.ssh_exception import ChannelException -from proboscis import asserts -from proboscis import test - -from core.helpers.setup_teardown import setup_teardown - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import get_network_template -from fuelweb_test.helpers.utils import preserve_partition -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["virt_role", "reduced_footprint"]) -class TestVirtRole(TestBasic): - """Tests for virt role. - - Part of Reduced footprint feature. - Creating reduced footprint environments performed by assigning new role - named "virt" to physical server, after that user should upload VMs - properties as node attributes. Virtual machines will be treated by Fuel - as standard bare metal servers. - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["spawn_one_vm_on_one_virt_node"]) - @log_snapshot_after_test - def spawn_one_vm_on_one_virt_node(self): - """Spawn one vm node on one slave node - - Scenario: - 1. Create cluster - 2. Assign compute and virt roles to slave node - 3. Upload configuration for one VM - 4. Spawn VM - 5. Wait till VM become available for allocation - - Duration: 60m - """ - - self.env.revert_snapshot("ready_with_1_slaves") - - checkers.enable_feature_group(self.env, "advanced") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'] - }) - - asserts.assert_true(settings.HARDWARE['slave_node_memory'] >= 1024, - "Wrong SLAVE_NODE_MEMORY value: {0}." - "Please allocate more than 1024Mb.". 
- format(settings.HARDWARE['slave_node_memory'])) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute', 'virt'] - }) - - node_id = self.fuel_web.get_nailgun_node_by_name("slave-01")['id'] - - self.fuel_web.client.create_vm_nodes( - node_id, - [{ - "id": 1, - "mem": 1, - "cpu": 1 - }]) - - self.fuel_web.spawn_vms_wait(cluster_id) - wait(lambda: len(self.fuel_web.client.list_nodes()) == 2, - timeout=60 * 60, - timeout_msg=("Timeout waiting 2 available nodes, " - "current nodes: \n{0}" + '\n'.join( - ['Name: {0}, status: {1}, online: {2}'. - format(i['name'], i['status'], i['online']) - for i in self.fuel_web.client.list_nodes()]))) - - self.env.make_snapshot("spawn_one_vm_on_one_virt_node") - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["spawn_two_vms_on_one_virt_node"]) - @log_snapshot_after_test - def spawn_two_vms_on_one_virt_node(self): - """Spawn two vm nodes on one slave node - - Scenario: - 1. Create cluster - 2. Assign compute and virt roles to slave node - 3. Upload configuration for two VMs - 4. Spawn VMs - 5. Wait till VMs become available for allocation - - Duration: 60m - """ - - self.env.revert_snapshot("ready_with_1_slaves") - - checkers.enable_feature_group(self.env, "advanced") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'] - }) - - asserts.assert_true(settings.HARDWARE['slave_node_memory'] >= 2048, - "Wrong SLAVE_NODE_MEMORY value: {0}." - "Please allocate more than 2048Mb.". - format(settings.HARDWARE['slave_node_memory'])) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute', 'virt'] - }) - - node_id = self.fuel_web.get_nailgun_node_by_name("slave-01")['id'] - - self.fuel_web.client.create_vm_nodes( - node_id, - [ - { - "id": 1, - "mem": 1, - "cpu": 1 - }, - { - "id": 2, - "mem": 1, - "cpu": 1 - } - ]) - - self.fuel_web.spawn_vms_wait(cluster_id) - wait(lambda: len(self.fuel_web.client.list_nodes()) == 3, - timeout=60 * 60, - timeout_msg=("Timeout waiting 3 available nodes, " - "current nodes: \n{0}" + '\n'.join( - ['Name: {0}, status: {1}, online: {2}'. - format(i['name'], i['status'], i['online']) - for i in self.fuel_web.client.list_nodes()]))) - - self.env.make_snapshot("spawn_two_vms_on_one_virt_node") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["spawn_three_vms_across_three_virt_nodes"]) - @log_snapshot_after_test - def spawn_three_vms_across_three_virt_nodes(self): - """Spawn three vm nodes across three slave nodes - - Scenario: - 1. Create cluster - 2. Assign compute and virt roles to three slave nodes - 3. Upload VM configuration for one VM to each slave node - 4. Spawn VMs - 5. Wait till VMs become available for allocation - - Duration: 60m - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - checkers.enable_feature_group(self.env, "advanced") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'] - }) - - asserts.assert_true(settings.HARDWARE['slave_node_memory'] >= 1024, - "Wrong SLAVE_NODE_MEMORY value: {0}." - "Please allocate more than 1024Mb.". 
- format(settings.HARDWARE['slave_node_memory'])) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute', 'virt'], - 'slave-02': ['compute', 'virt'], - 'slave-03': ['compute', 'virt'] - }) - - hw_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - for node in hw_nodes: - self.fuel_web.client.create_vm_nodes( - node['id'], - [ - { - "id": 1, - "mem": 1, - "cpu": 1 - } - ]) - - self.fuel_web.spawn_vms_wait(cluster_id) - wait(lambda: len(self.fuel_web.client.list_nodes()) == 6, - timeout=60 * 120, - timeout_msg=("Timeout waiting 6 available nodes, " - "current nodes: \n{0}" + '\n'.join( - ['Name: {0}, status: {1}, online: {2}'. - format(i['name'], i['status'], i['online']) - for i in self.fuel_web.client.list_nodes()]))) - - self.env.make_snapshot("spawn_three_vms_across_three_virt_nodes") - - -@test(groups=["virt_role_baremetal", "reduced_footprint_baremetal"]) -class TestVirtRoleBaremetal(TestBasic): - """Tests for virt role on baremetal servers""" - - # pylint: disable=no-self-use - def check_net_template_presence(self): - """Check for network template availability before starting any test""" - if not (settings.RF_NET_TEMPLATE and - os.path.exists(settings.RF_NET_TEMPLATE)): - raise AssertionError("Template for reduced footprint environment " - "is not provided") - # pylint: enable=no-self-use - - @property - def ssh_auth(self): - """Returns SSHAuth instance for connecting to slaves through - master node""" - # pylint: disable=protected-access - return SSHAuth( - username=settings.SSH_SLAVE_CREDENTIALS['login'], - password=settings.SSH_SLAVE_CREDENTIALS['password'], - key=self.ssh_manager._get_keys()[0]) - # pylint: disable=protected-access - - def deploy_cluster_wait(self, cluster_id): - """Initiate cluster deployment and wait until it is finished. - - As some environments have slaves accessible only from - master the conventional FuelWebClient.deploy_cluster_wait method would - fail on such checks. - The current method just deploys the cluster; the cluster health is - checked anyway by a subsequent OSTF run. - - :param cluster_id: id, ID of a cluster to deploy - :return: None - """ - self.fuel_web.client.assign_ip_address_before_deploy_start(cluster_id) - task = self.fuel_web.deploy_cluster(cluster_id) - self.fuel_web.assert_task_success(task, interval=30) - self.fuel_web.check_cluster_status(cluster_id, False) - - def get_slave_total_cpu(self, slave_ip): - """Get total number of CPUs on the given baremetal slave node. - - :param slave_ip: str, IP address of a slave node - :return: int - """ - with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin: - result = admin.execute_through_host( - slave_ip, - "grep -c processor /proc/cpuinfo", - auth=self.ssh_auth, - timeout=60) - asserts.assert_equal( - result['exit_code'], 0, - "Failed to get number of CPUs on {0} slave node".format(slave_ip)) - # pylint: disable=no-member - cpu = int(result['stdout'][0].strip()) - # pylint: enable=no-member - return cpu - - def get_slave_total_mem(self, slave_ip): - """Get total amount of RAM (in GB) on the given baremetal slave node. 
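Illustrative arithmetic (example value, not from a real node):
/proc/meminfo reports MemTotal in kB, so integer division by
1024 ** 2 yields whole gigabytes, rounding down:

    >>> 16384256 // pow(1024, 2)  # a nominal 16 GB node
    15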
- - :param slave_ip: str, IP address of a slave node - :return: int, total amount of RAM in GB on the given node - """ - with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin: - result = admin.execute_through_host( - slave_ip, - "grep -i memtotal /proc/meminfo | awk '{print $2}'", - auth=self.ssh_auth, - timeout=60) - asserts.assert_equal( - result['exit_code'], 0, - "Failed to get amount of RAM on {0} slave node".format(slave_ip)) - # pylint: disable=no-member - mem_in_gb = int(result['stdout'][0].strip()) // pow(1024, 2) - # pylint: enable=no-member - return mem_in_gb - - def update_virt_vm_template( - self, - path='/etc/puppet/modules/osnailyfacter/templates/vm_libvirt.erb'): - """Update virtual VM template for VLAN environment - - :param path: str, path to the virtual vm template on Fuel master node - :return: None - """ - - cmd = ('sed -i "s/mesh/prv/; s/.*prv.*/&\\n /" {0}'.format(path)) - self.ssh_manager.execute_on_remote(self.ssh_manager.admin_ip, cmd) - - def update_virtual_nodes(self, cluster_id, nodes_dict): - """Update nodes attributes with nailgun client. - - FuelWebClient.update_nodes uses devops nodes as data source. - Virtual nodes are not in devops database, so we have to - update nodes attributes directly via nailgun client. - - :param cluster_id: int, ID of a cluster in question - :param nodes_dict: dict, 'name: role(s)' key-paired collection of - virtual nodes to add to the cluster - :return: None - """ - - nodes = self.fuel_web.client.list_nodes() - virt_nodes = [node for node in nodes if node['cluster'] != cluster_id] - asserts.assert_equal(len(nodes_dict), - len(virt_nodes), - "Number of given virtual nodes differs from the" - "number of virtual nodes available in nailgun:\n" - "Nodes dict: {0}\nAvailable nodes: {1}" - .format(nodes_dict, - [node['name'] for node in virt_nodes])) - - for virt_node, virt_node_name in zip(virt_nodes, nodes_dict): - new_roles = nodes_dict[virt_node_name] - new_name = '{}_{}'.format(virt_node_name, "_".join(new_roles)) - data = {"cluster_id": cluster_id, - "pending_addition": True, - "pending_deletion": False, - "pending_roles": new_roles, - "name": new_name} - self.fuel_web.client.update_node(virt_node['id'], data) - - def wait_for_slave(self, slave, timeout=10 * 60): - """Wait for slave ignoring connection errors that appear - until the node is online (after reboot, environment reset, etc.)""" - def ssh_ready(ip): - with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \ - admin: - try: - return admin.execute_through_host( - ip, "cd ~", auth=self.ssh_auth)['exit_code'] == 0 - except ChannelException: - return False - - wait(lambda: ssh_ready(slave['ip']), - timeout=timeout, - timeout_msg="{0} didn't appear online within {1} " - "seconds". format(slave['name'], timeout)) - - @staticmethod - def get_network_template(template, template_dir=settings.RF_NET_TEMPLATE): - """Download a network template from the provided local directory - - :param template: str, template name - :param template_dir: str, template path - :return: dict - """ - template_path = os.path.join(template_dir, '{0}.yaml'.format(template)) - with open(template_path) as template_file: - return yaml.load(template_file) - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["baremetal_deploy_cluster_with_virt_node"]) - @log_snapshot_after_test - @setup_teardown(setup=check_net_template_presence) - def baremetal_deploy_cluster_with_virt_node(self): - """Baremetal deployment of cluster with one virtual node - - Scenario: - 1. 
Create a cluster - 2. Assign compute and virt roles to the slave node - 3. Upload configuration for one VM - 4. Apply network template for the env and spawn the VM - 5. Assign controller role to the VM - 6. Deploy the environment - 7. Run OSTF - 8. Reset the environment - 9. Redeploy cluster - 10. Run OSTF - - Duration: 240m - """ - - self.env.revert_snapshot("ready_with_1_slaves") - - self.show_step(1) - checkers.enable_feature_group(self.env, "advanced") - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - }) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute', 'virt'] - }) - self.show_step(3) - node = self.fuel_web.get_nailgun_node_by_name("slave-01") - self.fuel_web.client.create_vm_nodes( - node['id'], - [ - { - "id": 1, - "mem": self.get_slave_total_mem(node['ip']) - 2, - "cpu": self.get_slave_total_cpu(node['ip']) - 2, - "vda_size": "100G" - } - ]) - - self.show_step(4) - self.update_virt_vm_template() - net_template = get_network_template("baremetal_rf") - self.fuel_web.client.upload_network_template(cluster_id, net_template) - self.fuel_web.spawn_vms_wait(cluster_id) - wait(lambda: len(self.fuel_web.client.list_nodes()) == 2, - timeout=60 * 60, - timeout_msg=("Timeout waiting for available nodes, " - "current nodes: \n{0}" + '\n'.join( - ['Name: {0}, status: {1}, online: {2}'. - format(i['name'], i['status'], i['online']) - for i in self.fuel_web.client.list_nodes()]))) - - self.show_step(5) - virt_nodes = {'vslave-01': ['controller']} - self.update_virtual_nodes(cluster_id, virt_nodes) - - self.show_step(6) - self.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - self.fuel_web.stop_reset_env_wait(cluster_id) - for node in self.fuel_web.client.list_nodes(): - self.wait_for_slave(node) - - self.show_step(9) - self.deploy_cluster_wait(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["baremetal_deploy_virt_nodes_on_different_computes"]) - @log_snapshot_after_test - @setup_teardown(setup=check_net_template_presence) - def baremetal_deploy_virt_nodes_on_different_computes(self): - """Baremetal deployment of a cluster with virtual nodes in HA mode; - each virtual node on a separate compute - - Scenario: - 1. Create cluster - 2. Assign compute and virt roles to three slave nodes - 3. Upload VM configuration for one VM to each slave node - 4. Apply network template for the env and spawn the VMs - 5. Assign controller role to VMs - 6. Deploy cluster - 7. Run OSTF - 8. Mark 'mysql' partition to be preserved on one of controllers - 9. Reinstall the controller - 10. Verify that the reinstalled controller joined the Galera - cluster and synced its state - 11. Run OSTF - 12. Gracefully reboot one controller using "reboot" command - and wait till it comes up - 13. Run OSTF - 14. Forcefully reboot one controller using "reboot -f" command - and wait till it comes up - 15. Run OSTF - 16. Gracefully reboot one compute using "reboot" command - and wait till compute and controller come up - 17. Run OSTF - 18. Forcefully reboot one compute using "reboot -f" command - and wait till compute and controller come up - 19. 
Run OSTF - - Duration: 360m - """ - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1) - checkers.enable_feature_group(self.env, "advanced") - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - }) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute', 'virt'], - 'slave-02': ['compute', 'virt'], - 'slave-03': ['compute', 'virt'] - }) - - self.show_step(3) - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - self.fuel_web.client.create_vm_nodes( - node['id'], - [{ - "id": 1, - "mem": 2, - "cpu": 2, - "vda_size": "100G" - }]) - - self.show_step(4) - self.update_virt_vm_template() - net_template = get_network_template("baremetal_rf_ha") - self.fuel_web.client.upload_network_template(cluster_id, net_template) - self.fuel_web.spawn_vms_wait(cluster_id) - wait(lambda: len(self.fuel_web.client.list_nodes()) == 6, - timeout=60 * 60, - timeout_msg=("Timeout waiting 2 available nodes, " - "current nodes: \n{0}" + '\n'.join( - ['Name: {0}, status: {1}, online: {2}'. - format(i['name'], i['status'], i['online']) - for i in self.fuel_web.client.list_nodes()]))) - - self.show_step(5) - virt_nodes = { - 'vslave-01': ['controller'], - 'vslave-02': ['controller'], - 'vslave-03': ['controller'] - } - self.update_virtual_nodes(cluster_id, virt_nodes) - - self.show_step(6) - self.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - virt_nodes = [n for n in self.fuel_web.client.list_nodes() - if n['name'].startswith('vslave')] - ctrl = virt_nodes[0] - with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin: - preserve_partition(admin, ctrl['id'], "mysql") - - self.show_step(9) - task = self.fuel_web.client.provision_nodes( - cluster_id, [str(ctrl['id'])]) - self.fuel_web.assert_task_success(task) - task = self.fuel_web.client.deploy_nodes( - cluster_id, [str(ctrl['id'])]) - self.fuel_web.assert_task_success(task) - - self.show_step(10) - cmd = "mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE 'wsrep%';\"" - with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin: - err_msg = ("Galera isn't ready on {0} node".format( - ctrl['hostname'])) - wait( - lambda: admin.execute_through_host( - ctrl['ip'], cmd, auth=self.ssh_auth)['exit_code'] == 0, - timeout=10 * 60, timeout_msg=err_msg) - - cmd = ("mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE " - "'wsrep_local_state_comment';\"") - err_msg = ("The reinstalled node {0} is not synced with the " - "Galera cluster".format(ctrl['hostname'])) - wait( - # pylint: disable=no-member - lambda: admin.execute_through_host( - ctrl['ip'], cmd, - auth=self.ssh_auth)['stdout'][0].split()[1] == "Synced", - # pylint: enable=no-member - timeout=10 * 60, - timeout_msg=err_msg) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(12) - self.show_step(13) - self.show_step(14) - self.show_step(15) - cmds = {"reboot": "gracefully", "reboot -f >/dev/null &": "forcefully"} - for cmd in cmds: - with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \ - admin: - asserts.assert_true( - admin.execute_through_host( - virt_nodes[1]['ip'], cmd, auth=self.ssh_auth, - timeout=60)['exit_code'] == 0, - "Failed to {0} reboot {1} controller" - "node".format(cmds[cmd], virt_nodes[1]['name'])) - self.wait_for_slave(virt_nodes[1]) - 
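# Ordering note (added remark, not original code): iterating a plain
# dict gives no guarantee that the graceful reboot above runs before
# the forceful one. If the order mattered, a sketch with OrderedDict
# would pin it:
#     from collections import OrderedDict
#     cmds = OrderedDict([("reboot", "gracefully"),
#                         ("reboot -f >/dev/null &", "forcefully")])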
- self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(16) - self.show_step(17) - self.show_step(18) - self.show_step(19) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - for cmd in cmds: - with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \ - admin: - asserts.assert_true( - admin.execute_through_host( - compute['ip'], cmd, auth=self.ssh_auth, - timeout=60)['exit_code'] == 0, - "Failed to {0} reboot {1} compute" - "node".format(cmds[cmd], compute['name'])) - self.wait_for_slave(compute) - for vm in virt_nodes: - self.wait_for_slave(vm) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["baremetal_deploy_virt_nodes_on_one_compute"]) - @log_snapshot_after_test - @setup_teardown(setup=check_net_template_presence) - def baremetal_deploy_virt_nodes_on_one_compute(self): - """Baremetal deployment of a cluster with virtual nodes in HA mode; - all virtual nodes on the same compute - - Scenario: - 1. Create a cluster - 2. Assign compute and virt roles to the slave node - 3. Upload configuration for three VMs - 4. Spawn the VMs and wait until they are available for allocation - 5. Assign controller role to the VMs - 6. Deploy the cluster - 7. Run OSTF - - Duration: 180m - """ - self.env.revert_snapshot("ready_with_1_slaves") - - self.show_step(1) - checkers.enable_feature_group(self.env, "advanced") - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings={ - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - }) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute', 'virt'], - }) - - self.show_step(3) - node = self.fuel_web.get_nailgun_node_by_name("slave-01") - self.fuel_web.client.create_vm_nodes( - node['id'], - [ - {"id": 1, "mem": 4, "cpu": 2, "vda_size": "100G"}, - {"id": 2, "mem": 4, "cpu": 2, "vda_size": "100G"}, - {"id": 3, "mem": 4, "cpu": 2, "vda_size": "100G"}, - ]) - - self.show_step(4) - self.update_virt_vm_template() - net_template = get_network_template("baremetal_rf") - self.fuel_web.client.upload_network_template(cluster_id, net_template) - self.fuel_web.spawn_vms_wait(cluster_id) - wait(lambda: len(self.fuel_web.client.list_nodes()) == 4, - timeout=60 * 60, - timeout_msg=("Timeout waiting for available nodes, " - "current nodes: \n{0}" + '\n'.join( - ['Name: {0}, status: {1}, online: {2}'. - format(i['name'], i['status'], i['online']) - for i in self.fuel_web.client.list_nodes()]))) - - self.show_step(5) - virt_nodes = { - 'vslave-01': ['controller'], - 'vslave-02': ['controller'], - 'vslave-03': ['controller']} - self.update_virtual_nodes(cluster_id, virt_nodes) - - self.show_step(6) - self.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf(cluster_id=cluster_id) diff --git a/fuelweb_test/tests/test_services.py b/fuelweb_test/tests/test_services.py deleted file mode 100644 index 0464a423e..000000000 --- a/fuelweb_test/tests/test_services.py +++ /dev/null @@ -1,980 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -from proboscis import asserts -from proboscis import test -from proboscis.asserts import assert_equal - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - -SERVICES_TO_CHECK = ['ceilometer-agent-notification', - 'ceilometer-collector', - # 'ceilometer-polling', - 'aodh-listener', - 'aodh-notifier', - # 'aodh-evaluator' - ] - - -@test(groups=["services", "services.sahara", "services_ha_one_controller"]) -class SaharaHAOneController(TestBasic): - """Sahara ha with 1 controller tests. - Don't recommend to start tests without kvm - Put Sahara image before start - """ - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_sahara_ha_one_controller_tun"]) - @log_snapshot_after_test - def deploy_sahara_ha_one_controller_tun(self): - """Deploy cluster in ha mode with 1 controller Sahara and Neutron VXLAN - - Scenario: - 1. Create a Fuel cluster. Set the option for Sahara installation - 2. Add 1 node with "controller" role - 3. Add 1 node with "compute" role - 4. Deploy the Fuel cluster - 5. Verify Sahara service on controller - 6. Run all sanity and smoke tests - 7. Register Vanilla2 image for Sahara - 8. Run platform Vanilla2 test for Sahara - - Duration 65m - Snapshot: deploy_sahara_ha_one_controller_tun - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - logger.debug('Create Fuel cluster for Sahara tests') - data = { - 'sahara': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'saharaSimple', - 'user': 'saharaSimple', - 'password': 'saharaSimple' - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], data['password'], data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - - logger.debug('Verify Sahara service on controller') - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - # count = 1 + api_workers (from sahara.conf) - checkers.verify_service(_ip, service_name='sahara-api', count=2) - # count = 2 * 1 (hardcoded by deployment team) - checkers.verify_service(_ip, service_name='sahara-engine', count=2) - - logger.debug('Check MD5 sum of Vanilla2 image') - check_image = checkers.check_image( - settings.SERVTEST_SAHARA_VANILLA_2_IMAGE, - settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5, - settings.SERVTEST_LOCAL_PATH) - asserts.assert_true(check_image) - - logger.debug('Run all sanity and smoke tests') - path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.' 
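# For context, the MD5 verification performed above by
# checkers.check_image can be sketched with hashlib (a hypothetical
# helper, not the checkers implementation):
#     import hashlib
#     def md5_matches(path, expected_md5, chunk=2 ** 20):
#         digest = hashlib.md5()
#         with open(path, 'rb') as image:
#             for block in iter(lambda: image.read(chunk), b''):
#                 digest.update(block)
#         return digest.hexdigest() == expected_md5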
- test_names = ['VanillaTwoTemplatesTest.test_vanilla_two_templates', - 'HDPTwoTemplatesTest.test_hdp_two_templates'] - self.fuel_web.run_ostf( - cluster_id=self.fuel_web.get_last_created_cluster(), - tests_must_be_passed=[path_to_tests + test_name - for test_name in test_names] - ) - - logger.debug('Import Vanilla2 image for Sahara') - - with open('{0}/{1}'.format( - settings.SERVTEST_LOCAL_PATH, - settings.SERVTEST_SAHARA_VANILLA_2_IMAGE)) as data: - os_conn.create_image( - name=settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME, - properties=settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_META, - data=data, - is_public=True, - disk_format='qcow2', - container_format='bare') - - path_to_tests = 'fuel_health.tests.tests_platform.test_sahara.' - test_names = ['VanillaTwoClusterTest.test_vanilla_two_cluster'] - for test_name in test_names: - logger.debug('Run platform test {0} for Sahara'.format(test_name)) - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=path_to_tests + test_name, timeout=60 * 200) - - self.env.make_snapshot("deploy_sahara_ha_one_controller_tun") - - -@test(groups=["services", "services.sahara", "services_ha"]) -class SaharaHA(TestBasic): - """Sahara HA tests. - Don't recommend to start tests without kvm - Put Sahara image before start - """ - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_sahara_ha_tun"]) - @log_snapshot_after_test - def deploy_sahara_ha_tun(self): - """Deploy cluster in HA mode with Sahara and Neutron VXLAN - - Scenario: - 1. Create a Fuel cluster. Set the option for Sahara installation - 2. Add 3 node with "controller" role - 3. Add 1 node with "compute" role - 4. Deploy the Fuel cluster - 5. Verify Sahara service on all controllers - 6. Run all sanity and smoke tests - 7. Register Vanilla2 image for Sahara - 8. 
Run platform Vanilla2 test for Sahara - - Duration 130m - Snapshot: deploy_sahara_ha_tun - - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - logger.debug('Create Fuel cluster for Sahara tests') - data = { - 'sahara': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'saharaHA', - 'user': 'saharaHA', - 'password': 'saharaHA' - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - cluster_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions( - cluster_vip, data['user'], data['password'], data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13) - - logger.debug('Verify Sahara service on all controllers') - for slave in ["slave-01", "slave-02", "slave-03"]: - _ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip'] - # count = 1 + api_workers (from sahara.conf) - checkers.verify_service(_ip, service_name='sahara-api', count=2) - # count = 2 * 1 (hardcoded by deployment team) - checkers.verify_service(_ip, - service_name='sahara-engine', count=2) - - logger.debug('Check MD5 sum of Vanilla2 image') - check_image = checkers.check_image( - settings.SERVTEST_SAHARA_VANILLA_2_IMAGE, - settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5, - settings.SERVTEST_LOCAL_PATH) - asserts.assert_true(check_image) - - logger.debug('Run all sanity and smoke tests') - path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.' - test_names = ['VanillaTwoTemplatesTest.test_vanilla_two_templates', - 'HDPTwoTemplatesTest.test_hdp_two_templates'] - self.fuel_web.run_ostf( - cluster_id=self.fuel_web.get_last_created_cluster(), - tests_must_be_passed=[path_to_tests + test_name - for test_name in test_names] - ) - - logger.debug('Import Vanilla2 image for Sahara') - - with open('{0}/{1}'.format( - settings.SERVTEST_LOCAL_PATH, - settings.SERVTEST_SAHARA_VANILLA_2_IMAGE)) as data: - os_conn.create_image( - name=settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME, - properties=settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_META, - data=data, - is_public=True, - disk_format='qcow2', - container_format='bare') - - path_to_tests = 'fuel_health.tests.tests_platform.test_sahara.' - test_names = ['VanillaTwoClusterTest.test_vanilla_two_cluster'] - for test_name in test_names: - logger.debug('Run platform test {0} for Sahara'.format(test_name)) - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=path_to_tests + test_name, timeout=60 * 200) - - self.env.make_snapshot("deploy_sahara_ha_tun") - - -@test(groups=["services", "services.murano", "services_ha_one_controller"], - enabled=False) -class MuranoHAOneController(TestBasic): - """Murano HA with 1 controller tests. - Don't recommend to start tests without kvm. - """ - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_murano_ha_one_controller_tun"]) - @log_snapshot_after_test - def deploy_murano_ha_one_controller_tun(self): - """Deploy cluster in HA mode with Murano and Neutron VXLAN - - Scenario: - 1. Create cluster. Set install Murano option - 2. Add 1 node with controller role - 3. Add 1 nodes with compute role - 4. Deploy the cluster - 5. Verify Murano services - 6. Run OSTF - 7. 
Run OSTF Murano platform tests - - Duration 40m - Snapshot: deploy_murano_ha_one_controller_tun - """ - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'murano': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'muranoSimple', - 'user': 'muranoSimple', - 'password': 'muranoSimple' - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data, - configure_ssl=False - ) - # TODO(freerunner): Need to configure SSL again when root cause for - # TODO(freerunner): https://bugs.launchpad.net/fuel/+bug/1590633 - # TODO(freerunner): will be found and fixed - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - checkers.verify_service(_ip, service_name='murano-api', - ignore_count_of_proccesses=True) - - logger.debug('Run sanity and functional Murano OSTF tests') - self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity']) - - logger.debug('Run OSTF platform tests') - - test_class_main = ('fuel_health.tests.tests_platform' - '.test_murano_linux.MuranoDeployLinuxServicesTests') - tests_names = ['test_deploy_dummy_app_with_glare', ] - - test_classes = [] - - for test_name in tests_names: - test_classes.append('{0}.{1}'.format(test_class_main, - test_name)) - - for test_name in test_classes: - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=test_name, timeout=60 * 20) - - self.env.make_snapshot("deploy_murano_ha_one_controller_tun") - - -@test(groups=["services", "services.murano", "services_ha"], - enabled=False) -class MuranoHA(TestBasic): - """Murano HA tests. - Don't recommend to start tests without kvm. - """ - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_murano_ha_with_tun"]) - @log_snapshot_after_test - def deploy_murano_ha_with_tun(self): - """Deploy cluster in ha mode with Murano and Neutron VXLAN - - Scenario: - 1. Create cluster. Set install Murano option - 2. Add 3 node with controller role - 3. Add 1 nodes with compute role - 4. Deploy the cluster - 5. Verify Murano services - 6. Run OSTF - 7. 
Run OSTF Murano platform tests - - Duration 100m - Snapshot: deploy_murano_ha_with_tun - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - data = { - 'murano': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'muranoHA', - 'user': 'muranoHA', - 'password': 'muranoHA' - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data, - configure_ssl=False - ) - # TODO(freerunner): Need to configure SSL again when root cause for - # TODO(freerunner): https://bugs.launchpad.net/fuel/+bug/1590633 - # TODO(freerunner): will be found and fixed - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - for slave in ["slave-01", "slave-02", "slave-03"]: - _ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip'] - checkers.verify_service(_ip, service_name='murano-api') - - logger.debug('Run sanity and functional Murano OSTF tests') - self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity']) - - logger.debug('Run OSTF platform tests') - - test_class_main = ('fuel_health.tests.tests_platform' - '.test_murano_linux.MuranoDeployLinuxServicesTests') - tests_names = ['test_deploy_dummy_app_with_glare'] - - test_classes = [] - - for test_name in tests_names: - test_classes.append('{0}.{1}'.format(test_class_main, - test_name)) - - for test_name in test_classes: - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=test_name, timeout=60 * 20) - - self.env.make_snapshot("deploy_murano_ha_with_tun") - - -class OSTFCeilometerHelper(TestBasic): - - def run_tests(self, cluster_id, skip_tests=None): - """Method run smoke, sanity and platform Ceilometer tests.""" - - logger.debug('Run sanity and smoke tests') - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke', 'sanity'], - timeout=60 * 15 - ) - - logger.debug('Run platform OSTF Ceilometer tests') - - test_class_main = ('fuel_health.tests.tests_platform.' - 'test_ceilometer.' 
- 'CeilometerApiPlatformTests')
- tests_names = ['test_check_alarm_state',
- 'test_create_sample',
- 'test_check_volume_events',
- 'test_check_glance_notifications',
- 'test_check_keystone_notifications',
- 'test_check_neutron_notifications',
- 'test_check_sahara_notifications',
- 'test_check_events_and_traits']
-
- test_classes = []
-
- for test_name in tests_names:
- test_classes.append('{0}.{1}'.format(test_class_main,
- test_name))
-
- all_tests = [
- test['id'] for test
- in self.fuel_web.fuel_client.ostf.get_tests(cluster_id)]
-
- for test_id in test_classes:
- if test_id in all_tests:
- if skip_tests and test_id.split('.')[-1] in skip_tests:
-
- all_status = self.fuel_web.run_single_ostf_test(
- cluster_id=cluster_id, test_sets=['tests_platform'],
- test_name=test_id, retries=True, timeout=60 * 20)
-
- test_name = next(
- test['name'] for test
- in self.fuel_web.fuel_client.ostf.get_tests(cluster_id)
- if test['id'] == test_id)
-
- status = next(test.values()[0]
- for test in all_status
- if test.keys()[0] == test_name)
-
- assert_equal(
- status, "skipped",
- 'Test: "{}" must have "skipped" status, '
- 'but its status is {}'.format(test_name, status))
- else:
- self.fuel_web.run_single_ostf_test(
- cluster_id=cluster_id, test_sets=['tests_platform'],
- test_name=test_id, timeout=60 * 20)
-
-
-@test(groups=["services", "services.ceilometer", "services_ha_one_controller"])
-class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
-
- @test(depends_on=[SetupEnvironment.prepare_slaves_3],
- groups=["deploy_ceilometer_ha_one_controller_with_mongo"],
- enabled=False)
- @log_snapshot_after_test
- def deploy_ceilometer_ha_one_controller_with_mongo(self):
- """Deploy cluster in HA mode with Ceilometer
-
- Scenario:
- 1. Create cluster. Set install Ceilometer option
- 2. Add 1 node with controller role
- 3. Add 1 node with compute role
- 4. Add 1 node with cinder role
- 5. Add 1 node with mongo role
- 6. Deploy the cluster
- 7. Verify ceilometer api is running
- 8. 
Run OSTF - - Duration 45m - Snapshot: deploy_ceilometer_ha_one_controller_with_mongo - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'ceilometer': True, - 'net_provider': 'neutron', - 'net_segment_type': 'tun', - 'tenant': 'ceilometerSimple', - 'user': 'ceilometerSimple', - 'password': 'ceilometerSimple' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'], - 'slave-03': ['mongo'] - } - ) - nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - - disk_mb = 0 - for node in nailgun_nodes: - if node.get('pending_roles') == ['mongo']: - disk_mb = self.fuel_web.get_node_disk_size(node.get('id'), - "vda") - - logger.debug('disk size is {0}'.format(disk_mb)) - mongo_disk_mb = 11116 - os_disk_mb = disk_mb - mongo_disk_mb - # pylint: disable=round-builtin - mongo_disk_gb = ("{0}G".format(round(mongo_disk_mb / 1024, 1))) - # pylint: enable=round-builtin - disk_part = { - "vda": { - "os": os_disk_mb, - "mongo": mongo_disk_mb - } - } - - for node in nailgun_nodes: - if node.get('pending_roles') == ['mongo']: - self.fuel_web.update_node_disk(node.get('id'), disk_part) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - for service_name in SERVICES_TO_CHECK: - checkers.verify_service(_ip, - service_name=service_name, - ignore_count_of_proccesses=True) - - _ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip'] - partitions = utils.get_mongo_partitions(_ip, "vda5") - - assert_equal(partitions[0].rstrip(), mongo_disk_gb, - 'Mongo size {0} before deployment is not equal' - ' to size after {1}'.format(mongo_disk_gb, partitions)) - - self.run_tests(cluster_id) - self.env.make_snapshot( - "deploy_ceilometer_ha_one_controller_with_mongo") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_ceilometer_ha_one_controller_multirole"], - enabled=False) - @log_snapshot_after_test - def deploy_ceilometer_ha_one_controller_multirole(self): - """Deploy cluster in ha multirole mode with Ceilometer - - Scenario: - 1. Create cluster. Set install Ceilometer option - 2. Add 1 node with controller role - 3. Add 1 nodes with compute role - 4. Add 2 nodes with cinder and mongo roles - 5. Deploy the cluster - 6. Verify ceilometer api is running - 7. 
Run OSTF - - Duration 35m - Snapshot: deploy_ceilometer_ha_one_controller_multirole - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'ceilometer': True, - 'net_provider': 'neutron', - 'net_segment_type': 'tun', - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder', 'mongo'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - for service_name in SERVICES_TO_CHECK: - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - checkers.verify_service(_ip, - service_name=service_name, - ignore_count_of_proccesses=True) - - self.run_tests(cluster_id) - self.env.make_snapshot("deploy_ceilometer_ha_one_controller_multirole") - - -@test(groups=["services", "services.ceilometer", "services_ha.ceilometer"]) -class CeilometerHAMongo(OSTFCeilometerHelper): - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_ceilometer_ha_with_mongo"], - enabled=False) - @log_snapshot_after_test - def deploy_ceilometer_ha_with_mongo(self): - """Deploy cluster in ha mode with Ceilometer - - Scenario: - 1. Create cluster. Set install Ceilometer option - 2. Add 3 node with controller role - 3. Add 1 nodes with compute role - 4. Add 1 node with mongo role - 5. Deploy the cluster - 6. Verify ceilometer api is running - 7. Run OSTF - - Duration 65m - Snapshot: deploy_ceilometer_ha_with_mongo - - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'ceilometer': True, - 'net_provider': 'neutron', - 'net_segment_type': 'tun', - 'tenant': 'ceilometerHA', - 'user': 'ceilometerHA', - 'password': 'ceilometerHA' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['mongo'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - for service_name in SERVICES_TO_CHECK: - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - checkers.verify_service(_ip, - service_name=service_name, - ignore_count_of_proccesses=True) - - self.run_tests(cluster_id, - skip_tests=['test_check_volume_events']) - self.env.make_snapshot("deploy_ceilometer_ha_with_mongo") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_ceilometer_ha_multirole"], - enabled=False) - @log_snapshot_after_test - def deploy_ceilometer_ha_multirole(self): - """Deploy cluster in ha multirole mode with Ceilometer - - Scenario: - 1. Create cluster. Set install Ceilometer option - 2. Add 3 node with controller and mongo roles - 3. Add 1 nodes with compute role - 4. Add 1 nodes with cinder - 5. Deploy the cluster - 6. Verify ceilometer api is running - 7. 
Run OSTF - - Duration 80m - Snapshot: deploy_ceilometer_ha_multirole - - """ - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'ceilometer': True, - 'net_provider': 'neutron', - 'net_segment_type': 'tun', - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'mongo'], - 'slave-02': ['controller', 'mongo'], - 'slave-03': ['controller', 'mongo'], - 'slave-04': ['compute'], - 'slave-05': ['cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - for service_name in SERVICES_TO_CHECK: - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - checkers.verify_service(_ip, - service_name=service_name, - ignore_count_of_proccesses=True) - - self.run_tests(cluster_id) - self.env.make_snapshot("deploy_ceilometer_ha_multirole", is_make=True) - - @test(depends_on=[deploy_ceilometer_ha_multirole], - groups=["ceilometer_ha_multirole_add_mongo"], - enabled=False) - @log_snapshot_after_test - def ceilometer_ha_multirole_add_mongo(self): - """Add mongo node to cluster with HA mode and Ceilometer - - Scenario: - 1. Revert snapshot deploy_ceilometer_ha_multirole - 2. Add 1 node with mongo role - 3. Deploy the cluster - 4. Run OSTF - - Duration 60m - Snapshot: ceilometer_ha_multirole_add_mongo - - """ - self.env.revert_snapshot("deploy_ceilometer_ha_multirole") - cluster_id = self.fuel_web.get_last_created_cluster() - - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[5:6]) - self.fuel_web.update_nodes( - cluster_id, {'slave-06': ['mongo']}, True, False - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.run_tests(cluster_id) - - self.env.make_snapshot("ceilometer_ha_multirole_add_mongo") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_ceilometer_ha_with_external_mongo"], - enabled=False) - @log_snapshot_after_test - def deploy_ceilometer_ha_with_external_mongo(self): - """Deploy cluster in ha mode with Ceilometer and external Mongo - - Scenario: - 1. Create cluster. Set install Ceilometer, external Mongo option - 2. Add 3 node with controller role - 3. Add 1 nodes with compute and ceph roles - 4. Add 1 node with ceph role - 5. Deploy the cluster - 6. Verify ceilometer api is running - 7. 
Run OSTF - - Duration 65m - Snapshot: deploy_ceilometer_ha_with_external_mongo - - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'ceilometer': True, - 'tenant': 'ceilometerHA', - 'user': 'ceilometerHA', - 'password': 'ceilometerHA', - 'net_provider': 'neutron', - 'net_segment_type': 'tun', - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'osd_pool_size': "2", - 'mongo': True, - 'hosts_ip': settings.SERVTEST_EXTERNAL_MONGO_URLS, - 'mongo_db_name': settings.SERVTEST_EXTERNAL_MONGO_DB_NAME, - 'mongo_user': settings.SERVTEST_EXTERNAL_MONGO_USER, - 'mongo_password': settings.SERVTEST_EXTERNAL_MONGO_PASS, - 'mongo_replset': settings.SERVTEST_EXTERNAL_MONGO_REPL_SET - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['ceph-osd'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - for service_name in SERVICES_TO_CHECK: - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - checkers.verify_service(_ip, - service_name=service_name, - ignore_count_of_proccesses=True) - - self.run_tests(cluster_id) - self.env.make_snapshot("deploy_ceilometer_ha_with_external_mongo") - - -@test(groups=["services", "services.heat", "services_ha_one_controller"]) -class HeatHAOneController(TestBasic): - """Heat HA one controller test. - Don't recommend to start tests without kvm - """ - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_heat_ha_one_controller_neutron"]) - @log_snapshot_after_test - def deploy_heat_ha_one_controller_neutron(self): - """Deploy Heat cluster in HA mode with Neutron VXLAN - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 nodes with compute role - 4. Deploy the cluster - 5. Verify Heat services - 6. Run OSTF platform tests - - Duration 40m - Snapshot: deploy_heat_ha_one_controller_neutron - """ - - self.env.revert_snapshot("ready_with_3_slaves") - - data = { - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'heatSimple', - 'user': 'heatSimple', - 'password': 'heatSimple' - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], data['password'], data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - - _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - checkers.verify_service(_ip, service_name='heat-api', count=5) - - logger.debug('Run Heat OSTF platform tests') - - test_class_main = ('fuel_health.tests.tests_platform.' - 'test_heat.' 
- 'HeatSmokeTests') - tests_names = ['test_actions', - 'test_advanced_actions', - 'test_rollback', - 'test_update', - 'test_wait_condition'] - - test_classes = [] - - for test_name in tests_names: - test_classes.append('{0}.{1}'.format(test_class_main, - test_name)) - - for test_name in test_classes: - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=test_name, timeout=60 * 60) - - self.env.make_snapshot("deploy_heat_ha_one_controller_neutron") - - -@test(groups=["services", "services.heat", "services_ha"]) -class HeatHA(TestBasic): - """Heat HA test. - Running these tests without KVM is not recommended. - """ - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_heat_ha"]) - @log_snapshot_after_test - def deploy_heat_ha(self): - """Deploy Heat cluster in HA mode - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Verify Heat services - 6. Run OSTF platform tests - - Duration 70m - Snapshot: deploy_heat_ha - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - data = { - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'heatHA', - 'user': 'heatHA', - 'password': 'heatHA' - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions( - cluster_vip, data['user'], data['password'], data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13) - - logger.debug('Run Heat OSTF platform tests') - - test_class_main = ('fuel_health.tests.tests_platform.' - 'test_heat.' - 'HeatSmokeTests') - tests_names = ['test_actions', - 'test_advanced_actions', - 'test_rollback', - 'test_update'] - - test_classes = [] - - for test_name in tests_names: - test_classes.append('{0}.{1}'.format(test_class_main, - test_name)) - - for test_name in test_classes: - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=test_name, timeout=60 * 60) - - self.env.make_snapshot("deploy_heat_ha") diff --git a/fuelweb_test/tests/test_services_reconfiguration.py b/fuelweb_test/tests/test_services_reconfiguration.py deleted file mode 100644 index 104164f9f..000000000 --- a/fuelweb_test/tests/test_services_reconfiguration.py +++ /dev/null @@ -1,1426 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
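A note on the helper defined just below: get_structured_config_dict flattens a config template whose keys are 'section/option' paths into per-file lists of {section, option, value} entries, mapping 'ensure: absent' to a None value. A minimal self-contained sketch of that transformation for the 'nova_config' group follows; the input template here is illustrative (real ones come from utils.get_config_template()):

    # Sketch of the transformation done by get_structured_config_dict
    # (defined below) for one config group. The template dict is made up
    # for illustration.
    config = {
        'nova_config': {
            'DEFAULT/cpu_allocation_ratio': {'value': '1.0'},
            'DEFAULT/debug': {'ensure': 'absent'},
        },
    }

    structured = {'/etc/nova/nova.conf': []}
    for param, value in config['nova_config'].items():
        section, option = param.split('/')
        structured['/etc/nova/nova.conf'].append({
            'section': section,
            'option': option,
            # 'ensure: absent' means the option must be removed.
            'value': None if value.get('ensure') == 'absent'
            else str(value.get('value', '')),
        })

    assert {'section': 'DEFAULT', 'option': 'debug',
            'value': None} in structured['/etc/nova/nova.conf']

Each entry can then be checked against the corresponding ini file on a node, which is what check_config_on_remote does further down.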
- -from __future__ import unicode_literals - -import random -import time -import traceback - -from devops.helpers.ssh_client import SSHAuth -from devops.helpers import helpers -from keystoneauth1.exceptions import HttpError -from keystoneauth1.exceptions import NotFound -import netaddr -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers import utils -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) - - -def get_structured_config_dict(config): - structured_conf = {} - - def helper(key1, key2): - structured_conf[key2] = [] - for param, value in config[key1].items(): - d = {} - param = param.split('/') - d['section'] = param[0] - d['option'] = param[1] - if value.get('ensure', False) == 'absent': - d['value'] = None - else: - d['value'] = str(value.get('value', '')) - structured_conf[key2].append(d) - - for key in config.keys(): - if key == 'neutron_config': - helper(key, '/etc/neutron/neutron.conf') - if key == 'neutron_plugin_ml2': - helper(key, '/etc/neutron/plugins/ml2/ml2_conf.ini') - if key == 'neutron_dhcp_agent_config': - helper(key, '/etc/neutron/dhcp_agent.ini') - if key == 'neutron_l3_agent_config': - helper(key, '/etc/neutron/l3_agent.ini') - if key == 'neutron_metadata_agent_config': - helper(key, '/etc/neutron/metadata_agent.ini') - if key == 'neutron_api_config': - helper(key, '/etc/neutron/api-paste.ini') - if key == 'nova_config': - helper(key, '/etc/nova/nova.conf') - if key == 'keystone_config': - helper(key, '/etc/keystone/keystone.conf') - return structured_conf - - -@test(groups=["services_reconfiguration"]) -class ServicesReconfiguration(TestBasic): - """ServicesReconfiguration.""" - - def wait_for_node_status(self, devops_node, status, timeout=1200): - helpers.wait( - lambda: self.fuel_web.get_nailgun_node_by_devops_node( - devops_node)['status'] == status, timeout=timeout, - timeout_msg="Timeout exceeded while waiting for " - "node status: {0}".format(status)) - - @staticmethod - def check_response_code(expected_code, err_msg, - func, *args, **kwargs): - try: - func(*args, **kwargs) - except HttpError as e: - if e.http_status != expected_code: - raise - logger.warning('Ignoring exception: {!r}'.format(e)) - logger.debug(traceback.format_exc()) - else: - raise Exception(err_msg) - - @staticmethod - def change_default_range(networks, number_excluded_ips, - cut_from_start=True): - """ - Change the IP range for the public, management and storage networks - by excluding the first N addresses from the default range - :param networks: a list of environment network configurations - :param number_excluded_ips: an integer number of IPs - :param cut_from_start: a boolean flag that selects the first part of - the default range if True and the last one if False - :return: - """ - for default_network in filter( - lambda x: ((x['name'] != 'fuelweb_admin') and - (x['name'] != 'private')), - networks): - default_range = [netaddr.IPAddress(str(ip)) for ip - in default_network["ip_ranges"][0]] - if cut_from_start: - new_range = [default_range[0], - default_range[0] + number_excluded_ips] - else: - new_range = [default_range[0] + number_excluded_ips + 1, - default_range[1]] - # noinspection PyUnresolvedReferences - default_network["ip_ranges"][0] = [str(ip) - for ip in 
new_range] - - @staticmethod - def is_update_dnsmasq_running(tasks): - for task in tasks: - if task['name'] == "update_dnsmasq" and \ - task["status"] == "running": - return True - return False - - def get_service_uptime(self, nodes, service_name): - """ - :param nodes: a list of nailgun nodes - :param service_name: a string of service name - :return: a dictionary of ip nodes and process uptime - """ - nodes = [x['ip'] for x in nodes] - uptimes = {} - for node in nodes: - with self.env.d_env.get_ssh_to_remote(node) as remote: - uptimes[node] = \ - utils.get_process_uptime(remote, service_name) - return uptimes - - def check_config_on_remote(self, nodes, config): - """ - :param nodes: a list of nailgun nodes - :param config: a structured dictionary of config - :return: - """ - nodes = [x['ip'] for x in nodes] - for node in nodes: - with self.env.d_env.get_ssh_to_remote(node) as remote: - for configpath, params in config.items(): - result = remote.open(configpath) - conf_for_check = utils.get_ini_config(result) - for param in params: - utils.check_config(conf_for_check, - configpath, - param['section'], - param['option'], - param['value']) - - def check_service_was_restarted(self, nodes, uptime_before, service_name): - """ - :param nodes: a list of nailgun nodes - :param uptime_before: a dictionary of ip nodes and process uptime - :param service_name: a string of service name - :return: - """ - nodes = [x['ip'] for x in nodes] - for node in nodes: - with self.env.d_env.get_ssh_to_remote(node) as remote: - uptime = utils.get_process_uptime(remote, service_name) - asserts.assert_true(uptime <= uptime_before[node], - 'Service "{0}" was not ' - 'restarted on {1}'.format(service_name, - node)) - - def check_overcommit_ratio(self, os_conn, cluster_id): - """ - :param os_conn: an object of connection to openstack services - :param cluster_id: an integer number of cluster id - :return: - """ - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - server = os_conn.create_instance(neutron_network=True, - label=net_name, - server_name="Test_reconfig", - vcpus=2) - os_conn.verify_instance_status(server, 'ACTIVE') - excessive_server = os_conn.create_instance(neutron_network=True, - label=net_name, - server_name="excessive_VM", - flavor_name="overcommit") - os_conn.verify_instance_status(excessive_server, 'ERROR') - os_conn.delete_instance(excessive_server) - os_conn.delete_instance(server) - - def check_nova_ephemeral_disk(self, os_conn, cluster_id, - hypervisor_name=None, fs_type='ext4'): - """ - :param os_conn: an object of connection to openstack services - :param cluster_id: an integer number of cluster id - :param hypervisor_name: a string of hypervisor name - :param fs_type: a string of fs type name - :return: - """ - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - flavor_id = random.randint(10, 10000) - os_conn.create_flavor(name='ephemeral{0}'.format(flavor_id), - ram=64, - vcpus=1, - disk=1, - flavorid=flavor_id, - ephemeral=1) - - kwargs = {} - if hypervisor_name: - kwargs['availability_zone'] = "nova:{0}".format(hypervisor_name) - instance = os_conn.create_server_for_migration( - neutron=True, label=net_name, flavor_id=flavor_id, **kwargs) - - floating_ip = os_conn.assign_floating_ip(instance) - - helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), - timeout=120, - timeout_msg="Can not ping instance by floating " - "ip {0}".format(floating_ip.ip)) - - controller = 
self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0]['ip'] - with self.env.d_env.get_ssh_to_remote(controller) as remote: - res = remote.execute_through_host( - hostname=floating_ip.ip, - cmd="mount", - auth=cirros_auth - ) - test_substr = '/mnt type {0}'.format(fs_type) - asserts.assert_true(test_substr in res['stdout_str'], - "Ephemeral disk format was not " - "changed on instance. " - "Please, see details: {0}".format(res)) - os_conn.delete_instance(instance) - - @staticmethod - def check_ml2_vlan_range(os_conn): - """ - :param os_conn: an object of connection to openstack services - :return: - """ - tenant = os_conn.get_tenant('admin') - os_conn.create_network('net1', tenant_id=tenant.id) - - try: - os_conn.create_network('net2', tenant_id=tenant.id) - except Exception as e: - if 'No tenant network is available' not in e.message: - raise - logger.warning('Ignoring exception: {!r}'.format(e)) - logger.debug(traceback.format_exc()) - else: - raise Exception("New configuration was not applied") - - def check_nova_quota(self, os_conn, cluster_id): - """ - :param os_conn: an object of connection to openstack services - :param cluster_id: an integer number of cluster id - :return: - """ - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - server = os_conn.create_instance(neutron_network=True, - label=net_name, - server_name="Test_reconfig") - os_conn.verify_instance_status(server, 'ACTIVE') - try: - os_conn.create_instance(neutron_network=True, - label=net_name, - server_name="excessive_VM", - flavor_name="nova_quota") - except Exception as e: - if 'Quota exceeded for instances' not in e.message: - raise - logger.warning('Ignoring exception: {!r}'.format(e)) - logger.debug(traceback.format_exc()) - else: - raise Exception("New configuration was not applied") - - @staticmethod - def check_token_expiration(os_conn, time_expiration): - """ - :param os_conn: an object of connection to openstack services - :param time_expiration: an integer value of token time expiration - in seconds - :return: - """ - token = os_conn.keystone.tokens.authenticate(username='admin', - password='admin') - time.sleep(time_expiration) - - asserts.assert_raises( - NotFound, - os_conn.keystone.tokens.validate, - (token.id, ) - ) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["services_reconfiguration_thread_1", - "services_reconfiguration_thread_2", - "basic_env_for_reconfiguration"]) - @log_snapshot_after_test - def basic_env_for_reconfiguration(self): - """Basic environment for reconfiguration - - Scenario: - 1. Create cluster - 2. Add 1 node with compute role - 3. Add 3 nodes with controller role - 4. Deploy the cluster - 5. Verify network - 6. 
Run OSTF - - Snapshot: basic_env_for_reconfiguration - - """ - snapshot_name = 'basic_env_for_reconfiguration' - self.check_run(snapshot_name) - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT_TYPE, - } - ) - self.show_step(2) - self.show_step(3) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'] - }) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("basic_env_for_reconfiguration", is_make=True) - - @test(depends_on_groups=['basic_env_for_reconfiguration'], - groups=["services_reconfiguration_thread_1", - "reconfigure_ml2_vlan_range"]) - @log_snapshot_after_test - def reconfigure_ml2_vlan_range(self): - """Reconfigure neutron ml2 VLAN range - - Scenario: - 1. Revert snapshot "basic_env_for_reconfiguration" - 2. Upload a new openstack configuration - 3. Get uptime of process "neutron-server" on each controller - 4. Apply a new VLAN range (minimal range) to all nodes - 5. Wait for configuration applying - 6. Check that service "neutron-server" was restarted - 7. Verify ml2 plugin settings - 8. Create new private network - 9. Try to create one more, verify that it is impossible - - Snapshot: reconfigure_ml2_vlan_range - - """ - self.check_run('reconfigure_ml2_vlan_range') - self.show_step(1, initialize=True) - self.env.revert_snapshot("basic_env_for_reconfiguration") - - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - - self.show_step(2) - config = utils.get_config_template('neutron') - structured_config = get_structured_config_dict(config) - self.fuel_web.client.upload_configuration(config, - cluster_id, - role="controller") - - self.show_step(3) - service_name = 'neutron-server' - uptimes = self.get_service_uptime(controllers, service_name) - - self.show_step(4) - task = self.fuel_web.client.apply_configuration(cluster_id, - role="controller") - - self.show_step(5) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(6) - self.check_service_was_restarted(controllers, uptimes, service_name) - - self.show_step(7) - self.check_config_on_remote(controllers, structured_config) - - self.show_step(8) - self.show_step(9) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.check_ml2_vlan_range(os_conn) - - self.env.make_snapshot("reconfigure_ml2_vlan_range", is_make=True) - - @test(depends_on_groups=["basic_env_for_reconfiguration"], - groups=["services_reconfiguration_thread_1", - "reconfigure_overcommit_ratio"]) - @log_snapshot_after_test - def reconfigure_overcommit_ratio(self): - """Test reconfiguration of the nova CPU overcommit ratio. - - Scenario: - 1. Revert snapshot "basic_env_for_reconfiguration" - 2. Apply new CPU overcommit ratio for each controller - 3. Verify deployment task is finished - 4. Verify nova-scheduler services uptime - 5. Verify configuration file on each controller - 6. 
Boot instances with a flavor that occupies all CPUs, - boot an extra instance and catch the error - 7. Apply old CPU overcommit ratio for each controller - 8. Verify deployment task is finished - 9. Verify nova-scheduler services uptime - 10. Verify configuration file on each controller - - Snapshot: reconfigure_overcommit_ratio - - """ - self.check_run('reconfigure_overcommit_ratio') - self.show_step(1, initialize=True) - self.env.revert_snapshot("basic_env_for_reconfiguration") - - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(2) - config_new = utils.get_config_template('nova_cpu') - structured_config = get_structured_config_dict(config_new) - self.fuel_web.client.upload_configuration(config_new, - cluster_id, - role="controller") - - service_name = "nova-scheduler" - - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - uptimes = self.get_service_uptime(controllers, service_name) - task = self.fuel_web.client.apply_configuration(cluster_id, - role="controller") - - self.show_step(3) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(4) - self.check_service_was_restarted(controllers, uptimes, service_name) - - self.show_step(5) - self.check_config_on_remote(controllers, structured_config) - - self.show_step(6) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.check_overcommit_ratio(os_conn, cluster_id) - - self.show_step(7) - config_revert = utils.get_config_template('nova_cpu_old') - structured_config_revert = get_structured_config_dict(config_revert) - self.fuel_web.client.upload_configuration(config_revert, - cluster_id, - role="controller") - uptimes = self.get_service_uptime(controllers, service_name) - task = self.fuel_web.client.apply_configuration(cluster_id, - role="controller") - self.show_step(8) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(9) - self.check_service_was_restarted(controllers, uptimes, service_name) - - self.show_step(10) - self.check_config_on_remote(controllers, structured_config_revert) - - self.env.make_snapshot("reconfigure_overcommit_ratio", - is_make=True) - - @test(depends_on_groups=['basic_env_for_reconfiguration'], - groups=["services_reconfiguration_thread_1", - "reconfigure_keystone_to_use_ldap"]) - @log_snapshot_after_test - def reconfigure_keystone_to_use_ldap(self): - """Reconfigure keystone to use LDAP - - Scenario: - 1. Revert snapshot "basic_env_for_reconfiguration" - 2. Upload a new openstack configuration - 3. Try to apply a new keystone configuration - 4. Wait for the apply-configuration task to finish - 5. 
Verify configuration file on primary controller - - Snapshot: reconfigure_keystone_to_use_ldap - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("basic_env_for_reconfiguration") - cluster_id = self.fuel_web.get_last_created_cluster() - devops_pr_controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - pr_controller = self.fuel_web.get_nailgun_node_by_devops_node( - devops_pr_controller) - - self.show_step(2) - config = utils.get_config_template('keystone_ldap') - structured_config = get_structured_config_dict(config) - self.fuel_web.client.upload_configuration( - config, - cluster_id, - role="controller") - - self.show_step(3) - task = self.fuel_web.client.apply_configuration(cluster_id, - role="controller") - - self.show_step(4) - self.fuel_web.task_wait(task, timeout=3600, interval=30) - - self.show_step(5) - self.check_config_on_remote([pr_controller], structured_config) - logger.info("New configuration was applied") - - self.env.make_snapshot("reconfigure_keystone_to_use_ldap") - - @test(depends_on_groups=['basic_env_for_reconfiguration'], - groups=["services_reconfiguration_thread_2", - "reconfigure_nova_quota"]) - @log_snapshot_after_test - def reconfigure_nova_quota(self): - """Test reconfiguration of the nova quota. - - Scenario: - 1. Revert snapshot "basic_env_for_reconfiguration" - 2. Upload a new openstack configuration - 3. Get uptime of process "nova-api" on each controller - 4. Apply a new quota driver and quota_instances to all nodes - 5. Wait for configuration applying - 6. Verify uptime of process "nova-api" on each controller - 7. Verify nova config settings - 8. Create new instance - 9. Try to create one more, verify that it is impossible - - Snapshot: reconfigure_nova_quota - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("basic_env_for_reconfiguration") - - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - - self.show_step(2) - config = utils.get_config_template('nova_quota') - structured_config = get_structured_config_dict(config) - self.fuel_web.client.upload_configuration(config, - cluster_id, - role="controller") - - self.show_step(3) - uptimes = self.get_service_uptime(controllers, 'nova-api') - - self.show_step(4) - task = self.fuel_web.client.apply_configuration(cluster_id, - role="controller") - - self.show_step(5) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(6) - self.check_service_was_restarted(controllers, uptimes, 'nova-api') - - self.show_step(7) - self.check_config_on_remote(controllers, structured_config) - - self.show_step(8) - self.show_step(9) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.check_nova_quota(os_conn, cluster_id) - - self.env.make_snapshot("reconfigure_nova_quota") - - @test(depends_on_groups=['reconfigure_overcommit_ratio'], - groups=["services_reconfiguration_thread_1", - "reconfigure_nova_ephemeral_disk"]) - @log_snapshot_after_test - def reconfigure_nova_ephemeral_disk(self): - """Reconfigure nova ephemeral disk format - - Scenario: - 1. Revert snapshot reconfigure_overcommit_ratio - 2. Delete previous OpenStack config - 3. Upload a new openstack configuration for nova on computes - 4. Apply configuration - 5. Wait for configuration applying - 6. Get uptime of process "nova-compute" on each compute - 7. Verify nova-compute settings - 8. 
Create flavor with ephemeral disk - 9. Boot instance on updated compute with ephemeral disk - 10. Assign floating ip - 11. Check ping to the instance - 12. SSH to VM and check ephemeral disk format - - Snapshot: reconfigure_nova_ephemeral_disk - - """ - self.check_run('reconfigure_nova_ephemeral_disk') - self.show_step(1, initialize=True) - self.env.revert_snapshot("reconfigure_overcommit_ratio") - - cluster_id = self.fuel_web.get_last_created_cluster() - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute']) - - self.show_step(2) - existing_configs = self.fuel_web.client.list_configuration( - cluster_id) - for existing_config in existing_configs: - self.fuel_web.client.delete_configuration(existing_config["id"]) - - self.show_step(3) - config = utils.get_config_template('nova_disk') - structured_config = get_structured_config_dict(config) - self.fuel_web.client.upload_configuration(config, - cluster_id, - role='compute') - - service_name = "nova-compute" - - uptimes = self.get_service_uptime(computes, service_name) - - self.show_step(4) - task = self.fuel_web.client.apply_configuration(cluster_id, - role='compute') - self.show_step(5) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(6) - self.check_service_was_restarted(computes, uptimes, service_name) - - self.show_step(7) - self.check_config_on_remote(computes, structured_config) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.show_step(8) - self.show_step(9) - self.show_step(10) - self.show_step(11) - self.show_step(12) - self.check_nova_ephemeral_disk(os_conn, cluster_id) - - self.env.make_snapshot("reconfigure_nova_ephemeral_disk", - is_make=True) - - @test(depends_on_groups=['reconfigure_ml2_vlan_range'], - groups=["services_reconfiguration_thread_1", - "preservation_config_after_reset_and_preconfigured_deploy"]) - @log_snapshot_after_test - def preservation_config_after_reset_and_preconfigured_deploy(self): - """Preservation of config after cluster reset and preconfigured deploy - - Scenario: - 1. Revert snapshot reconfigure_ml2_vlan_range - 2. Reset cluster - 3. Deploy changes - 4. Run OSTF - 5. Verify neutron settings - 6. Create new private network - 7. 
Try to create one more, verify that it is impossible - - - Snapshot "preservation_config_after_reset_and_preconfigured_deploy" - - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot("reconfigure_ml2_vlan_range") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.stop_reset_env_wait(cluster_id) - - config = utils.get_config_template('neutron') - structured_config = get_structured_config_dict(config) - - self.show_step(3) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:4], timeout=10 * 60) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.show_step(5) - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - self.check_config_on_remote(controllers, structured_config) - - self.show_step(6) - self.show_step(7) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.check_ml2_vlan_range(os_conn) - - snapshot = "preservation_config_after_reset_and_preconfigured_deploy" - self.env.make_snapshot(snapshot, is_make=True) - - @test(depends_on_groups=['reconfigure_nova_ephemeral_disk'], - groups=["services_reconfiguration_thread_1", - "reconfiguration_scalability"]) - @log_snapshot_after_test - def reconfiguration_scalability(self): - """Check scalability of configured environment - - Scenario: - 1. Revert snapshot "reconfigure_nova_ephemeral_disk" - 2. Upload a new openstack configuration for keystone - 3. Wait for configuration applying - 4. Verify keystone settings - 5. Keystone actions - 6. Add 1 compute and 1 controller to cluster - 7. Run network verification - 8. Deploy changes - 9. Run OSTF tests - 10. Verify keystone settings - 11. Verify nova settings - 12. Create flavor with ephemeral disk - 13. Boot instance on updated compute with ephemeral disk - 14. Assign floating ip - 15. Check ping to the instance - 16. SSH to VM and check ephemeral disk format - 17. 
Keystone actions - - Snapshot "reconfiguration_scalability" - """ - - self.check_run('reconfiguration_scalability') - self.show_step(1, initialize=True) - self.env.revert_snapshot("reconfigure_nova_ephemeral_disk") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - config = utils.get_config_template('nova_disk') - structured_config_nova = get_structured_config_dict(config) - config = utils.get_config_template('keystone') - structured_config_keystone = get_structured_config_dict(config) - self.fuel_web.client.upload_configuration(config, - cluster_id, - role='controller') - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - - self.show_step(3) - task = self.fuel_web.client.apply_configuration(cluster_id, - role='controller') - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(4) - self.check_config_on_remote(controllers, structured_config_keystone) - - self.show_step(5) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - time_expiration = config[ - 'keystone_config']['token/expiration']['value'] - self.check_token_expiration(os_conn, time_expiration) - - self.show_step(6) - bs_nodes = [x for x in self.env.d_env.get_nodes() - if x.name == 'slave-05' or x.name == 'slave-06'] - self.env.bootstrap_nodes(bs_nodes) - self.fuel_web.update_nodes( - cluster_id, - {'slave-05': ['compute', 'cinder']}) - self.fuel_web.update_nodes( - cluster_id, - {'slave-06': ['controller']}) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(10) - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute']) - target_controller = [x for x in controllers - if 'slave-06' in x['name']] - target_compute = [x for x in computes - if 'slave-05' in x['name']] - self.check_config_on_remote(target_controller, - structured_config_keystone) - - self.show_step(11) - self.check_config_on_remote(target_compute, structured_config_nova) - - self.show_step(12) - self.show_step(13) - self.show_step(14) - self.show_step(15) - self.show_step(16) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - hypervisor_name = target_compute[0]['fqdn'] - self.check_nova_ephemeral_disk(os_conn, cluster_id, - hypervisor_name=hypervisor_name) - - self.show_step(17) - self.check_token_expiration(os_conn, time_expiration) - - self.env.make_snapshot("reconfiguration_scalability", is_make=True) - - @test(depends_on_groups=['reconfiguration_scalability'], - groups=["services_reconfiguration_thread_1", - "multiple_apply_config"]) - @log_snapshot_after_test - def multiple_apply_config(self): - """Multiple serial applications of configuration - - Scenario: - 1. Revert snapshot "reconfiguration_scalability" - 2. Upload a new openstack configuration for a certain compute - 3. Get uptime of process "nova-compute" on target compute - 4. Wait for configuration applying - 5. Get uptime of process "nova-compute" on target compute - 6. Verify nova settings on each compute - 7. Create flavor with ephemeral disk - 8. Boot instance on non-target compute with ephemeral disk - 9. Assign floating ip - 10. Check ping to the instance - 11. SSH to VM and check ephemeral disk format - 12. 
Boot instance on target compute with ephemeral disk - 13. Assign floating ip - 14. Check ping to the instance - 15. SSH to VM and check ephemeral disk format - - Snapshot "multiple_apply_config" - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot("reconfiguration_scalability") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute']) - target_compute = computes[0] - config = utils.get_config_template('nova_disk') - structured_config_old = get_structured_config_dict(config) - - config['nova_config'][ - 'DEFAULT/default_ephemeral_format']['value'] = 'ext3' - structured_config_new = get_structured_config_dict(config) - self.fuel_web.client.upload_configuration(config, - cluster_id, - node_id=target_compute['id']) - - self.show_step(3) - service_name = 'nova-compute' - uptimes = self.get_service_uptime([target_compute], service_name) - - self.show_step(4) - task = self.fuel_web.client.apply_configuration( - cluster_id, - node_id=target_compute['id']) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(5) - self.check_service_was_restarted([target_compute], - uptimes, service_name) - - self.show_step(6) - for compute in computes: - if compute == target_compute: - self.check_config_on_remote([compute], structured_config_new) - target_hypervisor_name = compute['fqdn'] - else: - hypervisor_name = compute['fqdn'] - self.check_config_on_remote([compute], structured_config_old) - - self.show_step(7) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.show_step(8) - self.show_step(9) - self.show_step(10) - self.show_step(11) - self.check_nova_ephemeral_disk(os_conn, cluster_id, - hypervisor_name=target_hypervisor_name, - fs_type='ext3') - self.show_step(12) - self.show_step(13) - self.show_step(14) - self.show_step(15) - self.check_nova_ephemeral_disk(os_conn, cluster_id, - hypervisor_name=hypervisor_name) - - self.env.make_snapshot("multiple_apply_config") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["services_reconfiguration_thread_2", - "two_clusters_reconfiguration"]) - @log_snapshot_after_test - def two_clusters_reconfiguration(self): - """Deploy two clusters with different configs - - Scenario: - 1. Revert snapshot "ready_with_5_slaves" - 2. Divide the IP ranges into two parts - 3. Verify network of the first environment - 4. Verify network of the second environment - 5. Deploy environment with first ranges - 6. Run OSTF on the first environment - 7. Deploy environment with second ranges - 8. Run OSTF on the second environment - 9. Apply new CPU overcommit ratio for first environment - 10. Verify deployment task is finished - 11. Verify nova-scheduler services uptime - 12. Verify configuration file on controller - 13. Boot instances with a flavor that occupies all CPUs, - boot an extra instance and catch the error - 14. Apply old CPU overcommit ratio for each controller - 15. Verify deployment task is finished - 16. Verify nova-scheduler services uptime - 17. 
Verify configuration file on each controller - - Snapshot "two_clusters_reconfiguration" - - """ - - self.show_step(1) - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(2) - cluster_id_1 = self.fuel_web.create_cluster( - name="env1", - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT_TYPE, - } - ) - cluster_id_2 = self.fuel_web.create_cluster( - name="env2", - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT_TYPE, - } - ) - - self.fuel_web.update_nodes( - cluster_id_1, - { - 'slave-01': ['compute'], - 'slave-02': ['controller'] - }) - - self.fuel_web.update_nodes( - cluster_id_2, - { - 'slave-03': ['compute'], - 'slave-04': ['controller'] - }) - - networks_1 = self.fuel_web.client.get_networks( - cluster_id_1)["networks"] - self.change_default_range(networks_1, - number_excluded_ips=30, - cut_from_start=True) - helpers.wait(lambda: not self.is_update_dnsmasq_running( - self.fuel_web.client.get_tasks()), timeout=60, - timeout_msg="Timeout exceeded while waiting for task " - "'update_dnsmasq' to finish!") - floating_list = [self.fuel_web.get_floating_ranges()[0][0]] - networking_parameters = { - "floating_ranges": floating_list} - self.fuel_web.client.update_network( - cluster_id_1, - networks=networks_1, - networking_parameters=networking_parameters - ) - - networks_2 = self.fuel_web.client.get_networks( - cluster_id_2)["networks"] - self.change_default_range(networks_2, - number_excluded_ips=30, - cut_from_start=False) - helpers.wait(lambda: not self.is_update_dnsmasq_running( - self.fuel_web.client.get_tasks()), timeout=60, - timeout_msg="Timeout exceeded while waiting for task " - "'update_dnsmasq' to finish!") - floating_list = [self.fuel_web.get_floating_ranges()[0][1]] - - vlan_range_1 = self.fuel_web.client.get_networks( - cluster_id_1)["networking_parameters"]["vlan_range"] - vlan_range_2 = [vlan_range_1[-1] + 1, vlan_range_1[-1] + 31] - - networking_parameters = { - "floating_ranges": floating_list, - "vlan_range": vlan_range_2} - self.fuel_web.client.update_network( - cluster_id_2, - networks=networks_2, - networking_parameters=networking_parameters - ) - self.show_step(3) - self.fuel_web.verify_network(cluster_id_1) - self.show_step(4) - self.fuel_web.verify_network(cluster_id_2) - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id_1, check_services=False) - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id_1) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id_2, check_services=False) - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id_2) - - self.show_step(9) - config_new = utils.get_config_template('nova_cpu') - structured_config = get_structured_config_dict(config_new) - self.fuel_web.client.upload_configuration(config_new, - cluster_id_1, - role="controller") - - service_name = "nova-scheduler" - - controller_env_1 = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id_1, ['controller']) - controller_env_2 = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id_2, ['controller']) - uptimes = self.get_service_uptime(controller_env_1, service_name) - task = self.fuel_web.client.apply_configuration(cluster_id_1, - role="controller") - - self.show_step(10) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(11) - self.check_service_was_restarted(controller_env_1, - uptimes, - service_name) - - 
self.show_step(12) - self.check_config_on_remote(controller_env_1, structured_config) - - self.show_step(13) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id_1)) - - self.check_overcommit_ratio(os_conn, cluster_id_1) - - self.show_step(14) - config_revert = utils.get_config_template('nova_cpu_old') - structured_config_revert = get_structured_config_dict(config_revert) - self.fuel_web.client.upload_configuration(config_revert, - cluster_id_2, - role="controller") - uptimes = self.get_service_uptime(controller_env_2, service_name) - task = self.fuel_web.client.apply_configuration(cluster_id_2, - role="controller") - self.show_step(15) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(16) - self.check_service_was_restarted(controller_env_2, - uptimes, - service_name) - - self.show_step(17) - self.check_config_on_remote(controller_env_2, - structured_config_revert) - - self.env.make_snapshot("two_clusters_reconfiguration") - - @test(depends_on_groups=['basic_env_for_reconfiguration'], - groups=["services_reconfiguration_thread_2", - "upload_config_for_node_and_env_in_transitional_state"]) - @log_snapshot_after_test - def upload_config_for_node_and_env_in_transitional_state(self): - """Upload config for node and env in transitional state - - Scenario: - 1. Revert snapshot "basic_env_for_reconfiguration" - 2. Add 1 compute - 3. Deploy changes - 4. Upload a new openstack configuration for env - 5. Check nailgun response - 6. Wait for added node in provisioning state - 7. Upload a new openstack configuration for node - 8. Check Nailgun response - 9. Wait for added node in deploying state - 10. Upload a new openstack configuration for node - 11. Check Nailgun response - 12. Wait for finishing of deployment - - Snapshot: upload_config_for_node_and_env_in_transitional_state - - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot("basic_env_for_reconfiguration") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - bs_node = [ - node for node in self.env.d_env.get_nodes() - if node.name == 'slave-05'] - self.env.bootstrap_nodes(bs_node) - self.fuel_web.update_nodes( - cluster_id, - {'slave-05': ['compute']}) - target_node = bs_node[0] - target_node_id = self.fuel_web.get_nailgun_node_by_devops_node( - target_node)['id'] - - config = {'nova_config': {'foo': {'value': 'bar'}}} - - self.show_step(3) - task = self.fuel_web.deploy_cluster(cluster_id) - # wait for creation of child 'deployment' task - self.fuel_web.wait_for_tasks_presence(self.fuel_web.client.get_tasks, - name='deployment', - parent_id=task.get('id')) - - self.show_step(4) - self.show_step(5) - expected_code = 403 - err_msg = 'A configuration was applied for env in deploying state' - self.check_response_code( - expected_code, err_msg, - self.fuel_web.client.upload_configuration, - config, cluster_id) - - self.show_step(6) - self.wait_for_node_status(target_node, 'provisioning') - - self.show_step(7) - self.show_step(8) - err_msg = 'A configuration was applied for node in provisioning state' - self.check_response_code( - expected_code, err_msg, - self.fuel_web.client.upload_configuration, - config, cluster_id, node_id=target_node_id) - - self.show_step(9) - self.wait_for_node_status(target_node, 'deploying') - - self.show_step(10) - self.show_step(11) - err_msg = 'A configuration was applied for node in deploying state' - self.check_response_code( - expected_code, err_msg, - self.fuel_web.client.upload_configuration, - config, 
cluster_id, node_id=target_node_id) - - self.show_step(12) - self.fuel_web.assert_task_success(task, timeout=7800, interval=30) - - snapshot_name = "upload_config_for_node_and_env_in_transitional_state" - self.env.make_snapshot(snapshot_name) - - @test(depends_on_groups=['reconfiguration_scalability'], - groups=["services_reconfiguration_thread_1", - "apply_config_for_node_with_multiple_role"]) - @log_snapshot_after_test - def apply_config_for_node_with_multiple_role(self): - """Apply config for a node with multiple roles - - Scenario: - 1. Revert snapshot "reconfiguration_scalability" - 2. Upload a new openstack configuration for compute role - 3. Upload a new openstack configuration for cinder role - 4. Wait for configuration applying - 5. Get uptime of process "nova-compute" - 6. Check settings on target node - - Snapshot "apply_config_for_node_with_multiple_role" - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot("reconfiguration_scalability") - - cluster_id = self.fuel_web.get_last_created_cluster() - target_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute', 'cinder']) - config_for_compute_role = utils.get_config_template('nova_disk') - config_for_compute_role['nova_config'].update( - {'DEFAULT/debug': {'value': 'False'}}) - config_for_cinder_role = utils.get_config_template( - 'nova_disk_cinder_role') - - self.show_step(2) - self.fuel_web.client.upload_configuration(config_for_compute_role, - cluster_id, - role='compute') - - self.show_step(3) - self.fuel_web.client.upload_configuration(config_for_cinder_role, - cluster_id, - role='cinder') - - # Configs are merged with ID priority - general_config = {} - general_config.update(config_for_compute_role) - general_config.update(config_for_cinder_role) - structured_config = get_structured_config_dict(general_config) - service_name = 'nova-compute' - uptime = self.get_service_uptime(target_node, service_name) - - self.show_step(4) - task = self.fuel_web.client.apply_configuration( - cluster_id, - node_id=target_node[0]['id']) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(5) - self.check_service_was_restarted(target_node, - uptime, - service_name) - - self.show_step(6) - self.check_config_on_remote(target_node, structured_config) - - snapshot_name = "apply_config_for_node_with_multiple_role" - self.env.make_snapshot(snapshot_name) - - @test(depends_on_groups=['basic_env_for_reconfiguration'], - groups=["services_reconfiguration_thread_2", - "reconfigure_with_new_fields"]) - @log_snapshot_after_test - def reconfigure_with_new_fields(self): - """Reconfigure services with new fields - - Scenario: - 1. Revert snapshot "basic_env_for_reconfiguration" - 2. Upload a new openstack configuration for controller - 3. Get uptime of processes from config on each controller - 4. Apply a new openstack configuration for controller - 5. Check that neutron-related services were restarted - 6. Verify configuration file on each controller - 7. Upload a new openstack configuration for compute - 8. Get uptime of nova-compute on each compute - 9. Apply a new openstack configuration for compute - 10. Check that nova-compute service was restarted - 11. 
Verify configuration file on each compute - - Snapshot: reconfigure_with_new_fields - - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("basic_env_for_reconfiguration") - - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - - self.show_step(2) - config_controller = utils.get_config_template('new_fields_controller') - structured_config = get_structured_config_dict(config_controller) - self.fuel_web.client.upload_configuration(config_controller, - cluster_id, - role="controller") - - self.show_step(3) - service_list = ['neutron-server', 'neutron-dhcp-agent', - 'neutron-l3-agent', 'neutron-metadata-agent', - 'nova-scheduler', 'nova-novncproxy', 'nova-conductor', - 'nova-api', 'nova-consoleauth', 'nova-cert'] - services_uptime = {} - for service_name in service_list: - services_uptime[service_name] = self.get_service_uptime( - controllers, service_name) - - self.show_step(4) - task = self.fuel_web.client.apply_configuration(cluster_id, - role="controller") - - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(5) - for service_name in service_list: - self.check_service_was_restarted( - controllers, - services_uptime[service_name], - service_name) - - self.show_step(6) - self.check_config_on_remote(controllers, structured_config) - - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute']) - - self.show_step(7) - config_compute = utils.get_config_template('new_fields_compute') - structured_config = get_structured_config_dict(config_compute) - self.fuel_web.client.upload_configuration(config_compute, - cluster_id, - role='compute') - - self.show_step(8) - uptimes_nova = self.get_service_uptime(computes, 'nova-compute') - - self.show_step(9) - task = self.fuel_web.client.apply_configuration(cluster_id, - role='compute') - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(10) - self.check_service_was_restarted(computes, - uptimes_nova, - 'nova-compute') - - self.show_step(11) - self.check_config_on_remote(computes, structured_config) - self.env.make_snapshot("reconfigure_with_new_fields") - - @test(depends_on_groups=['basic_env_for_reconfiguration'], - groups=["services_reconfiguration_thread_2", - "reconfigure_ml2_vlan_range_for_suite_of_nodes"]) - @log_snapshot_after_test - def reconfigure_ml2_vlan_range_for_suite_of_nodes(self): - """Reconfigure neutron ml2 VLAN range for a suite of controller nodes - - Scenario: - 1. Revert snapshot "basic_env_for_reconfiguration" - 2. Upload a new VLAN range (minimal range) for a suite of controller - nodes - 3. Get uptime of process "neutron-server" on each controller - 4. Apply a new openstack configuration to all controller nodes - 5. Wait for configuration applying - 6. Check that service "neutron-server" was restarted - 7. Verify ml2 plugin settings - 8. 
Try to create two private networks, check that the second - network fails to create - - Snapshot: reconfigure_ml2_vlan_range_for_suite_of_nodes - - """ - self.show_step(1) - self.env.revert_snapshot("basic_env_for_reconfiguration") - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - - controller_ids = [int(ctrl['id']) for ctrl in controllers] - - self.show_step(2) - config = utils.get_config_template('neutron') - structured_config = get_structured_config_dict(config) - self.fuel_web.client.upload_configuration(config, - cluster_id, - node_ids=controller_ids) - - self.show_step(3) - service_name = 'neutron-server' - uptimes = self.get_service_uptime(controllers, service_name) - - self.show_step(4) - task = self.fuel_web.client.apply_configuration(cluster_id, - role="controller") - - self.show_step(5) - self.fuel_web.assert_task_success(task, timeout=900, interval=5) - - self.show_step(6) - self.check_service_was_restarted(controllers, uptimes, service_name) - - self.show_step(7) - self.check_config_on_remote(controllers, structured_config) - - self.show_step(8) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.check_ml2_vlan_range(os_conn) - - snapshotname = "reconfigure_ml2_vlan_range_for_suite_of_nodes" - self.env.make_snapshot(snapshotname) diff --git a/fuelweb_test/tests/test_sriov.py b/fuelweb_test/tests/test_sriov.py deleted file mode 100644 index 131a145b8..000000000 --- a/fuelweb_test/tests/test_sriov.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['sriov']) -class TestSRIOV(TestBasic): - - @test(depends_on_groups=['prepare_slaves_all'], - groups=['deploy_cluster_with_sriov']) - @log_snapshot_after_test - def deploy_cluster_with_sriov(self): - """Deploy cluster with SR-IOV - - Scenario: - 1. Create new environment with VLAN segmentation for Neutron - 2. Verify that at least 2 SR-IOV capable nodes are present - 3. Add 3 controller, 1 cinder and 3 compute nodes - 4. Enable SR-IOV on compatible compute nodes - 5. Run network verification - 6. Deploy environment - 7. Run network verification - 8. Run OSTF - 9. Reboot computes with SR-IOV on NICs - 10. Run OSTF - - Duration 90m - Snapshot: deploy_cluster_with_sriov - - """ - self.env.revert_snapshot("ready_with_all_slaves") - - assert_true(len(self.env.d_env.nodes().slaves) >= 7, - 'At least 7 slaves are required for - 'this test! 
But only {0} nodes are ' - 'available!'.format(self.env.d_env.nodes().slaves) - ) - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": "vlan" - } - ) - self.show_step(2) - nodes = self.fuel_web.client.list_nodes() - sriov_nailgun_nodes = [n for n in nodes - if self.fuel_web.check_sriov(n['id'])] - assert_true(len(sriov_nailgun_nodes) >= 2, - 'At least 2 nodes with SR-IOV support are required for ' - 'this test! But only {0} nodes are ' - 'available!'.format(sriov_nailgun_nodes) - ) - sriov_nailgun_nodes = sriov_nailgun_nodes[:2] - sriov_n_nodes_ids = [n['id'] for n in sriov_nailgun_nodes] - other_n_nodes = [n for n in nodes if n['id'] not in sriov_n_nodes_ids] - sriov_devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - sriov_nailgun_nodes) - other_devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - other_n_nodes) - sriov_nodes = [d_node.name for d_node in sriov_devops_nodes] - other_nodes = [d_node.name for d_node in other_devops_nodes] - - assert_true(len(other_nodes) >= 5, - 'At least 5 other nodes are required for ' - 'this test! But only {0} nodes are ' - 'available!'.format(other_nodes) - ) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - other_nodes[0]: ['controller'], - other_nodes[1]: ['controller'], - other_nodes[2]: ['controller'], - other_nodes[3]: ['cinder'], - other_nodes[4]: ['compute'], - sriov_nodes[0]: ['compute'], - sriov_nodes[1]: ['compute'] - }) - - self.show_step(4) - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status='pending_roles') - - computes_with_sriov_support = [n for n in computes - if self.fuel_web.check_sriov(n['id'])] - - assert_true(computes_with_sriov_support, 'There is no compute with ' - 'SR-IOV support available!') - for compute in computes_with_sriov_support: - self.fuel_web.enable_sriov(compute['id']) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - self.fuel_web.warm_restart_nodes( - [self.fuel_web.get_devops_node_by_nailgun_node(compute) - for compute in computes_with_sriov_support], timeout=10 * 60) - - # Wait until OpenStack services are UP - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("deploy_cluster_with_sriov") diff --git a/fuelweb_test/tests/test_ssl.py b/fuelweb_test/tests/test_ssl.py deleted file mode 100644 index 5e8cfb567..000000000 --- a/fuelweb_test/tests/test_ssl.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
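The SR-IOV test above selects its target nodes by filtering the discovered node list through a capability check and capping the result at two. A toy sketch of that partitioning, with has_sriov() standing in for fuel_web.check_sriov(node_id) and made-up node dicts in place of client.list_nodes() output:

    # Toy partitioning sketch mirroring deploy_cluster_with_sriov above.
    # has_sriov() is a stand-in predicate; real nodes come from nailgun.
    def has_sriov(node):
        return node.get('sriov_capable', False)

    nodes = [
        {'id': 1, 'sriov_capable': True},
        {'id': 2},
        {'id': 3, 'sriov_capable': True},
        {'id': 4},
    ]

    sriov_nodes = [n for n in nodes if has_sriov(n)][:2]  # cap at two
    sriov_ids = [n['id'] for n in sriov_nodes]
    other_nodes = [n for n in nodes if n['id'] not in sriov_ids]

    assert sriov_ids == [1, 3]
    assert [n['id'] for n in other_nodes] == [2, 4]

Keeping the two groups disjoint is what lets the test later reboot only the SR-IOV computes while the rest of the cluster stays up.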
- -from proboscis import test -from proboscis.asserts import assert_equal -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves import http_client -# noinspection PyUnresolvedReferences -from six.moves import urllib -# pylint: enable=import-error - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.helpers.os_actions import OpenStackActions - - -@test(groups=["ssl"]) -class SSL_Tests(TestBasic): - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["master_node_with_https_only"]) - @log_snapshot_after_test - def master_node_with_https_only(self): - """Check cluster creation when SSL is enabled only on the master node - - Scenario: - 1. Create environment using fuel-qa - 2. Force master node to use https - 3. Check that we cannot connect to master node by http (port 8000) - 4. Bootstrap slave nodes and - check that they appear in nailgun - - Duration 30m - """ - self.show_step(1) - self.env.revert_snapshot("ready") - admin_ip = self.ssh_manager.admin_ip - self.show_step(2) - self.show_step(3) - connection = http_client.HTTPConnection(admin_ip, 8000) - connection.request("GET", "/") - response = connection.getresponse() - assert_equal(str(response.status), '301', - message="HTTP was not disabled for master node") - self.show_step(4) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:2]) - nodes = self.fuel_web.client.list_nodes() - assert_equal(2, len(nodes)) - self.env.make_snapshot("master_node_with_https_only", is_make=True) - - @test(depends_on=['master_node_with_https_only'], - groups=["endpoints_with_disabled_ssl"]) - @log_snapshot_after_test - def endpoints_with_disabled_ssl(self): - """Check MOS services are NOT running ssl on public endpoints - when TLS is disabled - - Scenario: - 1. Pre-condition - perform steps - from master_node_with_https_only test - 2. Create a new cluster - 3. Go to the Settings tab - 4. Disable TLS for public endpoints - 5. Add 1 controller and compute+cinder - 6. Deploy cluster - 7. Run OSTF - 8. Check that all endpoints link to plain http protocol. 
- - Duration 30m - """ - self.show_step(1) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.env.revert_snapshot("master_node_with_https_only") - self.show_step(5) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - configure_ssl=False, - mode=DEPLOYMENT_MODE) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'], - } - ) - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(7) - # Run OSTF - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke']) - self.show_step(8) - # Get controller ip address - controller_keystone_ip = self.fuel_web.get_public_vip(cluster_id) - action = OpenStackActions(controller_ip=controller_keystone_ip) - endpoint_list = action.get_keystone_endpoints() - for endpoint in endpoint_list: - url = urllib.parse.urlparse(endpoint.publicurl) - assert_equal(url.scheme, "http", - message=( - "Endpoint id {0} uses {1} instead of http.".format( - endpoint.id, url.scheme))) diff --git a/fuelweb_test/tests/test_states_unlock_settings_tab.py b/fuelweb_test/tests/test_states_unlock_settings_tab.py deleted file mode 100644 index 4a6145b3b..000000000 --- a/fuelweb_test/tests/test_states_unlock_settings_tab.py +++ /dev/null @@ -1,408 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
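The endpoint check that closes the SSL test above boils down to parsing each public URL and asserting on its scheme. A self-contained sketch with the stdlib urllib.parse (the deleted module routes this import through six.moves for Python 2 compatibility; the URLs below are illustrative, since the real list comes from OpenStackActions.get_keystone_endpoints()):

    # Sketch of the scheme assertion from endpoints_with_disabled_ssl.
    # Illustrative endpoint URLs; the test iterates keystone's catalog.
    from urllib.parse import urlparse

    public_urls = [
        'http://172.16.0.2:5000/v2.0',               # keystone
        'http://172.16.0.2:8774/v2/%(tenant_id)s',   # nova
    ]

    for public_url in public_urls:
        scheme = urlparse(public_url).scheme
        assert scheme == 'http', (
            'Endpoint {0} uses {1} instead of http'.format(
                public_url, scheme))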
- -import copy - -from netaddr import IPNetwork -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["unlock_settings_tab_states"]) -class UnlockSettingsTabStates(TestBasic): - """UnlockSettingsTabStates.""" # TODO documentation - - def __init__(self): - super(UnlockSettingsTabStates, self).__init__() - self._cluster_id = None - - @property - def cluster_id(self): - return self._cluster_id - - @cluster_id.setter - def cluster_id(self, cluster_id): - self._cluster_id = cluster_id - - def create_cluster(self): - self.cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE) - - def update_nodes(self, nodes): - self.fuel_web.update_nodes(self.cluster_id, nodes) - - def provision_nodes(self): - self.fuel_web.provisioning_cluster_wait(self.cluster_id) - - def deploy_selected_nodes(self, nodes): - logger.info( - "Start deployment of selected nodes with ids: {}".format(nodes)) - task = self.fuel_web.client.deploy_nodes(self.cluster_id, nodes) - self.fuel_web.assert_task_success(task) - - def deploy_cluster(self, do_not_fail=False): - try: - self.fuel_web.deploy_cluster_wait(self.cluster_id) - except AssertionError: - if do_not_fail: - logger.info("Cluster deployment failed due to the " - "expected error with netconfig") - else: - raise - - def get_cluster_attributes(self): - return self.fuel_web.client.get_cluster_attributes(self.cluster_id) - - def get_networks(self): - return self.fuel_web.client.get_networks(self.cluster_id) - - def get_deployed_cluster_attributes(self): - return self.fuel_web.client.get_deployed_cluster_attributes( - self.cluster_id) - - def get_deployed_network_configuration(self): - return self.fuel_web.client.get_deployed_network_configuration( - self.cluster_id) - - def get_default_cluster_settings(self): - return self.fuel_web.client.get_default_cluster_settings( - self.cluster_id) - - def update_cluster_attributes(self, attributes): - self.fuel_web.client.update_cluster_attributes(self.cluster_id, - attributes) - - def update_network_settings(self, - networking_parameters=None, networks=None): - self.fuel_web.client.update_network( - self.cluster_id, networking_parameters=networking_parameters, - networks=networks) - - @staticmethod - def change_settings(attrs): - options = {'common': ['puppet_debug', - 'resume_guests_state_on_host_boot', - 'nova_quota'], - 'public_network_assignment': ['assign_to_all_nodes'], - 'neutron_advanced_configuration': ['neutron_qos'] - } - logger.info( - "The following settings will be changed: {}".format(options)) - editable = attrs['editable'] - for group in options: - for opt in options[group]: - value = editable[group][opt]['value'] - editable[group][opt]['value'] = not value - return attrs - - @staticmethod - def compare_networks(old_settings, new_settings): - logger.debug("Old settings are: {}".format(old_settings)) - logger.debug("New settings are: {}".format(new_settings)) - for setting in old_settings: - if setting != 'networks': - if old_settings[setting] != new_settings[setting]: - return False - for net1 in old_settings['networks']: - for net2 in new_settings['networks']: - if net1['name'] == net2['name'] and set(net1) != set(net2): - return False - 
else: - continue - return True - - @staticmethod - def compare_settings(old_attrs, new_attrs): - skipped_options =\ - [ - u'service_user.password', - u'public_ssl.cert_data', - u'storage.bootstrap_osd_key', - u'storage.radosgw_key', - u'storage.admin_key', - u'storage.fsid', - u'storage.mon_key', - u'workloads_collector.password', - u'murano_settings.murano_glance_artifacts_plugin', - u'additional_components.murano_glance_artifacts_plugin', - u'common.debug', - u'external_dns.dns_list', - u'external_ntp.ntp_list', - u'public_ssl.horizon', - u'public_ssl.services', - u'public_ssl.cert_source', - u'operator_user.password', - u'neutron_advanced_configuration.metadata' - ] - logger.debug("Old default cluster settings: {}".format(old_attrs)) - logger.debug("New default cluster settings: {}".format(new_attrs)) - editable_old = old_attrs['editable'] - editable_new = new_attrs['editable'] - for group in editable_old: - for opt in editable_old[group]: - key = '.'.join([group, opt]) - if key in skipped_options or 'metadata' in key: - continue - else: - old_val = editable_old[group][opt]['value'] - new_val = editable_new[group][opt]['value'] - if old_val != new_val: - logger.debug( - "Failed key old value: {0}:{1}".format(key, - old_val)) - logger.debug( - "Failed key new value: {0}:{1}".format(key, - new_val)) - return False - return True - - def change_netconfig_task(self, fail=True): - ssh_manager = self.ssh_manager - admin_ip = ssh_manager.admin_ip - taskfile = "/etc/puppet/modules/osnailyfacter/modular/netconfig/" \ - "connectivity_tests.pp" - if fail: - cmd = \ - "echo 'fail(\"Emulate deployment failure after " \ - "netconfig!\")' >> {}".format(taskfile) - else: - cmd = "sed -i '/^fail.*$/d' {}".format(taskfile) - - ssh_manager.execute_on_remote(admin_ip, cmd) - - def run_ostf(self): - self.fuel_web.run_ostf(self.cluster_id) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["partially_deployed_unlock"]) - @log_snapshot_after_test - def partially_deployed_unlock(self): - """Check settings tab is unlocked for partially-deployed environment - - Scenario: - 1. Revert snapshot ready_with_3_slaves - 2. Create a new env - 3. Add controller and 2 computes - 4. Provision nodes without deploy - 5. Select some nodes (not all) and deploy them - 6. Download current settings and modify some of them - 7. Upload changed settings - 8. Re-deploy cluster - 9. Run OSTF - 10. 
Make snapshot - - Duration 90m - Snapshot partially_deployed_unlock - """ - self.show_step(1) - self.env.revert_snapshot("ready_with_3_slaves") - self.show_step(2) - self.create_cluster() - self.show_step(3) - nodes_dict = { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'] - } - self.update_nodes(nodes_dict) - self.show_step(4) - self.provision_nodes() - self.show_step(5) - controller_id = \ - self.fuel_web.get_nailgun_node_by_name("slave-01")['id'] - compute_id = self.fuel_web.get_nailgun_node_by_name("slave-02")['id'] - self.deploy_selected_nodes([str(controller_id), str(compute_id)]) - self.show_step(6) - attrs = self.get_cluster_attributes() - new_attrs = self.change_settings(attrs) - self.show_step(7) - self.update_cluster_attributes(new_attrs) - self.show_step(8) - self.deploy_cluster() - self.show_step(9) - self.run_ostf() - self.show_step(10) - self.env.make_snapshot("partially_deployed_unlock") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["failed_deploy_unlock"]) - @log_snapshot_after_test - def failed_deploy_unlock(self): - """Check settings tab is unlocked for an environment deployed with error - - Scenario: - 1. Revert snapshot ready_with_3_slaves - 2. Create a new env - 3. Add controller and 2 computes - 4. Change netconfig task to fail deploy - 5. Deploy the env - 6. Download current settings and modify some of them - 7. Upload changed settings - 8. Change netconfig task to normal state - 9. Re-deploy cluster - 10. Run OSTF - 11. Make snapshot - - Duration 60m - Snapshot failed_deploy_unlock - """ - self.show_step(1) - self.env.revert_snapshot("ready_with_3_slaves") - self.show_step(2) - self.create_cluster() - self.show_step(3) - nodes_dict = { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'] - } - self.update_nodes(nodes_dict) - self.show_step(4) - self.change_netconfig_task() - self.show_step(5) - self.deploy_cluster(do_not_fail=True) - self.show_step(6) - attrs = self.get_cluster_attributes() - new_attrs = self.change_settings(attrs) - self.show_step(7) - self.update_cluster_attributes(new_attrs) - self.show_step(8) - self.change_netconfig_task(fail=False) - self.show_step(9) - self.deploy_cluster() - self.show_step(10) - self.run_ostf() - self.show_step(11) - self.env.make_snapshot("failed_deploy_unlock") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["unlock_settings_tab_positive"]) - @log_snapshot_after_test - def unlock_settings_tab_positive(self): - """Check settings and network tabs are unlocked for a positively - deployed and redeployed cluster - - Scenario: - 1. Create cluster - 2. Download default cluster settings - 3. Create custom_config and upload it to cluster - 4. Add 3 nodes with controller role and 2 nodes with compute role - 5. Deploy the cluster - 6. Stop deployment process - 7. Get current settings - 8. Change and save them (that means settings are unlocked) - 9. Redeploy cluster via api - 10. Get cluster and network settings via api (api load deployed) - 11. Compare settings from steps 8 and 10 (they must be equal) - 12. Get default settings via api (load defaults) - 13. Compare settings from steps 2 and 12 (they must be equal) - 14. Redeploy cluster - 15. Stop deployment process - 16. Redeploy cluster - 17. 
Run OSTF - - Duration 50m - Snapshot unlock_settings_tab_positive - - """ - self.env.revert_snapshot("ready_with_5_slaves") - self.show_step(1) - self.create_cluster() - self.show_step(2) - default_config = self.get_cluster_attributes() - self.show_step(3) - new_config = copy.deepcopy(default_config) - editable = new_config['editable'] - editable['access']['email']['value'] = 'custom@localhost' - editable[ - 'neutron_advanced_configuration']['neutron_qos']['value'] = True - editable['common']['puppet_debug']['value'] = False - self.update_cluster_attributes(new_config) - self.show_step(4) - self.update_nodes( - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - self.show_step(5) - self.fuel_web.deploy_cluster_wait_progress(cluster_id=self.cluster_id, - progress=10) - self.show_step(6) - self.fuel_web.stop_deployment_wait(self.cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:5], timeout=10 * 60) - self.show_step(7) - new_cluster_settings = self.get_cluster_attributes() - self.show_step(8) - editable = new_cluster_settings['editable'] - editable['access']['email']['value'] = 'custom2@localhost' - editable['public_ssl']['horizon']['value'] = False - editable['public_ssl']['services']['value'] = False - self.update_cluster_attributes(new_cluster_settings) - current_network_settings = self.get_networks() - networking_parameters = \ - current_network_settings['networking_parameters'] - networking_parameters['vlan_range'] = [1015, 1030] - networking_parameters['gre_id_range'] = [3, 65535] - current_networks = current_network_settings['networks'] - for network in current_networks: - if network['cidr'] is not None and network['name'] != 'public': - cidr = IPNetwork(network['cidr']) - cidr.prefixlen += 1 - network['cidr'] = str(cidr) - network['ip_ranges'][0][1] = str(cidr[-2]) - self.update_network_settings( - networking_parameters=networking_parameters, - networks=current_networks) - self.show_step(9) - self.fuel_web.deploy_cluster_changes_wait( - self.cluster_id, new_cluster_settings) - self.show_step(10) - deployed_settings = self.get_deployed_cluster_attributes() - deployed_net_conf = self.get_deployed_network_configuration() - self.show_step(11) - assert_equal(new_cluster_settings, deployed_settings, - message="Cluster settings before deploy" - " are not equal with deployed settings") - assert_true(self.compare_networks( - current_network_settings, deployed_net_conf), - message='Network settings comparing failed') - self.show_step(12) - default_settings = self.get_default_cluster_settings() - self.show_step(13) - assert_true( - self.compare_settings(default_config, default_settings), - message='Default settings are not equal') - self.show_step(14) - self.fuel_web.redeploy_cluster_changes_wait_progress( - cluster_id=self.cluster_id, progress=30) - self.show_step(15) - self.fuel_web.stop_deployment_wait(self.cluster_id) - self.show_step(16) - self.deploy_cluster() - self.show_step(17) - self.run_ostf() - self.env.make_snapshot("unlock_settings_tab_positive") diff --git a/fuelweb_test/tests/test_support_hugepages.py b/fuelweb_test/tests/test_support_hugepages.py deleted file mode 100644 index 7b9f49f48..000000000 --- a/fuelweb_test/tests/test_support_hugepages.py +++ /dev/null @@ -1,464 +0,0 @@ -# Copyright 2016 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from devops.settings import DRIVER_PARAMETERS -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.helpers import utils -from fuelweb_test.helpers import os_actions -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from gates_tests.helpers import exceptions - - -@test(groups=["support_hugepages"]) -class SupportHugepages(TestBasic): - """SupportHugepages. - - Required environment variables: - * KVM_USE=true - * DRIVER_ENABLE_ACPI=true - * NUMA_NODES=2 - * SLAVE_NODE_CPU=4 - * SLAVE_NODE_MEMORY=5120 - """ - def __init__(self): - self.os_conn = None - super(SupportHugepages, self).__init__() - - def boot_instance_with_hugepage(self, target_compute_name, - flavor_name, flavor_ram, page_size): - - cluster_id = self.fuel_web.get_last_created_cluster() - - logger.info("Creating flavor {}, RAM: {}, PageSize: {}" - .format(flavor_name, flavor_ram, page_size)) - flavor = self.os_conn.nova.flavors.create( - name=flavor_name, - ram=flavor_ram, - vcpus=1, - disk=1 - ) - flavor.set_keys(metadata={"hw:mem_page_size": page_size}) - - target_compute = \ - self.fuel_web.get_nailgun_node_by_name(target_compute_name) - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - - logger.info("Booting instance on compute {}" - .format(target_compute["fqdn"])) - server = self.os_conn.create_server_for_migration( - neutron=True, - label=net_name, - availability_zone="nova:{0}".format(target_compute['fqdn']), - flavor_id=flavor.id) - - server = server.to_dict() - asserts.assert_equal( - server['OS-EXT-SRV-ATTR:host'], target_compute['fqdn'], - "Server scheduled on a wrong host, server data: {}".format(server)) - - instance_name = server['OS-EXT-SRV-ATTR:instance_name'] - cmd = "virsh dumpxml {}".format(instance_name) - result = "".join( - self.ssh_manager.execute(target_compute['ip'], cmd)["stdout"]) - asserts.assert_true( - "page size='{}'".format(page_size) in result, - "Virsh xml contains a different page size: {}".format(result)) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["basic_env_for_hugepages"]) - @log_snapshot_after_test - def basic_env_for_hugepages(self): - """Basic environment for hugepages - - Scenario: - 1. Create cluster - 2. Add 3 compute nodes and 1 controller node - 3. Check that computes support 2M and 1GB HugePages - 4. Verify the same HP sizes are present in CLI - 5. 
Download attributes for computes and check HP size - - Snapshot: basic_env_for_hugepages - - """ - snapshot_name = 'basic_env_for_hugepages' - self.check_run(snapshot_name) - self.env.revert_snapshot("ready_with_5_slaves") - - if not settings.KVM_USE: - raise exceptions.FuelQAVariableNotSet( - 'KVM_USE', 'true') - - if not DRIVER_PARAMETERS['enable_acpi']: - raise exceptions.FuelQAVariableNotSet( - 'DRIVER_ENABLE_ACPI', 'true') - - if settings.HARDWARE['numa_nodes'] != 2: - raise exceptions.FuelQAVariableNotSet( - 'NUMA_NODES', 2) - - if settings.HARDWARE['slave_node_cpu'] != 4: - raise exceptions.FuelQAVariableNotSet( - 'SLAVE_NODE_CPU', 4) - - if settings.HARDWARE['slave_node_memory'] != 5120: - raise exceptions.FuelQAVariableNotSet( - 'SLAVE_NODE_MEMORY', 5120) - - if not settings.INTERFACES_DICT['eth0'] == 'ens3': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_0', 'ens3') - - if not settings.INTERFACES_DICT['eth1'] == 'ens4': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_1', 'ens4') - - if not settings.INTERFACES_DICT['eth2'] == 'ens5': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_2', 'ens5') - - if not settings.INTERFACES_DICT['eth3'] == 'ens6': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_3', 'ens6') - - if not settings.INTERFACES_DICT['eth4'] == 'ens7': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_4', 'ens7') - - if not settings.INTERFACES_DICT['eth5'] == 'ens8': - raise exceptions.FuelQAVariableNotSet( - 'IFACE_5', 'ens8') - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT_TYPE - } - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['compute'], - 'slave-02': ['compute'], - 'slave-03': ['compute'], - 'slave-04': ['compute', 'cinder'], - 'slave-05': ['controller'] - }) - - self.show_step(3) - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'], role_status="pending_roles") - for compute in computes: - self.ssh_manager.execute_on_remote( - ip=compute['ip'], - cmd="grep \"pse\" /proc/cpuinfo", - err_msg="{} compute doesn't support 2Mb HugePages" - .format(compute['fqdn'])) - - self.ssh_manager.execute_on_remote( - ip=compute['ip'], - cmd="grep \"pdpe1gb\" /proc/cpuinfo", - err_msg="{} compute doesn't support 1GB HugePages" - .format(compute['fqdn'])) - - self.show_step(4) - for compute in computes: - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd="fuel2 node show {0} | grep hugepages | " - "grep 2048".format(compute['id']), - err_msg="2Mb HugePages are not present in CLI for node " - "{0}".format(compute['fqdn'])) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd="fuel2 node show {0} | grep hugepages | " - "grep 1048576".format(compute['id']), - err_msg="1Gb HugePages are not present in CLI for node " - "{0}".format(compute['fqdn'])) - - self.show_step(5) - for compute in computes: - config = self.fuel_web.client.get_node_attributes(compute['id']) - asserts.assert_true( - config['hugepages']['nova']['value']['2048'] == 0, - "Number of 2Mb HugePages for node {} is not " - "0".format(compute['fqdn'])) - asserts.assert_true( - config['hugepages']['nova']['value']['1048576'] == 0, - "Number of 1Gb HugePages for node {} is not " - "0".format(compute['fqdn'])) - - self.env.make_snapshot(snapshot_name, is_make=True) - - @test(depends_on=[basic_env_for_hugepages], - 
groups=["check_hugepages_distribution_per_numa"]) - @log_snapshot_after_test - def check_hugepages_distribution_per_numa(self): - """Deploy environment with different HugePages allocation - - Scenario: - 1. Revert basic_env_for_hugepages snapshot - 2. Configure hugepages for three computes - 3. Deploy cluster - 4. Validate available huge pages on computes - - Snapshot: check_hugepages_distribution_per_numa - """ - snapshot_name = "check_hugepages_distribution_per_numa" - self.check_run(snapshot_name) - - self.show_step(1) - self.env.revert_snapshot("basic_env_for_hugepages") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - mixed_host = "slave-01" - one_gb_host = "slave-02" - two_mb_host = "slave-03" - mixed_role_host = "slave-04" - - configs = { - mixed_host: {"cpu_pinning": {"nova": {"value": 2}}, - "hugepages": {"nova": {"value": {"2048": 258, - "1048576": 1}} - } - }, - one_gb_host: {"cpu_pinning": {"nova": {"value": 2}}, - "hugepages": {"nova": {"value": {"2048": 0, - "1048576": 2}} - } - }, - two_mb_host: {"cpu_pinning": {"nova": {"value": 2}}, - "hugepages": {"nova": {"value": {"2048": 540, - "1048576": 0}} - } - }, - mixed_role_host: {"cpu_pinning": {"nova": {"value": 2}}, - "hugepages": {"nova": {"value": {"2048": 258, - "1048576": 1}} - } - }, - } - - for compute_name, config in configs.items(): - compute_id = \ - self.fuel_web.get_nailgun_node_by_name(compute_name)['id'] - original_config = \ - self.fuel_web.client.get_node_attributes(compute_id) - self.fuel_web.client.upload_node_attributes( - utils.dict_merge(original_config, config), compute_id) - - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - for compute_name, config in configs.items(): - two_mb_count = config["hugepages"]["nova"]["value"]["2048"] - one_gb_count = config["hugepages"]["nova"]["value"]["1048576"] - - compute = self.fuel_web.get_nailgun_node_by_name(compute_name) - cmd = ("cat /sys/devices/system/node/node{}/hugepages/" - "hugepages-{}kB/nr_hugepages") - - actual_two_mb_count = 0 - actual_one_gb_count = 0 - - for numa_node in [0, 1]: - actual_two_mb_count += int("".join(self.ssh_manager.execute( - compute['ip'], cmd.format(numa_node, "2048"))["stdout"])) - - result = "".join(self.ssh_manager.execute( - compute['ip'], cmd.format(numa_node, "1048576"))["stdout"]) - - result = "0" if not result else result - actual_one_gb_count += int(result) - - asserts.assert_equal( - two_mb_count, actual_two_mb_count, - "Actual number of allocated 2Mb pages is {}, expected {}" - .format(actual_two_mb_count, two_mb_count)) - asserts.assert_equal( - one_gb_count, actual_one_gb_count, - "Actual number of allocated 1Gb pages is {}, expected {}" - .format(actual_one_gb_count, one_gb_count)) - - self.env.make_snapshot(snapshot_name, is_make=True) - - @test(depends_on=[check_hugepages_distribution_per_numa], - groups=["check_instance_one_gb_page_size_one_gb_host"]) - @log_snapshot_after_test - def check_instance_one_gb_page_size_one_gb_host(self): - """Boot instance with 1 Gb page size on host with only 1 Gb HugePages - - Scenario: - 1. Revert snapshot "check_hugepages_distribution_per_numa" - 2. 
Boot and validate instance on compute with only 1 Gb pages - """ - self.env.revert_snapshot("check_hugepages_distribution_per_numa") - - cluster_id = self.fuel_web.get_last_created_cluster() - controller_ip = self.fuel_web.get_public_vip(cluster_id) - self.os_conn = os_actions.OpenStackActions(controller_ip) - one_gb_host = "slave-02" - - self.boot_instance_with_hugepage( - target_compute_name=one_gb_host, - flavor_name="h1.huge.hpgs", - flavor_ram=1024, - page_size=1048576) - - @test(depends_on=[check_hugepages_distribution_per_numa], - groups=["check_instance_two_mb_page_size_two_mb_host"]) - @log_snapshot_after_test - def check_instance_two_mb_page_size_two_mb_host(self): - """Boot instance with 2 Mb page size on host with only 2 Mb HugePages - - Scenario: - 1. Revert snapshot "check_hugepages_distribution_per_numa" - 2. Boot and validate instance on compute with only 2 Mb pages - """ - self.env.revert_snapshot("check_hugepages_distribution_per_numa") - - cluster_id = self.fuel_web.get_last_created_cluster() - controller_ip = self.fuel_web.get_public_vip(cluster_id) - self.os_conn = os_actions.OpenStackActions(controller_ip) - two_mb_host = "slave-03" - - self.boot_instance_with_hugepage( - target_compute_name=two_mb_host, - flavor_name="h1.small.hpgs", - flavor_ram=512, - page_size=2048) - - @test(depends_on=[check_hugepages_distribution_per_numa], - groups=["check_instance_one_gb_page_size_mixed_size_host"]) - @log_snapshot_after_test - def check_instance_one_gb_page_size_mixed_size_host(self): - """Boot instance with 1 Gb page size on host with both HugePages types - - Scenario: - 1. Revert snapshot "check_hugepages_distribution_per_numa" - 2. Boot and validate instance on compute with both page types - """ - self.env.revert_snapshot("check_hugepages_distribution_per_numa") - - cluster_id = self.fuel_web.get_last_created_cluster() - controller_ip = self.fuel_web.get_public_vip(cluster_id) - self.os_conn = os_actions.OpenStackActions(controller_ip) - mixed_host = "slave-01" - - self.boot_instance_with_hugepage( - target_compute_name=mixed_host, - flavor_name="h1.huge_mixed.hpgs", - flavor_ram=1024, - page_size=1048576) - - @test(depends_on=[check_hugepages_distribution_per_numa], - groups=["check_instance_two_mb_page_size_mixed_size_host"]) - @log_snapshot_after_test - def check_instance_two_mb_page_size_mixed_size_host(self): - """Boot instance with 2 Mb page size on host with both HugePages types - - Scenario: - 1. Revert snapshot "check_hugepages_distribution_per_numa" - 2. Boot and validate instance on compute with both page types - """ - self.env.revert_snapshot("check_hugepages_distribution_per_numa") - - cluster_id = self.fuel_web.get_last_created_cluster() - controller_ip = self.fuel_web.get_public_vip(cluster_id) - self.os_conn = os_actions.OpenStackActions(controller_ip) - mixed_host = "slave-01" - - self.boot_instance_with_hugepage( - target_compute_name=mixed_host, - flavor_name="h1.small_mixed.hpgs", - flavor_ram=128, - page_size=2048) - - @test(depends_on=[check_hugepages_distribution_per_numa], - groups=["check_hugepages_nova_scheduler"]) - @log_snapshot_after_test - def check_instance_two_mb_page_size_mixed_role_host(self): - """Boot instance with both HP sizes on host with Cinder+Compute role - - Scenario: - 1. Revert snapshot "check_hugepages_distribution_per_numa" - 2. Boot and validate instance on compute+cinder node with 2Mb - 3. 
Boot and validate instance on compute+cinder node with 1Gb - """ - self.env.revert_snapshot("check_hugepages_distribution_per_numa") - - cluster_id = self.fuel_web.get_last_created_cluster() - controller_ip = self.fuel_web.get_public_vip(cluster_id) - self.os_conn = os_actions.OpenStackActions(controller_ip) - mixed_role_host = "slave-04" - - self.boot_instance_with_hugepage( - target_compute_name=mixed_role_host, - flavor_name="h1.small_mixed_roles.hpgs", - flavor_ram=128, - page_size=2048) - - self.boot_instance_with_hugepage( - target_compute_name=mixed_role_host, - flavor_name="h1.huge_mixed_roles.hpgs", - flavor_ram=1024, - page_size=1048576) - - @test(depends_on=[check_hugepages_distribution_per_numa], - groups=["check_hugepages_after_reboot"]) - @log_snapshot_after_test - def check_hugepages_after_reboot(self): - """Boot instances with both HP sizes on compute after reboot - - Scenario: - 1. Revert snapshot "check_hugepages_distribution_per_numa" - 2. Reboot node with mixed reserved HugePages - 3. Boot and validate instance with 2Mb page size - 4. Boot and validate instance with 1Gb page size - """ - self.env.revert_snapshot("check_hugepages_distribution_per_numa") - - cluster_id = self.fuel_web.get_last_created_cluster() - controller_ip = self.fuel_web.get_public_vip(cluster_id) - self.os_conn = os_actions.OpenStackActions(controller_ip) - mixed_host = "slave-01" - - target_compute = self.fuel_web.get_devops_node_by_nailgun_node( - self.fuel_web.get_nailgun_node_by_name(mixed_host)) - self.fuel_web.cold_restart_nodes([target_compute]) - - self.boot_instance_with_hugepage( - target_compute_name=mixed_host, - flavor_name="h1.small_mixed.hpgs", - flavor_ram=128, - page_size=2048) - - self.boot_instance_with_hugepage( - target_compute_name=mixed_host, - flavor_name="h1.huge_mixed.hpgs", - flavor_ram=1024, - page_size=1048576) diff --git a/fuelweb_test/tests/test_ubuntu_bootstrap.py b/fuelweb_test/tests/test_ubuntu_bootstrap.py deleted file mode 100644 index 127042082..000000000 --- a/fuelweb_test/tests/test_ubuntu_bootstrap.py +++ /dev/null @@ -1,607 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import tempfile -import textwrap - -from devops.error import DevopsCalledProcessError -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_not_equal -from proboscis.asserts import assert_raises -from proboscis.asserts import assert_true -from proboscis import SkipTest -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests import base_test_case - - -@test(groups=["ubuntu_bootstrap_builder", "bvt_ubuntu_bootstrap"]) -class UbuntuBootstrapBuild(base_test_case.TestBasic): - @staticmethod - def _get_main_repo(repos, repo_name, suite_type): - for repo in repos: - if repo_name not in repo["name"]: - continue - - if suite_type == "main" and not any(["updates" in repo["suite"], - "security" in repo["suite"], - "proposed" in repo["suite"]]): - return repo - - else: - if suite_type in repo["suite"]: - return repo - - raise Exception("Cannot find repo '{0}' suite '{1}' in repo list: {2}" - .format(repo_name, suite_type, repos)) - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_release], - groups=["build_default_bootstrap"]) - @log_snapshot_after_test - def build_default_bootstrap(self): - """Verify that slaves retrieved the default Ubuntu bootstrap - - Scenario: - 1. Revert snapshot ready - 2. Build and activate Ubuntu bootstrap with default settings - 3. Bootstrap slaves - 4. Verify Ubuntu bootstrap on slaves - - Duration: 20m - Snapshot: build_default_bootstrap - """ - self.env.revert_snapshot("ready") - - uuid, bootstrap_location = \ - self.env.fuel_bootstrap_actions.build_bootstrap_image() - self.env.fuel_bootstrap_actions.\ - import_bootstrap_image(bootstrap_location) - self.env.fuel_bootstrap_actions.\ - activate_bootstrap_image(uuid) - - nodes = self.env.d_env.get_nodes( - name__in=["slave-01", "slave-02", "slave-03"]) - self.env.bootstrap_nodes(nodes) - - for node in nodes: - _ip = self.fuel_web.get_nailgun_node_by_devops_node(node)['ip'] - checkers.verify_bootstrap_on_node(_ip, os_type="ubuntu", uuid=uuid) - - self.env.make_snapshot("build_default_bootstrap", is_make=True) - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_release], - groups=["build_simple_bootstrap"]) - @log_snapshot_after_test - def build_simple_bootstrap(self): - """Verify that slaves retrieved Ubuntu bootstrap with an extra package - - Scenario: - 1. Revert snapshot ready - 2. Build and activate Ubuntu bootstrap with extra package - 3. Bootstrap slaves - 4. 
Verify Ubuntu bootstrap on slaves - - Duration: 20m - """ - self.env.revert_snapshot("ready") - - bootstrap_params = { - "ubuntu-release": "xenial", - "label": "UbuntuBootstrap", - "output-dir": "/tmp", - "package": ["ipython"] - } - - uuid, bootstrap_location = \ - self.env.fuel_bootstrap_actions.build_bootstrap_image( - **bootstrap_params) - - self.env.fuel_bootstrap_actions.\ - import_bootstrap_image(bootstrap_location) - self.env.fuel_bootstrap_actions.\ - activate_bootstrap_image(uuid) - - nodes = self.env.d_env.get_nodes( - name__in=["slave-01", "slave-02", "slave-03"]) - self.env.bootstrap_nodes(nodes) - - for node in nodes: - n_node = self.fuel_web.get_nailgun_node_by_devops_node(node) - checkers.verify_bootstrap_on_node(n_node['ip'], - os_type="ubuntu", - uuid=uuid) - - ipython_version = utils.get_package_versions_from_node( - n_node['ip'], name="ipython", os_type="Ubuntu") - assert_not_equal(ipython_version, "") - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_release], - groups=["build_full_bootstrap"]) - @log_snapshot_after_test - def build_full_bootstrap(self): - """Verify that slaves retrieved Ubuntu bootstrap with extra settings - - Scenario: - 1. Revert snapshot ready - 2. Build and activate Ubuntu bootstrap with extra settings - 3. Bootstrap slaves - 4. Verify Ubuntu bootstrap on slaves - - Duration: 20m - """ - self.env.revert_snapshot("ready") - - with self.env.d_env.get_admin_remote() as remote: - bootstrap_script = '''\ - #!/bin/bash - - echo "testdata" > /test_bootstrap_script - apt-get install ipython -y - ''' - - with tempfile.NamedTemporaryFile() as temp_file: - temp_file.write(textwrap.dedent(bootstrap_script)) - temp_file.flush() - remote.mkdir("/root/bin") - remote.upload(temp_file.name, "/root/bin/bootstrap_script.sh") - - remote.mkdir("/root/inject/var/lib/testdir") - remote.mkdir("/root/inject/var/www/testdir2") - - kernel_cmdline = ["biosdevname=0", "net.ifnames=1", "debug", - "ignore_loglevel", "log_buf_len=10M"] - - bootstrap_params = { - "ubuntu-release": "xenial", - "direct-repo-addr": [self.env.admin_node_ip], - "script": "/root/bin/bootstrap_script.sh", - "label": "UbuntuBootstrap", - "extra-dir": ["/root/inject/"], - "extend-kopts": "'{0}'".format(" ".join(kernel_cmdline)), - "kernel-flavor": "linux-generic-lts-xenial", - "output-dir": "/tmp", - "package": ["fuse", "sshfs"], - } - - uuid, bootstrap_location = \ - self.env.fuel_bootstrap_actions.build_bootstrap_image( - **bootstrap_params) - self.env.fuel_bootstrap_actions.\ - import_bootstrap_image(bootstrap_location) - self.env.fuel_bootstrap_actions.\ - activate_bootstrap_image(uuid) - - nodes = self.env.d_env.get_nodes( - name__in=["slave-01", "slave-02", "slave-03"]) - self.env.bootstrap_nodes(nodes) - - for node in nodes: - n_node = self.fuel_web.get_nailgun_node_by_devops_node(node) - with self.fuel_web.get_ssh_for_node(node.name) as slave_remote: - checkers.verify_bootstrap_on_node(n_node['ip'], - os_type="ubuntu", - uuid=uuid) - - for package in ['ipython', 'fuse', 'sshfs']: - package_version = utils.get_package_versions_from_node( - n_node['ip'], name=package, os_type="Ubuntu") - assert_not_equal(package_version, "", - "Package {0} is not installed on slave " - "{1}".format(package, node.name)) - - for injected_dir in ["/var/lib/testdir", "/var/www/testdir2"]: - checkers.check_file_exists(n_node['ip'], injected_dir) - - file_content = \ - slave_remote.execute("cat /test_bootstrap_script") - assert_equal("".join(file_content["stdout"]).strip(), - "testdata") - - 
actual_kernel_cmdline = "".join( - slave_remote.execute("cat /proc/cmdline")["stdout"]) - - for kernel_opt in kernel_cmdline: - assert_true(kernel_opt in actual_kernel_cmdline, - "No {0} option in cmdline: {1}" - .format(kernel_opt, actual_kernel_cmdline)) - - @test(depends_on_groups=["build_default_bootstrap"], - groups=["create_list_import_delete_bootstrap_image"]) - @log_snapshot_after_test - def create_list_import_delete_bootstrap_image(self): - """Validate CRD operations of fuel-bootstrap utility - - Scenario: - 1. Revert snapshot build_default_bootstrap - 2. Build an Ubuntu bootstrap image with default settings - 3. Validate it is available in images list - 4. Delete Ubuntu bootstrap image - 5. Validate it is not available and cannot be activated - 6. Validate restriction for deleting active image - - Duration 30m - """ - self.env.revert_snapshot("build_default_bootstrap") - - expected_bootstrap_uuids = \ - self.env.fuel_bootstrap_actions.list_bootstrap_images_uuids() - - uuid, bootstrap_location = \ - self.env.fuel_bootstrap_actions.build_bootstrap_image() - self.env.fuel_bootstrap_actions.\ - import_bootstrap_image(bootstrap_location) - - bootstrap_uuids = self.env.fuel_bootstrap_actions.\ - list_bootstrap_images_uuids() - assert_true(uuid in bootstrap_uuids, - "Newly built bootstrap image {0} is not in the list of " - "available images: {1}".format(uuid, bootstrap_uuids)) - - assert_equal( - len(expected_bootstrap_uuids) + 1, len(bootstrap_uuids), - "Only {0} bootstrap images should be available; current list: " - "\n{1}".format(len(expected_bootstrap_uuids) + 1, bootstrap_uuids)) - - self.env.fuel_bootstrap_actions.delete_bootstrap_image(uuid) - - bootstrap_uuids = self.env.fuel_bootstrap_actions.\ - list_bootstrap_images_uuids() - assert_true(uuid not in bootstrap_uuids, - "Bootstrap {0} was not deleted and is still available: {1}" - .format(uuid, bootstrap_uuids)) - - assert_raises(DevopsCalledProcessError, - self.env.fuel_bootstrap_actions.activate_bootstrap_image, - uuid) - - assert_equal( - len(expected_bootstrap_uuids), len(bootstrap_uuids), - "Only {0} bootstrap images should be available; current list: " - "\n{1}".format(len(expected_bootstrap_uuids), bootstrap_uuids)) - - uuid = self.env.fuel_bootstrap_actions.get_active_bootstrap_uuid() - # we need to fail in case uuid is None, otherwise the assert_raises - # will use: uuid = None - assert_true(uuid is not None, "No active bootstrap. Possibly centos " - "is active or something went wrong.") - assert_raises( - DevopsCalledProcessError, - self.env.fuel_bootstrap_actions.delete_bootstrap_image, - uuid) - - -@test(groups=["ubuntu_bootstrap_deploy", "bvt_ubuntu_bootstrap"]) -class UbuntuBootstrap(base_test_case.TestBasic): - @test(depends_on=[base_test_case.SetupEnvironment.prepare_release], - groups=["deploy_with_two_ubuntu_bootstraps"]) - @log_snapshot_after_test - def deploy_with_two_ubuntu_bootstraps(self): - """Deploy cluster with two different bootstrap images - - Scenario: - 1. Boot two nodes - 2. Validate bootstrap - 3. Build another bootstrap image - 4. Boot additional node - 5. Validate new bootstrap - 6. Create cluster in HA mode with 1 controller, 1 compute - and 1 cinder node - 7. Deploy cluster - 8. Verify network - 9. 
Run OSTF - - Duration 45m - """ - if not self.env.revert_snapshot('ready'): - raise SkipTest('Required snapshot not found') - - uuid = self.env.fuel_bootstrap_actions.get_active_bootstrap_uuid() - - nodes = self.env.d_env.get_nodes( - name__in=["slave-01", "slave-02"]) - - self.env.bootstrap_nodes(nodes) - for node in nodes: - checkers.verify_bootstrap_on_node( - self.env.fuel_web.get_node_ip_by_devops_name(node.name), - os_type="ubuntu", - uuid=uuid) - - new_uuid, _ = \ - self.env.fuel_bootstrap_actions.build_bootstrap_image( - activate=True) - new_node = self.env.d_env.get_node(name="slave-03") - self.env.bootstrap_nodes([new_node]) - checkers.verify_bootstrap_on_node( - self.env.fuel_web.get_node_ip_by_devops_name(new_node.name), - os_type="ubuntu", - uuid=new_uuid) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'tenant': 'stop_deploy', - 'user': 'stop_deploy', - 'password': 'stop_deploy', - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - - expected_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - - assert_equal( - len(expected_nodes), - len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke']) - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3], - groups=["deploy_stop_on_deploying_ubuntu_bootstrap"]) - @log_snapshot_after_test - def deploy_stop_on_deploying_ubuntu_bootstrap(self): - """Stop reset cluster in HA mode with 1 controller on Ubuntu Bootstrap - - Scenario: - 1. Create cluster in Ha mode with 1 controller - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Add 1 node with cinder role - 5. Verify network - 6. Provision nodes - 7. Make a test file on every node - 8. Deploy nodes - 9. Stop deployment - 10. Verify nodes are not reset to bootstrap image - 11. Re-deploy cluster - 12. Verify network - 13. 
Run OSTF - - Duration 45m - Snapshot: deploy_stop_on_deploying_ubuntu_bootstrap - """ - - if not self.env.revert_snapshot('ready_with_3_slaves'): - raise SkipTest('Required snapshot not found') - - def check_node(ssh_manager_executor, ip): - cmd = 'grep bootstrap /etc/hostname' - err_msg = "Node with ip {:s} was reset to bootstrap".format(ip) - ssh_manager_executor(ip=ip, - cmd=cmd, - err_msg=err_msg, - assert_ec_equal=[1]) - cmd = 'grep test ~/test' - ssh_manager_executor(ip=ip, - cmd=cmd, - err_msg=err_msg, - assert_ec_equal=[0]) - - def make_test_file(ssh_manager_executor, ip): - cmd = 'echo test >> ~/test' - ssh_manager_executor(ip, cmd) - - self.show_step(step=1, initialize=True) - - executor = self.ssh_manager.execute_on_remote - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'tenant': 'stop_deploy', - 'user': 'stop_deploy', - 'password': 'stop_deploy', - } - ) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.provisioning_cluster_wait(cluster_id) - - self.show_step(7) - nodes_ips = [ - self.fuel_web.get_nailgun_node_by_devops_node(node)['ip'] - for node in self.env.d_env.get_nodes( - name__in=['slave-01', 'slave-02', 'slave-03'])] - - for _ip in nodes_ips: - make_test_file(executor, _ip) - - self.show_step(8) - self.fuel_web.deploy_task_wait(cluster_id=cluster_id, progress=30) - - self.show_step(9) - self.fuel_web.stop_deployment_wait(cluster_id) - - self.show_step(10) - for _ip in nodes_ips: - check_node(executor, _ip) - - self.show_step(11) - self.fuel_web.deploy_cluster_wait(cluster_id) - - assert_equal( - len(nodes_ips), - len(self.fuel_web.client.list_cluster_nodes(cluster_id))) - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - - self.show_step(13) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke']) - - self.env.make_snapshot( - "deploy_stop_on_deploying_ubuntu_bootstrap", - is_make=True) - - @test(depends_on_groups=['deploy_stop_on_deploying_ubuntu_bootstrap'], - groups=["deploy_reset_on_ready_ubuntu_bootstrap"]) - @log_snapshot_after_test - def reset_on_ready_ubuntu_bootstrap(self): - """Stop reset cluster in HA mode with 1 controller on Ubuntu Bootstrap - - Scenario: - 1. Reset cluster - 2. Verify bootstrap on slaves - 3. Re-deploy cluster - 4. Verify network - 5. 
Run OSTF - - Duration 30m - """ - - if not self.env.revert_snapshot( - 'deploy_stop_on_deploying_ubuntu_bootstrap'): - raise SkipTest('Required snapshot not found') - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Reset environment, - # then verify bootstrap on slaves and re-deploy cluster - self.fuel_web.stop_reset_env_wait(cluster_id) - - nodes = self.env.d_env.get_nodes( - name__in=["slave-01", "slave-02", "slave-03"]) - - self.fuel_web.wait_nodes_get_online_state(nodes, timeout=10 * 60) - for node in nodes: - nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(node) - wait(lambda: tcp_ping(nailgun_node['ip'], 22), - timeout=300, - timeout_msg=("Node {0} is still unreachable after {1} " - "seconds".format(nailgun_node['name'], 300))) - checkers.verify_bootstrap_on_node( - nailgun_node['ip'], os_type="ubuntu") - - self.fuel_web.deploy_cluster_wait(cluster_id) - - # Network verification - self.fuel_web.verify_network(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke']) - - @test(depends_on_groups=['deploy_stop_on_deploying_ubuntu_bootstrap'], - groups=["delete_on_ready_ubuntu_bootstrap"]) - @log_snapshot_after_test - def delete_on_ready_ubuntu_bootstrap(self): - """Delete cluster in HA mode\ - with 1 controller on Ubuntu Bootstrap - - Scenario: - 1. Delete cluster - 2. Verify bootstrap on slaves - - Duration 30m - Snapshot: delete_on_ready_ubuntu_bootstrap - """ - if not self.env.revert_snapshot( - 'deploy_stop_on_deploying_ubuntu_bootstrap'): - raise SkipTest('Required snapshot not found') - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Delete cluster, then verify bootstrap on slaves - self.fuel_web.client.delete_cluster(cluster_id) - - # wait for nodes to reboot - wait(lambda: not self.fuel_web.client.list_nodes(), timeout=10 * 60, - timeout_msg='Timeout while waiting for nodes to become offline') - - # wait for nodes to appear after bootstrap - wait(lambda: len(self.fuel_web.client.list_nodes()) == 3, - timeout=10 * 60, - timeout_msg='Timeout while waiting for nodes to become online') - - nodes = self.env.d_env.get_nodes( - name__in=["slave-01", "slave-02", "slave-03"]) - for node in nodes: - _ip = self.fuel_web.get_nailgun_node_by_devops_node(node)['ip'] - checkers.verify_bootstrap_on_node(_ip, os_type="ubuntu") - - self.env.make_snapshot( - "delete_on_ready_ubuntu_bootstrap", - is_make=True) - - @test(depends_on_groups=['deploy_stop_on_deploying_ubuntu_bootstrap'], - groups=["delete_node_on_ready_ubuntu_bootstrap"]) - @log_snapshot_after_test - def delete_node_on_ready_ubuntu_bootstrap(self): - """Delete node from cluster in HA mode\ - with 1 controller on Ubuntu Bootstrap - - Scenario: - 1. Delete node - 2. 
Verify bootstrap on slaves - - Duration 30m - Snapshot: delete_on_ready_ubuntu_bootstrap - """ - if not self.env.revert_snapshot( - 'deploy_stop_on_deploying_ubuntu_bootstrap'): - raise SkipTest('Required snapshot not found') - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Delete node, then verify bootstrap on slaves - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-03': ['cinder'] - }, - pending_addition=False, - pending_deletion=True - ) - - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - - # wait for nodes to appear after bootstrap - wait(lambda: len(self.fuel_web.client.list_nodes()) == 3, - timeout=10 * 60, - timeout_msg='Timeout while waiting for nodes to become online') - self.fuel_web.verify_network(cluster_id) - - node = self.fuel_web.get_nailgun_node_by_name("slave-03") - checkers.verify_bootstrap_on_node(node['ip'], os_type="ubuntu") diff --git a/fuelweb_test/tests/test_unlock_settings_tab.py b/fuelweb_test/tests/test_unlock_settings_tab.py deleted file mode 100644 index f6d765251..000000000 --- a/fuelweb_test/tests/test_unlock_settings_tab.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import yaml - -from devops.error import TimeoutError -from proboscis import SkipTest -from proboscis import test -from proboscis.asserts import assert_false -from keystoneauth1.exceptions import HttpError -# pylint: disable=redefined-builtin -from six.moves import xrange -# pylint: enable=redefined-builtin - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import SettingsChanger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["unlock_settings_tab"]) -class UnlockSettingsTab(TestBasic): - """UnlockSettingsTab.""" # TODO documentation - - def __init__(self): - super(UnlockSettingsTab, self).__init__() - self._cluster_id = None - self._cluster_name = None - - @property - def cluster_id(self): - return self._cluster_id - - @cluster_id.setter - def cluster_id(self, cluster_id): - self._cluster_id = cluster_id - - @property - def cluster_name(self): - return self._cluster_name - - @cluster_name.setter - def cluster_name(self, cluster_name): - self._cluster_name = cluster_name - - @staticmethod - def load_config_from_file(path_to_conf=None): - if not path_to_conf: - logger.error("Please, specify file to load config from") - raise SkipTest("File with config is not specified. 
" - "Aborting the test") - with open(path_to_conf, 'r') as f: - try: - config = yaml.load(f) - return config - except ValueError: - logger.error("Check config file for consistency") - raise - - def revert_snapshot(self, nodes_count): - """ - :param nodes_count: number of nodes - :return: nothing, but reverts snapshot - """ - if nodes_count == 1: - num = '1' - elif nodes_count <= 3: - num = '3' - elif nodes_count <= 5: - num = '5' - else: - num = '9' - self.env.revert_snapshot('ready_with_{}_slaves'.format(num)) - - @staticmethod - def check_config_for_ceph(attrs): - storage = attrs['editable']['storage'] - options_to_check = ['volumes_ceph', 'objects_ceph', 'images_ceph', - 'ephemeral_ceph'] - for option in options_to_check: - if storage[option]['value']: - pool_size = storage['osd_pool_size']['value'] - return int(pool_size) - return None - - @staticmethod - def get_existed_ceph_nodes_count(conf): - nodes = conf['nodes'] - return len([node for node in nodes if 'ceph-osd' in nodes[node]]) - - def add_ceph_nodes(self, count, ceph_nodes_count): - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[count:count + ceph_nodes_count], - skip_timesync=True) - nodes = {'slave-0{}'.format(i): ['ceph-osd'] - for i in xrange(count + 1, count + ceph_nodes_count + 1)} - self.fuel_web.update_nodes(self.cluster_id, nodes) - - def load_config(self, file_name): - conf_path = os.path.dirname(os.path.abspath(__file__)) - cluster_conf = \ - self.load_config_from_file(os.path.join(conf_path, file_name)) - return cluster_conf - - def create_cluster(self, conf): - self.cluster_name = '_'.join([self.__class__.__name__, conf['name']]) - cluster_settings = { - "net_provider": conf['network']['net_provider'], - "net_segment_type": conf['network']['net_segment_type']} - if conf.get('settings'): - cluster_settings.update(conf['settings']) - - self.cluster_id = self.fuel_web.create_cluster( - name=self.cluster_name, - mode=settings.DEPLOYMENT_MODE, - settings=cluster_settings) - - def update_nodes(self, conf): - self.fuel_web.update_nodes( - self.cluster_id, - conf['nodes']) - - def deploy_cluster(self): - try: - self.fuel_web.deploy_cluster_wait(self.cluster_id) - except (AssertionError, TimeoutError): - self.env.make_snapshot( - "error_" + self.cluster_name, is_make=True) - return False - else: - return True - - def get_cluster_attributes(self): - return self.fuel_web.client.get_cluster_attributes(self.cluster_id) - - def update_cluster_attributes(self, new_attrs): - try: - self.fuel_web.client.update_cluster_attributes( - self.cluster_id, new_attrs) - except HttpError: - logger.info( - "Failed to update cluster attributes, please check logs") - return False - else: - return True - - def run_ostf(self): - try: - self.fuel_web.run_ostf(cluster_id=self.cluster_id) - except AssertionError: - logger.info("Some OSTF tests are failed. Check logs.") - self.env.make_snapshot( - "error_" + self.cluster_name, is_make=True) - return False - else: - return True - - @test(depends_on=[SetupEnvironment.prepare_slaves_1, - SetupEnvironment.prepare_slaves_3, - SetupEnvironment.prepare_slaves_5, - SetupEnvironment.prepare_slaves_9], - groups=["deploy_with_redeploy_and_modify_settings"]) - @log_snapshot_after_test - def deploy_with_redeploy_and_modify_settings(self): - """Deploy iteratively clusters from config, modify settings, redeploy - - Scenario: - 1. Load clusters' configurations from the file - 2. Revert snapshot with appropriate nodes count - 3. Create a cluster from config - 4. Update nodes accordingly to the config - 5. 
Deploy the cluster - 6. Run OSTF - 7. Get cluster attributes - 8. Randomly modify cluster attributes - 9. Add ceph nodes if needed - 10. Update cluster attributes with the changed ones - 11. Redeploy cluster - 12. Run OSTF - 13. Go to the next config - - Duration xxx m - Snapshot will be made for all failed configurations - """ - fail_trigger = False - failed_confs = [] - self.show_step(1) - for conf in self.load_config('cluster_configs.yaml'): - logger.info( - "Creating cluster from config with name: {}".format( - conf['name'])) - self.show_step(2, details=conf['name'], initialize=True) - self.revert_snapshot(len(conf['nodes'])) - self.show_step(3, details=conf['name']) - self.create_cluster(conf) - self.show_step(4, details=conf['name']) - self.update_nodes(conf) - self.show_step(5, details=conf['name']) - if not self.deploy_cluster(): - logger.error( - "Initial deployment of cluster {0} " - "with config name {1} failed. " - "Go to the next config".format( - self.cluster_name, conf['name'])) - fail_trigger = True - failed_confs.append(conf['name']) - continue - - self.show_step(6, details=conf['name']) - if not self.run_ostf(): - fail_trigger = True - failed_confs.append(conf['name']) - logger.error( - "Failed to pass OSTF tests for the first-time deployed " - "cluster with config {}".format(conf['name'])) - continue - - self.show_step(7, details=conf['name']) - attrs = self.get_cluster_attributes() - self.show_step(8, details=conf['name']) - changer = SettingsChanger(attrs) - logger.info( - "The options below will NOT be changed: {}".format( - changer.SKIPPED_FIELDS_LIST)) - changer.make_changes(options=None, randomize=30) - new_attrs = changer.attrs - self.show_step(9, details=conf['name']) - ceph_nodes_count = self.check_config_for_ceph(new_attrs) - existed_ceph_count = self.get_existed_ceph_nodes_count(conf) - if ceph_nodes_count > existed_ceph_count: - count = len(conf['nodes']) - if count + ceph_nodes_count > settings.NODES_COUNT - 1: - logger.info("There are not enough nodes to redeploy with " - "ceph nodes pool size. Go to the next config") - continue - self.add_ceph_nodes(count, ceph_nodes_count) - - self.show_step(10, details=conf['name']) - if not self.update_cluster_attributes(new_attrs): - fail_trigger = True - failed_confs.append(conf['name']) - logger.error( - "Failed to update cluster attributes with the changed ones") - continue - - self.show_step(11, details=conf['name']) - if not self.deploy_cluster(): - logger.error( - "Redeployment of cluster {0} " - "with config name {1} failed. 
" - "Go to the next config".format( - self.cluster_name, conf['name'])) - fail_trigger = True - failed_confs.append(conf['name']) - continue - - # Run ostf - self.show_step(12, details=conf['name']) - if not self.run_ostf(): - fail_trigger = True - failed_confs.append(conf['name']) - logger.error("Failed to pass OSTF tests for redeployed " - "cluster with config {}".format(conf['name'])) - continue - logger.info( - "Redeployment and OSTF were successfully " - "executed for cluster {}".format(self.cluster_name)) - - self.show_step(13, details=conf['name']) - - if fail_trigger: - assert_false(fail_trigger, - "A few configurations were failed: {} " - "Please, check logs".format(failed_confs)) diff --git a/fuelweb_test/tests/tests_cli/__init__.py b/fuelweb_test/tests/tests_cli/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_cli/test_cli_deploy.py b/fuelweb_test/tests/tests_cli/test_cli_deploy.py deleted file mode 100644 index ed25bb6b4..000000000 --- a/fuelweb_test/tests/tests_cli/test_cli_deploy.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import re - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests import test_cli_base - - -@test(groups=["cli_acceptance_deployment_tests"]) -class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine): - """CommandLineAcceptanceDeploymentTests.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_deploy_neutron_tun"]) - @log_snapshot_after_test - def cli_deploy_neutron_tun(self): - """Deployment with 1 controller, NeutronTUN - - Scenario: - 1. Create new environment using fuel-qa - 2. Choose Neutron, TUN - 3. Add 1 controller - 4. Add 1 compute - 5. Add 1 cinder - 6. Update nodes interfaces - 7. Verify networks - 8. Deploy the environment - 9. Verify networks - 10. 
Run OSTF tests - - Duration 40m - """ - self.env.revert_snapshot("ready_with_3_slaves") - - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()]) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - - self.show_step(1, initialize=True) - self.show_step(2) - cmd = ('fuel2 env create {0} -r {1} ' - '-nst tun -f json'.format(self.__class__.__name__, - release_id)) - - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - - self.update_cli_network_configuration(cluster_id) - - self.update_ssl_configuration(cluster_id) - self.set_public_networks_for_all_nodes(cluster_id) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.add_nodes_to_cluster(cluster_id, node_ids[0], ['controller']) - self.add_nodes_to_cluster(cluster_id, node_ids[1], ['compute']) - self.add_nodes_to_cluster(cluster_id, node_ids[2], ['cinder']) - self.show_step(6) - for node_id in node_ids: - self.update_node_interfaces(node_id) - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - self.show_step(8) - cmd = 'fuel2 env deploy {0}'.format(cluster_id) - - task = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - task_id = re.findall('id (\d+)', task['stdout_str']) - task = {'id': task_id[0], 'name': 'deploy'} - self.assert_cli_task_success(task, timeout=130 * 60) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_deploy_tasks"]) - @log_snapshot_after_test - def cli_deploy_tasks(self): - """Deployment with 3 controllers, NeutronVLAN - - Scenario: - 1. Create new environment - 2. Choose Neutron, Vlan - 3. Add 3 controllers - 4. Update nodes interfaces - 5. Provision 3 controllers - (fuel2 env nodes provision -n x,x,x -e ) - 6. Start rsync_core_puppet task on second controller - (fuel2 graph execute -e 1 -t default -T rsync_core_puppet -n 2) - 7. Deploy controller nodes - (fuel2 env deploy ) - 8. Verify networks - 9. Run OSTF tests - - Duration 50m - """ - self.env.revert_snapshot("ready_with_3_slaves") - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()]) - - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - - self.show_step(1) - self.show_step(2) - cmd = ('fuel2 env create {0} -r {1} ' - '-nst vlan -f json'.format(self.__class__.__name__, - release_id)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - self.show_step(3) - self.add_nodes_to_cluster(cluster_id, node_ids[0:3], - ['controller']) - self.show_step(4) - for node_id in node_ids: - self.update_node_interfaces(node_id) - self.show_step(5) - cmd = ('fuel2 env nodes provision -n {0} -e {1}'. 
- format(' '.join(str(n) for n in node_ids), cluster_id)) - task = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - task_id = re.findall('id (\d+)', task['stdout_str']) - task = {'id': task_id[0], 'name': 'provision'} - - self.assert_cli_task_success(task, timeout=20 * 60) - self.show_step(6) - tasks = 'rsync_core_puppet' - cmd = ('fuel2 graph execute -e {0} -t default -T {1} ' - '-n {2} --format json'.format(cluster_id, tasks, node_ids[1])) - task = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - self.assert_cli_task_success(task, timeout=30 * 60) - self.show_step(7) - cmd = 'fuel2 env deploy {0}'.format(cluster_id) - - task = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - task_id = re.findall('id (\d+)', task['stdout_str']) - task = {'id': task_id[0], 'name': 'deploy'} - self.assert_cli_task_success(task, timeout=130 * 60) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - self.show_step(9) - self.fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) diff --git a/fuelweb_test/tests/tests_cli/test_cli_deploy_ceph.py b/fuelweb_test/tests/tests_cli/test_cli_deploy_ceph.py deleted file mode 100644 index 194fa5f9a..000000000 --- a/fuelweb_test/tests/tests_cli/test_cli_deploy_ceph.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import re - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests import test_cli_base - - -@test(groups=["cli_acceptance_ceph_deployment_tests"]) -class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine): - """CommandLineAcceptanceCephDeploymentTests.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["cli_deploy_ceph_neutron_tun"]) - @log_snapshot_after_test - def cli_deploy_ceph_neutron_tun(self): - """Deployment with 3 controllers, NeutronTUN, both Ceph - - Scenario: - 1. Create new environment - 2. Choose Neutron, TUN - 3. Choose Ceph for volumes and Ceph for images - 4. Change ceph replication factor to 2 - 5. Add 3 controllers - 6. Add 2 computes - 7. Add 2 ceph nodes - 8. Update nodes interfaces - 9. Verify networks - 10. Deploy the environment (see the task-polling sketch after this scenario) - 11. Verify networks - 12. Run OSTF tests
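Editorial note: the deploy step above, like the other CLI tests in this file, reuses one pattern: run a fuel2 command on the master node, scrape the task id out of stdout, then poll the task until it succeeds. A condensed sketch of that pattern follows; the helper name is invented, while `execute_on_remote` and its 'stdout_str' key are taken from the surrounding code.

    import re

    def start_cli_task(ssh_manager, cmd, name):
        # Run the fuel2 command on the master node and capture its output.
        result = ssh_manager.execute_on_remote(ip=ssh_manager.admin_ip, cmd=cmd)
        # fuel2 reports the started task with a line containing "... id <N> ...";
        # scrape the numeric id so the test can poll the task afterwards.
        task_id = re.findall(r'id (\d+)', result['stdout_str'])[0]
        return {'id': task_id, 'name': name}

    # Usage, mirroring the deploy step:
    #   task = start_cli_task(self.ssh_manager,
    #                         'fuel2 env deploy {0}'.format(cluster_id), 'deploy')
    #   self.assert_cli_task_success(task, timeout=130 * 60)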
- - Duration 40m - """ - self.env.revert_snapshot("ready_with_9_slaves") - - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()[0:7]]) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - - self.show_step(1, initialize=True) - self.show_step(2) - cmd = ('fuel2 env create {0} -r {1} ' - '-nst tun -f json'.format(self.__class__.__name__, - release_id)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - - self.update_cli_network_configuration(cluster_id) - - self.update_ssl_configuration(cluster_id) - self.set_public_networks_for_all_nodes(cluster_id) - self.show_step(3) - self.use_ceph_for_volumes(cluster_id) - self.use_ceph_for_images(cluster_id) - self.change_osd_pool_size(cluster_id, '2') - - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.add_nodes_to_cluster(cluster_id, node_ids[0:3], - ['controller']) - self.add_nodes_to_cluster(cluster_id, node_ids[3:5], - ['compute']) - self.add_nodes_to_cluster(cluster_id, node_ids[5:7], - ['ceph-osd']) - self.show_step(8) - for node_id in node_ids: - self.update_node_interfaces(node_id) - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - self.show_step(10) - cmd = 'fuel2 env deploy {0}'.format(cluster_id) - - task = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - task_id = re.findall('id (\d+)', task['stdout_str']) - task = {'id': task_id[0], 'name': 'deploy'} - self.assert_cli_task_success(task, timeout=130 * 60) - - self.show_step(11) - self.fuel_web.verify_network(cluster_id) - - self.show_step(12) - self.fuel_web.run_ostf( - cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["cli_deploy_ceph_neutron_vlan"]) - @log_snapshot_after_test - def cli_deploy_ceph_neutron_vlan(self): - """Deployment with 3 controllers, NeutronVLAN, both Ceph - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceph for volumes and Ceph for images - 4. Add 3 controllers - 5. Add 2 computes - 6. Add 3 ceph nodes - 7. Update nodes interfaces - 8. Verify networks - 9. Deploy the environment - 10. Verify networks - 11. 
Run OSTF tests - - Duration: 60 min - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()[0:8]]) - - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - - admin_ip = self.ssh_manager.admin_ip - - self.show_step(1) - self.show_step(2) - cmd = ('fuel2 env create {0} -r {1} -nst vlan -f json' - ''.format(self.__class__.__name__, release_id)) - cluster = self.ssh_manager.execute_on_remote( - ip=admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - - self.set_public_networks_for_all_nodes(cluster['id']) - self.show_step(3) - self.use_ceph_for_volumes(cluster['id']) - self.use_ceph_for_images(cluster['id']) - - self.show_step(4) - self.show_step(5) - self.show_step(6) - nodes = { - 'controller': node_ids[0:3], - 'compute': node_ids[3:5], - 'ceph-osd': node_ids[5:8] - } - - for role in nodes: - self.ssh_manager.execute_on_remote( - ip=admin_ip, - cmd='fuel2 env add nodes -e {0} -n {1} -r {2}' - ''.format(cluster['id'], - ' '.join(map(str, nodes[role])), role) - ) - self.show_step(7) - for node_id in node_ids: - self.update_node_interfaces(node_id) - self.show_step(8) - self.fuel_web.verify_network(cluster['id']) - - self.show_step(9) - cmd = 'fuel2 env deploy {0}'.format(cluster['id']) - task = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd - ) - task_id = re.findall('id (\d+)', task['stdout_str']) - task = {'id': task_id[0], 'name': 'deploy'} - self.assert_cli_task_success(task, timeout=130 * 60) - - self.show_step(10) - self.fuel_web.verify_network(cluster['id']) - self.show_step(11) - self.fuel_web.run_ostf( - cluster_id=cluster['id'], - test_sets=['ha', 'smoke', 'sanity'] - ) diff --git a/fuelweb_test/tests/tests_cli/test_cli_role.py b/fuelweb_test/tests/tests_cli/test_cli_role.py deleted file mode 100644 index 5b21d7708..000000000 --- a/fuelweb_test/tests/tests_cli/test_cli_role.py +++ /dev/null @@ -1,429 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from proboscis import test -from proboscis.asserts import assert_equal, assert_true - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests import test_cli_base - - -@test(groups=["cli_component_role_tests"]) -class CommandLineRoleTests(test_cli_base.CommandLine): - """CommandLineRoleTests.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_update_role"]) - @log_snapshot_after_test - def cli_update_role(self): - """Update controller role using Fuel CLI - - Scenario: - 1. Setup master node - 2. SSH to the master node - 3. Download to file controller role with command: - fuel role --rel 2 --role controller --file controller.yaml - 4. 
Edit the controller.yaml file, - remove section "conflicts" under "meta" section. Save file - 5. Update role from file with command: - fuel role --rel 2 --update --file controller.yaml - 6. Go to the Fuel UI and try to create a new environment - 7. Add new node to the environment, - choose controller and compute roles for node - - Duration 20m - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()]) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - - self.show_step(2) - self.show_step(3) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 role download -r {} -n controller -f yaml -d /tmp' - ''.format(release_id)) - - self.show_step(4) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd="sed -i '/conflicts/,+1 d' /tmp/releases_{}/controller.yaml" - "".format(release_id)) - - self.show_step(5) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 role update -r {} -n controller -d /tmp -f yaml' - ''.format(release_id)) - - if NEUTRON_SEGMENT_TYPE: - nst = '-nst {0}'.format(NEUTRON_SEGMENT_TYPE) - else: - nst = '' - self.show_step(6) - cmd = ('fuel2 env create {0} -r {1} {2} -f json' - ''.format(self.__class__.__name__, release_id, nst)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - self.show_step(7) - cmd = ('fuel2 env add nodes -e {0} -n {1} -r controller compute' - ''.format(cluster_id, node_ids[0])) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - ) - assert_equal(result['exit_code'], 0, - "Can't assign controller and compute node" - " to node id {}".format(node_ids[0])) - - self.env.make_snapshot("cli_update_role") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_create_role"]) - @log_snapshot_after_test - def cli_create_role(self): - """Create new role using Fuel CLI - - Scenario: - 1. Create environment using fuel-qa - 2. SSH to the master node - 3. Create new file "role.yaml" and paste the following - (a readable reconstruction is sketched after this scenario): - - meta: - conflicts: - - controller - - compute - - description: New role - - has_primary: true - - name: Test role - - name: test-role volumes_roles_mapping: - - allocate_size: min - - id: os - - 4. Create new role with command: - fuel role --rel 2 --create --file role.yaml - 5. Go to the Fuel UI and try to create a new environment - 6. Add new node to the environment, choose test-role - and try to add compute or controller role to the same node
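Editorial note: the diff flattens the role.yaml listing above, which makes its nesting hard to read. Interpreted as a data structure, it plausibly corresponds to the sketch below; the exact indentation is inferred from the flattened text, not taken from the repository.

    # Probable shape of create_role.yaml (nesting is an assumption):
    role = {
        'meta': {
            'conflicts': ['controller', 'compute'],  # roles it cannot share a node with
            'description': 'New role',
            'has_primary': True,
            'name': 'Test role',                     # display label
        },
        'name': 'test-role',                         # id used by `fuel2 env add nodes -r test-role`
        'volumes_roles_mapping': [
            {'allocate_size': 'min', 'id': 'os'},
        ],
    }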
 - - Duration 20m - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()]) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - templates_path = os.path.join( - '{0}/fuelweb_test/config_templates/'.format(os.environ.get( - "WORKSPACE", "./")), 'create_role.yaml') - self.show_step(2) - if os.path.exists(templates_path): - self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip, - templates_path, '/tmp') - self.show_step(3) - self.show_step(4) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 role create -n /tmp/create_role -r {} -f yaml' - ''.format(release_id)) - - if NEUTRON_SEGMENT_TYPE: - nst = '-nst {0}'.format(NEUTRON_SEGMENT_TYPE) - else: - nst = '' - self.show_step(5) - cmd = ('fuel2 env create {0} -r {1} {2} -f json' - ''.format(self.__class__.__name__, release_id, nst)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - self.show_step(6) - cmd = ('fuel2 env add nodes -e {0} -n {1} -r test-role' - ''.format(cluster_id, node_ids[0])) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - ) - assert_equal(result['exit_code'], 0, - "Can't assign test-role" - " to node id {}".format(node_ids[0])) - cmd = ('fuel2 env add nodes -e {0} -n {1} -r test-role controller ' - 'compute'.format(cluster_id, node_ids[1])) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - ) - assert_equal(result['exit_code'], 1, - "We shouldn't be able to assign test-role together" - " with controller and compute to node id {}".format(node_ids[1])) - self.env.make_snapshot("cli_create_role") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_create_role_with_has_primary"]) - @log_snapshot_after_test - def cli_create_role_with_has_primary(self): - """Create role with flag 'has_primary' set to 'true' - - Scenario: - 1. Create environment using fuel-qa - 2. SSH to the master node - 3. Create new file "role.yaml" and paste the following: - - meta: - conflicts: - - controller - - compute - - description: New role - - has_primary: true - - name: Test role - - name: test-role volumes_roles_mapping: - - allocate_size: min - - id: os - - 4. Upload yaml to nailgun using Fuel CLI - 5. 
Create new role with command: - fuel role --rel 2 --create --file role.yaml - - Duration 20m - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()]) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - templates_path = os.path.join( - '{0}/fuelweb_test/config_templates/'.format(os.environ.get( - "WORKSPACE", "./")), 'create_primary_role.yaml') - self.show_step(2) - self.show_step(3) - if os.path.exists(templates_path): - self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip, - templates_path, '/tmp') - self.show_step(4) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 role create -n /tmp/create_primary_role -r {} -f yaml' - ''.format(release_id)) - - if NEUTRON_SEGMENT_TYPE: - nst = '-nst {0}'.format(NEUTRON_SEGMENT_TYPE) - else: - nst = '' - self.show_step(5) - cmd = ('fuel2 env create {0} -r {1} {2} -f json' - ''.format(self.__class__.__name__, release_id, nst)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - - cmd = ('fuel2 env add nodes -e {0} -n {1} -r test-primary-role' - ''.format(cluster_id, node_ids[0])) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - ) - assert_equal(result['exit_code'], 0, - "Can't assign new role" - " to node id {}".format(node_ids[0])) - self.env.make_snapshot("cli_create_role_with_has_primary") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_delete_role"]) - @log_snapshot_after_test - def cli_delete_role(self): - """Delete role using Fuel CLI - - Scenario: - 1. Create environment using fuel-qa - 2. SSH to the master node - 3. Create new file "role.yaml" with the following content: - - meta: - conflicts: - - controller - - compute - - description: New role - - name: Test role - - name: test-role - volumes_roles_mapping: - - allocate_size: min - - id: os - - 4. Create new role with command: - fuel role --rel 2 --create --file role.yaml - 5. Go to the Fuel UI and try to create a new environment - 6. Check if new role exists in the list of roles - 7. Add new nodes to the environment: controller, compute - 8. 
Go to the console and try to delete roles: - fuel role --rel 2 --delete --role - fuel role --rel 2 --delete --role controller - - Duration 20m - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - node_ids = sorted([node['id'] for node in - self.fuel_web.client.list_nodes()]) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - templates_path = os.path.join( - '{0}/fuelweb_test/config_templates/'.format(os.environ.get( - "WORKSPACE", "./")), 'create_role.yaml') - self.show_step(2) - self.show_step(3) - if os.path.exists(templates_path): - self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip, - templates_path, '/tmp') - self.show_step(4) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 role create -n /tmp/create_role -r {} -f yaml' - ''.format(release_id)) - result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 role list -r {}'.format(release_id))['stdout_str'] - - assert_true('test-role' in result, - "role is not in the list:\n{}".format(result)) - - if NEUTRON_SEGMENT_TYPE: - nst = '-nst {0}'.format(NEUTRON_SEGMENT_TYPE) - else: - nst = '' - self.show_step(5) - self.show_step(6) - cmd = ('fuel2 env create {0} -r {1} {2} -f json' - ''.format(self.__class__.__name__, release_id, nst)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - jsonify=True - )['stdout_json'] - cluster_id = env_result['id'] - self.show_step(7) - cmd = ('fuel2 env add nodes -e {0} -n {1} -r controller' - ''.format(cluster_id, node_ids[0])) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - ) - assert_equal(result['exit_code'], 0, - "Can't assign controller role" - " to node id {}".format(node_ids[0])) - - self.show_step(8) - cmd = ('fuel2 role delete -r {} -n test-role'.format(release_id)) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - ) - assert_equal(result['exit_code'], 0, - "Can't delete role, result is {}".format(result)) - - result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel2 role list -r {}'.format(release_id))['stdout_str'] - assert_true('test-role' not in result, - "role is still in the list:\n{}".format(result)) - cmd = ('fuel2 role delete -r {} -n controller'.format(release_id)) - result = self.ssh_manager.execute( - ip=self.ssh_manager.admin_ip, - cmd=cmd, - ) - - assert_equal(result['exit_code'], 1, - "It shouldn't be possible to delete the controller role") - - self.env.make_snapshot("cli_delete_role") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["cli_incorrect_update_role"]) - @log_snapshot_after_test - def cli_incorrect_update_role(self): - """Try to update controller role with invalid data using Fuel CLI - - Scenario: - 1. Setup master node - 2. SSH to the master node - 3. Download to file controller role with command: - fuel role --rel 2 --role controller --file controller.yaml - 4. Modify created file: change "id" value at - the "volumes_roles_mapping" to something incorrect, - for ex.: "id: blabla" - 5. Save file and upload it to the nailgun with: - fuel role --rel 2 --role controller --update --file - controller.yaml - There should be an error message and the role shouldn't be updated. 
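Editorial note: the test below only asserts the non-zero exit code; the docstring's second claim, that the stored role stays unchanged, is not actually checked. A sketch of how that could be verified (the file names are hypothetical):

    def assert_role_not_updated(ssh_manager, release_id):
        # Re-download the controller role after the rejected update ...
        ssh_manager.execute_on_remote(
            ip=ssh_manager.admin_ip,
            cmd='fuel role --rel {} --role controller --file'
                ' /tmp/controller_after.yaml'.format(release_id))
        # ... and diff it against a pristine copy saved before the attempt;
        # a non-zero exit here would mean the corrupted update leaked through.
        ssh_manager.execute_on_remote(
            ip=ssh_manager.admin_ip,
            cmd='diff -q /tmp/controller_pristine.yaml /tmp/controller_after.yaml')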
- - Duration 20m - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready_with_3_slaves") - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - - self.show_step(2) - self.show_step(3) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel role --rel {} --role controller --file' - ' /tmp/controller.yaml'.format(release_id)) - - self.show_step(4) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd="sed -i -r 's/id: os/id: blabla/' /tmp/controller.yaml") - - self.show_step(5) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd='fuel role --rel {} --role controller --update --file' - ' /tmp/controller.yaml'.format(release_id), - assert_ec_equal=[1]) - self.env.make_snapshot("cli_incorrect_update_role") diff --git a/fuelweb_test/tests/tests_configdb/__init__.py b/fuelweb_test/tests/tests_configdb/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_configdb/test_configdb_api.py b/fuelweb_test/tests/tests_configdb/test_configdb_api.py deleted file mode 100644 index 0d69882b1..000000000 --- a/fuelweb_test/tests/tests_configdb/test_configdb_api.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from devops.helpers.helpers import wait_pass -from proboscis import test -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_false -from proboscis.asserts import assert_not_equal - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import install_configdb -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["tests_configdb_api"]) -class TestsConfigDBAPI(TestBasic): - """Tests ConfigDB""" # TODO documentations - - RESOURCE_NAME_1 = 'resource1' - SLASHED_RESOURCE = 'slashed/resource' - - @test(depends_on=[SetupEnvironment.setup_master], - groups=["create_component_and_env_configdb", - "smoke_test_configdb"]) - @log_snapshot_after_test - def create_component_and_env_configdb(self): - """ Install and check ConfigDB - - Scenario: - 1. Revert snapshot empty - 2. Install configDB extension - 3. Create components - 4. Create environment with component - 5. Get and check created data - 6. 
Make snapshot - - Duration: 5 min - Snapshot: create_component_and_env_configdb - """ - - self.check_run('create_component_and_env_configdb') - self.show_step(1) - self.env.revert_snapshot('empty') - self.show_step(2) - install_configdb() - - logger.debug('Waiting for ConfigDB') - wait_pass(lambda: self.fuel_web.client.get_components(), - timeout=45) - - logger.debug('Get env and component data') - components = self.fuel_web.client.get_components() - envs = self.fuel_web.client.get_environments() - - assert_false(components, - "Component list is not empty after tuningbox installation") - assert_false(envs, - "Environment list is not empty after tuningbox installation") - - # Data to upload - component = { - "name": "comp1", - "resource_definitions": [ - {"name": self.RESOURCE_NAME_1, "content": {}}, - {"name": self.SLASHED_RESOURCE, "content": {}} - ] - } - - environment = { - "name": "env1", - "components": ["comp1"], - "hierarchy_levels": ["nodes"] - } - self.show_step(3) - self.fuel_web.client.create_component(component) - self.show_step(4) - self.fuel_web.client.create_environment(environment) - self.show_step(5) - comp = self.fuel_web.client.get_components(comp_id=1) - env = self.fuel_web.client.get_environments(env_id=1) - - expected_comp = { - 'resource_definitions': [ - {'content': {}, 'component_id': 1, 'id': 1, - 'name': self.RESOURCE_NAME_1}, - {'content': {}, 'component_id': 1, 'id': 2, - 'name': self.SLASHED_RESOURCE} - ], - 'id': 1, 'name': "comp1" - } - expected_env = { - 'hierarchy_levels': ["nodes"], - 'id': 1, - 'components': [1] - } - logger.debug('Compare original component with ' - 'received component from API') - assert_equal(comp, expected_comp) - logger.debug('Compare original env with received env from API') - assert_equal(env, expected_env) - self.show_step(6) - self.env.make_snapshot('create_component_and_env_configdb', - is_make=True) - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=["get_upload_resource_value", "smoke_test_configdb"]) - @log_snapshot_after_test - def get_upload_resource_value(self): - """ Getting and uploading resource values - - Scenario: - 1. Revert snapshot with installed ConfigDB and - created component + env - 2. Check getting global resource value by resource id - 3. Check getting node resource value by resource id - 4. Upload global and node values - 5. Compare global uploaded and effective values - 6. Check getting resource value by resource name - 7. Check getting node effective and uploaded data - 8. Check node effective data contains global_value too - 9. 
Add data to slashed resource and compare received data by id and - by name of resource - - Duration: 5 min - """ - self.show_step(1) - self.env.revert_snapshot('create_component_and_env_configdb') - - self.show_step(2) - global_res = self.fuel_web.client.get_global_resource_id_value( - env_id=1, resource_id=1) - self.show_step(3) - node_res = self.fuel_web.client.get_node_resource_id_value( - env_id=1, resource_id=1, node_id=1) - - assert_false(global_res, "Global resource value is not empty for " - "the first resource") - assert_false(node_res, "Node level resource value is not empty " - "for the first resource") - - self.show_step(4) - node_value = {'node_key': 'node_value'} - global_value = {'global_key': 'global_value'} - - self.fuel_web.client.put_node_resource_value( - env_id=1, resource=1, node_id=1, data=node_value) - self.fuel_web.client.put_global_resource_value( - env_id=1, resource=1, data=global_value) - - self.show_step(5) - glob = self.fuel_web.client.get_global_resource_id_value( - env_id=1, resource_id=1) - glob_eff = self.fuel_web.client.get_global_resource_id_value( - env_id=1, resource_id=1, effective=True) - logger.debug('Get global value by resource id and compare with' - 'original global value') - assert_equal(glob, global_value) - logger.debug('Get global effective value by resource id and compare' - 'with original node value') - assert_equal(glob, glob_eff) - - self.show_step(6) - node_uploaded_n = self.fuel_web.client.get_node_resource_name_value( - env_id=1, resource_name=self.RESOURCE_NAME_1, node_id=1) - global_uploaded_n = \ - self.fuel_web.client.get_global_resource_name_value( - env_id=1, resource_name=self.RESOURCE_NAME_1) - assert_equal(global_uploaded_n, glob) - - self.show_step(7) - node_uploaded = self.fuel_web.client.get_node_resource_id_value( - env_id=1, resource_id=1, node_id=1) - node_effective = self.fuel_web.client.get_node_resource_id_value( - env_id=1, resource_id=1, node_id=1, effective=True) - logger.debug('Get node value by resource id and compare with' - 'original node value') - assert_equal(node_uploaded, node_value) - logger.debug('Get node value by resource name and compare with' - 'original node value') - assert_equal(node_uploaded_n, node_uploaded) - - assert_not_equal(node_uploaded, node_effective) - self.show_step(8) - merged_value = node_value.copy() - merged_value.update(global_value) - assert_equal(merged_value, node_effective) - - self.show_step(9) - slashed_value = {'slashed_key': 'slashed_value'} - self.fuel_web.client.put_global_resource_value( - env_id=1, resource=2, data=slashed_value) - glob_slashed = self.fuel_web.client.get_global_resource_id_value( - env_id=1, resource_id=2) - glob_slashed_n = self.fuel_web.client.get_global_resource_name_value( - env_id=1, resource_name=self.SLASHED_RESOURCE) - assert_equal(glob_slashed, slashed_value) - assert_equal(glob_slashed, glob_slashed_n) - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=["override_resource_value", "smoke_test_configdb"]) - @log_snapshot_after_test - def override_resource_value(self): - """ Check overridden data takes priority - - Scenario: - 1. Revert snapshot with installed ConfigDB and - created component + env - 2. Upload node and global resource values - 3. Override global resource value - 4. Check global overridden data affects on node level - 5. Upload new global data and check it doesn't - affect on node level - 6. Check Node level override takes priority over global override - 7. 
Check nodes data on second node has only global overridden data - - Duration: 5 min - """ - - self.show_step(1) - self.env.revert_snapshot('create_component_and_env_configdb') - - self.show_step(2) - node_value = {'node_key': 'node_value'} - global_value = {'global_key': 'global_value'} - logger.debug('Check overriding global data') - global_override = {'global_key': 'global_override'} - self.fuel_web.client.put_node_resource_value( - env_id=1, resource=1, node_id=1, data=node_value) - self.fuel_web.client.put_global_resource_value( - env_id=1, resource=1, data=global_value) - - merged_value = node_value.copy() - merged_value.update(global_value) - merged_value.update(global_override) - - self.show_step(3) - self.fuel_web.client.put_global_resource_override( - env_id=1, resource=1, data=global_override) - - self.show_step(4) - node_effective = self.fuel_web.client.get_node_resource_id_value( - env_id=1, resource_id=1, node_id=1, effective=True) - assert_equal(node_effective, merged_value) - - self.show_step(5) - global_new = {'global_key': 'global_new'} - - self.fuel_web.client.put_global_resource_value( - env_id=1, resource=1, data=global_new) - - # Check new global data does not affect on node level - node_effective = self.fuel_web.client.get_node_resource_id_value( - env_id=1, resource_id=1, node_id=1, effective=True) - assert_equal(node_effective, merged_value) - - self.show_step(6) - node_override = {'global_key': 'node_override'} - self.fuel_web.client.put_node_resource_overrides( - env_id=1, resource=1, node_id=1, data=node_override) - - node_effective = self.fuel_web.client.get_node_resource_id_value( - env_id=1, resource_id=1, node_id=1, effective=True) - merged_value.update(node_override) - assert_equal(node_effective, merged_value) - - self.show_step(7) - node_effective = self.fuel_web.client.get_node_resource_id_value( - env_id=1, resource_id=1, node_id=2, effective=True) - assert_equal(node_effective, global_override) diff --git a/fuelweb_test/tests/tests_configdb/test_configdb_cli.py b/fuelweb_test/tests/tests_configdb/test_configdb_cli.py deleted file mode 100644 index e3a22c653..000000000 --- a/fuelweb_test/tests/tests_configdb/test_configdb_cli.py +++ /dev/null @@ -1,587 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
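Editorial note: before the CLI variants below, it helps to state the precedence the API tests above just exercised: a node override beats a global override, which beats the global value, which beats the plain node value. A minimal sketch of that merge order (pure illustration of what the assertions encode, not ConfigDB's actual code):

    def effective_node_value(node_value, global_value,
                             global_override=None, node_override=None):
        """Merge layers in increasing priority, mirroring override_resource_value."""
        effective = {}
        for layer in (node_value, global_value,
                      global_override or {}, node_override or {}):
            effective.update(layer)
        return effective

    # Mirrors the test above:
    # prints {'node_key': 'node_value', 'global_key': 'node_override'}
    print(effective_node_value({'node_key': 'node_value'},
                               {'global_key': 'global_value'},
                               {'global_key': 'global_override'},
                               {'global_key': 'node_override'}))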
-import operator -import functools - -from proboscis import test -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic - - -RESOURCE_NAME_1 = 'resource1' -SLASHED_RESOURCE = 'slashed/resource' -ENV_FILE_PARAMS_PATH = '/tmp/configdb_env' -ROOT_PARAMS_FILE = '/root/.config/fuel/fuel_client.yaml' -EXPECTED_RES_DEF = { - u'content': {u'var': 1}, - u'name': u'res1' -} - - -@test(groups=["tests_configdb_api"]) -class TestsConfigDBAPI(TestBasic): - """Tests to cover cli interface of communication with - configdb(tuningbox)""" - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface']) - @log_snapshot_after_test - def validate_creation_of_component(self): - """Validate CRUD operations on components and resource definitions - - Scenario: - 1. Revert snapshot create_component_and_env_configdb - 2. Create empty component - 3. Verify empty component contents - 4. Verify failure of duplicate creation - 5. Create component to store resource definitions - 6. Verify component rename - 7. Add resources to component - 8. Verify resources of the component - 9. Make snapshot - - Duration: 5 min - Snapshot: configdb_component_tests - """ - self.show_step(1) # Revert snapshot - self.env.revert_snapshot('create_component_and_env_configdb') - admin_ip = self.ssh_manager.admin_ip - - self.show_step(2) # Create empty component - create_component_cmd = 'fuel2 config comp create --name empty' - self.ssh_manager.check_call(self.ssh_manager.admin_ip, - create_component_cmd) - - self.show_step(3) # Verify empty component contents - list_component_cmd = 'fuel2 config comp list --format json' - list_cmd_out = self.ssh_manager.check_call( - self.ssh_manager.admin_ip, - list_component_cmd).stdout_json - actual_component = [c for c in list_cmd_out if - c['name'] == u'empty'][0] - assert_equal(actual_component['resource_definitions'], []) - assert_equal(actual_component['name'], 'empty') - - self.show_step(4) # Verify failure of duplicate creation - create_duplicate = 'fuel2 config comp create --name empty' - stdout = self.ssh_manager.check_call( - self.ssh_manager.admin_ip, - create_duplicate, - raise_on_err=False).stdout_str - assert_true('duplicate key value violates unique constraint' in stdout) - - self.show_step(5) # Create component to store resource definitions - create_with_resources = 'fuel2 config comp create --name res' - self.ssh_manager.check_call(admin_ip, create_with_resources) - list_component_cmd = 'fuel2 config comp list --format json' - list_cmd_out = self.ssh_manager.check_call( - admin_ip, - list_component_cmd).stdout_json - res_comp = [c for c in list_cmd_out if - c['name'] == 'res'][0] - assert_equal(res_comp['resource_definitions'], []) - res_id = res_comp['id'] - - self.show_step(6) # Verify component rename - update_comp_cmd = 'fuel2 config comp update -n res_updated ' \ - '{id}'.format(id=res_id) - self.ssh_manager.check_call(admin_ip, update_comp_cmd) - - self.show_step(7) # Add resources to component - create_res_cmd = 'fuel2 config def create --name res1 -i {id} ' \ - '--content \'{{"var": 1}}\' -t json'.format(id=res_id) - self.ssh_manager.check_call(admin_ip, create_res_cmd) - - # TODO(akostrikov) Add more resources to the component - self.show_step(8) # Verify resources of the component - show_comp_cmd = 'fuel2 config comp show {id} --format json'.format( - id=res_id) - component = 
self.ssh_manager.check_call( - self.ssh_manager.admin_ip, - show_comp_cmd).stdout_json - res_def = component['resource_definitions'][0] - assert_equal(res_def['content'], - EXPECTED_RES_DEF['content']) - assert_equal(res_def['component_id'], - res_id) - assert_equal(res_def['name'], - EXPECTED_RES_DEF['name']) - - self.show_step(9) # Make snapshot - self.env.make_snapshot('configdb_component_tests') - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface']) - @log_snapshot_after_test - def validate_creation_of_env(self): - """Validate creation of configdb environment - - Scenario: - 1. Revert snapshot create_component_and_env_configdb - 2. Create environment with level - 3. Verify environment fields - 4. Create component for environment - 5. Create environment with component - 6. Verify environment with component - 7. Create environment with component and level - 8. Verify environment with component and level - 9. Create environment with component and two levels - 10. Verify environment with component and two levels - 11. Make snapshot - - Duration: 5 min - Snapshot: configdb_env_tests - """ - self.show_step(1) # Revert snapshot - self.env.revert_snapshot('create_component_and_env_configdb') - admin_ip = self.ssh_manager.admin_ip - - self.show_step(2) # Create environment with level - create_env_cmd = 'fuel2 config env create -l servers' - self.ssh_manager.check_call(self.ssh_manager.admin_ip, create_env_cmd) - list_env_cmd = 'fuel2 config env list -f json' - list_cmd_out = self.ssh_manager.check_call(self.ssh_manager.admin_ip, - list_env_cmd).stdout_json - - self.show_step(3) # Verify environment fields - actual_env = [e for e in list_cmd_out if - e['hierarchy_levels'] == ['servers']][0] - assert_equal(actual_env['hierarchy_levels'], ['servers']) - assert_equal(actual_env['components'], []) - - self.show_step(4) # Create component for environment - create_with_resources = 'fuel2 config comp create --name res' - self.ssh_manager.check_call(admin_ip, create_with_resources) - list_component_cmd = 'fuel2 config comp list --format json' - list_cmd_out = self.ssh_manager.check_call( - admin_ip, - list_component_cmd).stdout_json - - res_comp = [c for c in list_cmd_out if - c['name'] == 'res'][0] - assert_equal(res_comp['resource_definitions'], []) - res_id = res_comp['id'] - - self.show_step(5) # Create environment with component - create_with_comp = 'fuel2 config env create -i {id} -f json'.format( - id=res_id) - self.ssh_manager.check_call(admin_ip, create_with_comp) - - self.show_step(6) # Verify environment with component - find_comp_env = 'fuel2 config env list -f json' - env_list = self.ssh_manager.check_call(admin_ip, - find_comp_env).stdout_json - env_comp = [e for e in env_list - if e['components'] == [res_id]][0] - assert_equal(env_comp['hierarchy_levels'], []) - - self.show_step(7) # Create environment with component and level - create_lvl_comp = 'fuel2 config env create ' \ - '-i {id} -l nodes -f json'.format(id=res_id) - out_lvl_comp = self.ssh_manager.check_call( - admin_ip, create_lvl_comp).stdout_json - - self.show_step(8) # Verify environment with component and level - env_lvl_comp = out_lvl_comp - assert_equal(env_lvl_comp['components'], [res_id]) - assert_equal(env_lvl_comp['hierarchy_levels'], ['nodes']) - - self.show_step(9) # Create environment with component and two levels - create_new_comp = 'fuel2 config comp create -n another_comp -f json' - comp_res = self.ssh_manager.check_call( - admin_ip, create_new_comp).stdout_json - 
comp_id = comp_res['id'] - create_mult_env_cmd = 'fuel2 config env create ' \ - '-l nodes,servers -f json ' \ - '-i{id1},{id2}'.format(id1=comp_id, id2=res_id) - env_obj = self.ssh_manager.check_call( - admin_ip, create_mult_env_cmd).stdout_json - - self.show_step(10) # Verify environment with component and two levels - - levels = env_obj['hierarchy_levels'] - levels_contained = functools.reduce(operator.and_, - ['nodes' in levels, - 'servers' in levels], True) - assert_true(levels_contained) - - components = env_obj['components'] - levels_contained = functools.reduce(operator.and_, - [res_id in components, - comp_id in components], True) - assert_true(levels_contained) - - self.show_step(11) # Make snapshot - self.env.make_snapshot('configdb_env_tests') - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface']) - @log_snapshot_after_test - def resource_value_without_level(self): - """Getting and setting resources without level with cli - - Scenario: - 1. Revert snapshot create_component_and_env_configdb - 2. Create component for environment - 3. Create environment with component - 4. Get default resource value - 5. Update resource value - 6. Verify updated resource value - 7. Make snapshot - - Duration: 5 min - Snapshot: configdb_resource_tests - """ - self.show_step(1) # Revert snapshot - self.env.revert_snapshot('create_component_and_env_configdb') - admin_ip = self.ssh_manager.admin_ip - - self.show_step(2) # Create component with resource for environment - create_new_comp = 'fuel2 config comp create -n another_comp -f json' - comp_res = self.ssh_manager.check_call( - admin_ip, create_new_comp).stdout_json - comp_id = comp_res['id'] - create_res_cmd = 'fuel2 config def create --name res1 -i {id} ' \ - '--content \'{{"var": 1}}\' ' \ - '-t json -f json'.format(id=comp_id) - create_res_out = self.ssh_manager.check_call( - admin_ip, create_res_cmd).stdout_json - create_res_obj = create_res_out - res_id = create_res_obj['id'] - - self.show_step(3) # Create environment with component - create_mult_env_cmd = 'fuel2 config env create -f json ' \ - '-i{cid}'.format(cid=comp_id) - env_obj = self.ssh_manager.check_call( - admin_ip, create_mult_env_cmd).stdout_json - env_id = env_obj['id'] - - self.show_step(4) # Get default resource value - get_resource_cmd = 'fuel2 config get --env {env_id} ' \ - '--resource {res_id} ' \ - '-f json'.format(env_id=env_id, res_id=res_id) - admin_ip = self.ssh_manager.admin_ip - res_obj = self.ssh_manager.check_call( - admin_ip, get_resource_cmd).stdout_json - assert_equal(res_obj, {}) - - self.show_step(5) # Update resource value - set_resource_cmd = 'fuel2 config set --env {env_id} --resource ' \ - '{res_id} --value \'{{}}\' --type json' - set_resource_cmd = set_resource_cmd.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_resource_cmd) - - set_resource_cmd = 'fuel2 config set --env {env_id} --resource ' \ - '{res_id} --value \'{{"a": 1, "b": null}}\' ' \ - '--key key --type json' - set_resource_cmd = set_resource_cmd.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_resource_cmd) - - self.show_step(6) # Verify updated resource value - get_resource_cmd = 'fuel2 config get --env {env_id} ' \ - '--resource {res_id} ' \ - '-f json'.format(env_id=env_id, res_id=res_id) - admin_ip = self.ssh_manager.admin_ip - res_obj = self.ssh_manager.check_call( - admin_ip, get_resource_cmd).stdout_json - assert_equal(res_obj['key'], {'a': 1, 'b': None}) - - 
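# Editorial note: the `--key key` form above stores the JSON value under that
# key inside the resource document rather than replacing the whole document,
# so after the two `fuel2 config set` calls the stored resource is (shape
# inferred from the assertion above):
#
#     {"key": {"a": 1, "b": null}}
#
# which is why the test reads it back as res_obj['key'].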
self.show_step(7) # Make snapshot - self.env.make_snapshot('configdb_resource_tests') - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface']) - @log_snapshot_after_test - def resource_value_with_level(self): - """Getting and setting resources with level via cli - - Scenario: - 1. Revert snapshot create_component_and_env_configdb - 2. Create component for environment - 3. Create environment with component and levels - 4. Get default resource value by level - 5. Update resource value with level - 6. Verify updated resource value with level - 7. Verify level value does not leak - 8. 
self.env.make_snapshot('configdb_resource_tests_lvl') - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface']) - @log_snapshot_after_test - def merge_overrides_without_level(self): - """Test overrides behaviour without levels - - Scenario: - 1. Revert snapshot create_component_and_env_configdb - 2. Create component for environment - 3. Create environment for overrides - 4. Update resource value - 5. Update resource override - 6. Check effective value - 7. Make snapshot - - Duration: 5 min - Snapshot: configdb_resource_tests_overrides - """ - self.show_step(1) # Revert snapshot - self.env.revert_snapshot('create_component_and_env_configdb') - admin_ip = self.ssh_manager.admin_ip - - self.show_step(2) # Create component for environment - create_new_comp = 'fuel2 config comp create -n another_comp -f json' - comp_res = self.ssh_manager.check_call( - admin_ip, create_new_comp).stdout_json - comp_id = comp_res['id'] - create_res_cmd = 'fuel2 config def create --name res1 -i {id} ' \ - '--content \'{{"var": 1}}\' ' \ - '-t json -f json'.format(id=comp_id) - create_res_obj = self.ssh_manager.check_call( - admin_ip, create_res_cmd).stdout_json - res_id = create_res_obj['id'] - - self.show_step(3) # Create environment for overrides - create_mult_env_cmd = 'fuel2 config env create ' \ - '-i{cid} -f json'.format(cid=comp_id) - env_obj = self.ssh_manager.check_call( - admin_ip, create_mult_env_cmd).stdout_json - env_id = env_obj['id'] - - self.show_step(4) # Update resource value - # TODO(akostrikov) Operations on resource by resource name - set_res_cmd = 'fuel2 config set --env {env_id} --resource ' \ - '{res_id} --value \'{{}}\' ' \ - '--type json'.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_res_cmd) - - set_res_cmd = 'fuel2 config set --env {env_id} --resource ' \ - '{res_id} --value \'{{"a": 1, "b": null}}\' ' \ - '--key key --type ' \ - 'json'.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_res_cmd) - - self.show_step(5) # Update resource override - set_override_cmd = 'fuel2 config override --env {env_id} --resource ' \ - '{res_id} --value \'{{"a": 3, "b": null}}\' ' \ - '--key key --type ' \ - 'json'.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_override_cmd) - - self.show_step(6) # Check effective value - get_resource_cmd = 'fuel2 config get --env {env_id} ' \ - '--resource {res_id} ' \ - '-f json'.format(env_id=env_id, res_id=res_id) - admin_ip = self.ssh_manager.admin_ip - res_obj = self.ssh_manager.check_call( - admin_ip, get_resource_cmd).stdout_json - assert_equal(res_obj['key']['a'], 3) - assert_equal(res_obj['key']['b'], None) - - self.show_step(7) # Make snapshot - self.env.make_snapshot('configdb_resource_tests_overrides') - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface']) - @log_snapshot_after_test - def merge_overrides_with_level(self): - """Test overrides behaviour with levels - - Scenario: - 1. Revert snapshot create_component_and_env_configdb - 2. Create component for environment - 3. Create environment with level for overrides - 4. Update resource value with level - 5. Update resource override with level - 6. Check effective value with level - 7. Check effective value without level - 8. 
Make snapshot - - Duration: 5 min - Snapshot: configdb_resource_tests_lvl_overrides - """ - self.show_step(1) # Revert snapshot - self.env.revert_snapshot('create_component_and_env_configdb') - admin_ip = self.ssh_manager.admin_ip - - self.show_step(2) # Create component for environment - create_new_comp = 'fuel2 config comp create -n another_comp -f json' - comp_res = self.ssh_manager.check_call( - admin_ip, create_new_comp).stdout_json - comp_id = comp_res['id'] - create_res_cmd = 'fuel2 config def create --name res1 -i {id} ' \ - '--content \'{{"var": 1}}\' ' \ - '-t json -f json'.format(id=comp_id) - create_res_obj = self.ssh_manager.check_call( - admin_ip, create_res_cmd).stdout_json - res_id = create_res_obj['id'] - - self.show_step(3) # Create environment for overrides - create_mult_env_cmd = 'fuel2 config env create -l nodes ' \ - '-i{cid} -f json'.format(cid=comp_id) - env_obj = self.ssh_manager.check_call( - admin_ip, create_mult_env_cmd).stdout_json - env_id = env_obj['id'] - - self.show_step(4) # Update resource value with level - set_res_cmd = 'fuel2 config set --env {env_id} --resource ' \ - '{res_id} --value \'{{}}\' --type json ' \ - '--level nodes=1'.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_res_cmd) - - set_res_cmd = 'fuel2 config set --env {env_id} --resource ' \ - '{res_id} --value \'{{"a": 1, "b": null}}\' ' \ - '--key key --type json ' \ - '--level nodes=1'.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_res_cmd) - - self.show_step(5) # Update resource override with level - set_override_cmd = 'fuel2 config override --env {env_id} --resource ' \ - '{res_id} --value \'{{"a": 3, "b": null}}\' ' \ - '--key key --type json ' \ - '--level nodes=1'.format(env_id=env_id, - res_id=res_id) - self.ssh_manager.check_call( - admin_ip, set_override_cmd) - - self.show_step(6) # Check effective value with level - get_resource_cmd = 'fuel2 config get --env {env_id} ' \ - '--resource {res_id} --level nodes=1 ' \ - '-f json'.format(env_id=env_id, res_id=res_id) - res_obj = self.ssh_manager.check_call( - admin_ip, get_resource_cmd).stdout_json - assert_equal(res_obj['key']['a'], 3) - assert_equal(res_obj['key']['b'], None) - - self.show_step(7) # Check effective value without level - get_resource_cmd = 'fuel2 config get --env {env_id} ' \ - '--resource {res_id} ' \ - '-f json'.format(env_id=env_id, res_id=res_id) - - res_obj = self.ssh_manager.check_call( - admin_ip, get_resource_cmd).stdout_json - assert_equal(res_obj, {}) - - # TODO(akostrikov) Multiple levels - self.show_step(8) # Make snapshot - self.env.make_snapshot('configdb_resource_tests_lvl_overrides') - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface'], enabled=False) - def update_via_key_path(self): - # TODO(akostrikov) Update key by path - pass - - @test(depends_on_groups=['create_component_and_env_configdb'], - groups=['configdb_cli_interface'], enabled=False) - def key_deletion_via_path(self): - # TODO(akostrikov) Wipe key by path - # TODO(akostrikov) Delete key by path - pass diff --git a/fuelweb_test/tests/tests_custom_graph/__init__.py b/fuelweb_test/tests/tests_custom_graph/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_custom_graph/test_custom_graph.py b/fuelweb_test/tests/tests_custom_graph/test_custom_graph.py deleted file mode 100644 index fa88a9b76..000000000 --- a/fuelweb_test/tests/tests_custom_graph/test_custom_graph.py +++ 
/dev/null @@ -1,911 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -That is a place for testing of custom graph. -""" -import os -import re - -from proboscis import test -from proboscis.asserts import assert_equal -import yaml - -import fuelweb_test -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['custom-graph']) -class TestCustomGraph(TestBasic): - """Test to check custom graph""" - - def check_tasks_on_node(self, cluster_id, node_role, expected_content): - """Method to check custom tasks on node. - - :param cluster_id: id of a cluster to check - :param node_role: role to check - :param expected_content: content, which should be in custom_task_log - :return: - """ - checked_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, [node_role])[0] - actual_content = self.ssh_manager.execute_on_remote( - ip=checked_node['ip'], - cmd='cat /tmp/custom_task_log', - raise_on_assert=False - )['stdout_str'] - assert_equal(expected_content, actual_content) - - def move_ubuntu_target_image(self, release_id, cluster_id): - """Command moves cached image file to cluster destination - - :param release_id: id of release from which was built. - :param cluster_id: id of cluster to move to - :return: - """ - move_img_cmd = ( - 'cp /var/www/nailgun/targetimages/env_release_{release_id}' - '_ubuntu_1604_amd64-boot.img.gz /var/www/nailgun/targetimages/' - 'env_{cluster_id}_ubuntu_1604_amd64-boot.img.gz;' - 'cp /var/www/nailgun/targetimages/env_release_{release_id}' - '_ubuntu_1604_amd64.img.gz /var/www/nailgun/targetimages/' - 'env_{cluster_id}_ubuntu_1604_amd64.img.gz;' - 'cp /var/www/nailgun/targetimages/env_release_{release_id}' - '_ubuntu_1604_amd64.yaml /var/www/nailgun/targetimages/' - 'env_{cluster_id}_ubuntu_1604_amd64.yaml;' - 'sed -i -- "s/release_2/{cluster_id}/g" ' - '/var/www/nailgun/targetimages/env_release_{release_id}' - '_ubuntu_1604_amd64.yaml').format(release_id=release_id, - cluster_id=cluster_id) - - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=move_img_cmd) - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=['pre_provision_ubuntu_slaves_3']) - @log_snapshot_after_test - def pre_provision_ubuntu_slaves_3(self): - """Bootstrap 3 slave nodes with prepared target image - - Scenario: - 1. Revert snapshot "ready" - 2. Start 3 slave nodes - 3. Upload script to generate command - 4. Execute script to generate command - 5. Use command to build target image - 6. 
Save snapshot 'pre_provision_ubuntu_slaves_3' - - Duration 30m - Snapshot pre_provision_ubuntu_slaves_3 - """ - self.show_step(1) # Revert snapshot "ready" - self.check_run('pre_provision_ubuntu_slaves_3') - self.env.revert_snapshot("ready", skip_timesync=True) - - self.show_step(2) # Bootstrap 3 nodes - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3], - skip_timesync=True) - - self.show_step(3) # Upload script to generate command - tasks_filename = 'prepare_release_image.py' - script_filepath = os.path.join( - os.path.dirname(fuelweb_test.__file__), - 'config_templates', - tasks_filename) - upload_tasks_path = '/tmp/{}'.format(tasks_filename) - self.ssh_manager.upload_to_remote( - ip=self.ssh_manager.admin_ip, - source=script_filepath, - target=upload_tasks_path) - - self.show_step(4) # Execute script to generate command - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE_UBUNTU)[0] - upload_tasks_cmd = 'cd /tmp && python prepare_release_image.py ' \ - '{release_id}'.format(release_id=release_id) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(5) # Use command to build target image - upload_tasks_cmd = 'bash /tmp/build_image.sh' - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(6) # Save snapshot 'pre_provision_ubuntu_slaves_3' - self.env.make_snapshot('pre_provision_ubuntu_slaves_3', is_make=True) - - @test(depends_on=[pre_provision_ubuntu_slaves_3], - groups=['custom_graph', 'graph_isolation', 'custom_graph_leakage']) - @log_snapshot_after_test - def custom_graph_leakage(self): - """Check tasks for custom graph are not shown in default - - Scenario: - 1. Revert snapshot 'pre_provision_ubuntu_slaves_3' - 2. Create cluster - 3. Add 1 node with controller role - 4. Add 1 node with compute role - 5. Add 1 node with storage role - 6. Create custom graph 'custom_graph' - 7. Upload tasks to 'custom_graph' - 8. Download tasks for 'default' graph - 9. Verify that there no 'custom_graph' tasks in 'default' graph - 10. Deploy the cluster - 11. Run network verification - 12. Run OSTF to check services are deployed - 13. Verify that 'custom_graph' tasks are not called on controller - 14. Verify that 'custom_graph' tasks are not called on compute - 15. Verify that 'custom_graph' tasks are not called on cinder - 16. 
Create snapshot - - Duration 90m - Snapshot custom_graph_leakage - """ - self.show_step(1) # Revert snapshot 'pre_provision_ubuntu_slaves_3' - self.env.revert_snapshot('pre_provision_ubuntu_slaves_3') - graph_type = 'custom_graph' - - self.show_step(2) # Create cluster - cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__) - - self.move_ubuntu_target_image(2, cluster_id) - - self.show_step(3) # Add 1 node with controller role - self.show_step(4) # Add 1 node with compute role - self.show_step(5) # Add 1 node with storage role - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'], - } - ) - - self.show_step(6) # Create custom graph 'custom_graph' - self.show_step(7) # Upload tasks to 'custom_graph' - tasks_filename = 'custom_graph_tasks.yaml' - local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__), - 'config_templates', - tasks_filename) - with open(local_tasks_file, 'r') as yaml_file: - tasks_yaml_data = yaml.load(yaml_file) - custom_tasks = set([t['id'] for t in tasks_yaml_data]) - upload_tasks_path = '/tmp/{}'.format(tasks_filename) - self.ssh_manager.upload_to_remote( - ip=self.ssh_manager.admin_ip, - source=local_tasks_file, - target=upload_tasks_path) - upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \ - '{graph_type} -f {path}'.format( - cluster_id=cluster_id, - graph_type=graph_type, - path=upload_tasks_path - ) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(8) # Download tasks for 'default' graph - rel_id = self.fuel_web.get_cluster_release_id(cluster_id) - rel_tasks = self.fuel_web.client.get_release_tasks(rel_id)[0]['tasks'] - release_tasks = set([task['task_name'] for task in rel_tasks]) - - self.show_step(9) # no 'custom_graph' tasks in 'default' graph - assert_equal(release_tasks, - release_tasks - custom_tasks, - 'There were custom tasks in release. ' - 'Release is the place where default graph takes tasks.') - - self.show_step(10) # Deploy the cluster - self.fuel_web.deploy_cluster_wait(cluster_id, check_tasks=False) - - self.show_step(11) # Run network verification - self.fuel_web.verify_network(cluster_id) - - self.show_step(12) # Run OSTF to check services are deployed - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(13) # 'custom_graph' tasks are not called on controller - self.check_tasks_on_node(cluster_id, 'controller', '') - - self.show_step(14) # 'custom_graph' tasks are not called on compute - self.check_tasks_on_node(cluster_id, 'compute', '') - - self.show_step(15) # 'custom_graph' tasks are not called on cinder - self.check_tasks_on_node(cluster_id, 'cinder', '') - - self.show_step(16) # Create snapshot - self.env.make_snapshot('custom_graph_leakage') - - @test(depends_on=[pre_provision_ubuntu_slaves_3], - groups=['custom_graph', 'graph_isolation', 'default_graph_leakage']) - @log_snapshot_after_test - def default_graph_leakage(self): - """Check tasks for default graph are not shown in custom - - Scenario: - 1. Revert snapshot 'pre_provision_ubuntu_slaves_3' - 2. Create cluster - 3. Add 1 node with controller role - 4. Add 1 node with compute role - 5. Add 1 node with storage role - 6. Provision the cluster - 7. Create custom graph 'custom_graph' - 8. Upload tasks to 'custom_graph' - 9. Download tasks for 'custom_graph' graph from api - 10. Verify that there no 'default' tasks - in 'custom_graph' graph - 11. Run 'custom_graph' deployment - 12. 
Verify that 'custom_graph' tasks are called on controller
-            13. Verify that 'controller' role has not been deployed
-            14. Verify that 'custom_graph' tasks are called on compute
-            15. Verify that 'compute' role has not been deployed
-            16. Verify that 'custom_graph' tasks are called on cinder
-            17. Verify that 'cinder' role has not been deployed
-            18. Create snapshot
-
-        Duration 100m
-        Snapshot default_graph_leakage
-        """
-        self.show_step(1) # Revert snapshot 'pre_provision_ubuntu_slaves_3'
-        self.env.revert_snapshot('pre_provision_ubuntu_slaves_3')
-        graph_type = 'custom_graph'
-
-        self.show_step(2) # Create cluster
-        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__)
-        rel_id = self.fuel_web.get_cluster_release_id(cluster_id)
-        self.move_ubuntu_target_image(rel_id, cluster_id)
-
-        self.show_step(3) # Add 1 node with controller role
-        self.show_step(4) # Add 1 node with compute role
-        self.show_step(5) # Add 1 node with storage role
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['cinder'],
-            }
-        )
-
-        self.show_step(6) # Provision the cluster
-        self.fuel_web.provisioning_cluster_wait(cluster_id)
-        self.env.check_slaves_are_ready()
-        self.show_step(7) # Create custom graph 'custom_graph'
-        self.show_step(8) # Upload tasks to 'custom_graph'
-        tasks_filename = 'custom_graph_tasks.yaml'
-        local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__),
-                                        'config_templates',
-                                        tasks_filename)
-        with open(local_tasks_file, 'r') as yaml_file:
-            tasks_yaml_data = yaml.load(yaml_file)
-        expected_tasks = set([t['id'] for t in tasks_yaml_data])
-        upload_tasks_path = '/tmp/{}'.format(tasks_filename)
-        self.ssh_manager.upload_to_remote(
-            ip=self.ssh_manager.admin_ip,
-            source=local_tasks_file,
-            target=upload_tasks_path)
-        upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \
-                           '{graph_type} -f {path}'.format(
-                               cluster_id=cluster_id,
-                               graph_type=graph_type,
-                               path=upload_tasks_path
-                           )
-        self.ssh_manager.execute_on_remote(
-            ip=self.ssh_manager.admin_ip,
-            cmd=upload_tasks_cmd)
-
-        self.show_step(9) # Download tasks for 'custom_graph' graph from api
-        cli_tasks_data = self.ssh_manager.execute_on_remote(
-            ip=self.ssh_manager.admin_ip,
-            cmd='fuel2 graph list -e {cluster_id} -c tasks -f csv |'
-                'grep custom'.format(cluster_id=cluster_id)
-        )['stdout'][0]
-        actual_tasks = set(re.findall(r'[\w\-_]+', cli_tasks_data))
-
-        self.show_step(10) # Verify that no 'default' tasks leaked
-        assert_equal(actual_tasks,
-                     expected_tasks,
-                     'There is a difference in the processed tasks. '
-                     'Possibly, the regex used to find actual_tasks is wrong.')
-
-        self.show_step(11) # Run 'custom_graph' deployment.
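        # NOTE: deploy_custom_graph_wait() below drives the run through the
        # framework's API helpers. A roughly equivalent CLI invocation on the
        # admin node (a sketch, assuming the `fuel2 graph execute` subcommand
        # of python-fuelclient, which is not used by this test) would be:
        #
        #     fuel2 graph execute --env <cluster_id> --type custom_graph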
-        self.fuel_web.deploy_custom_graph_wait(cluster_id, graph_type)
-
-        self.show_step(12) # 'custom_graph' tasks are called on controller
-        self.show_step(13) # 'controller' role has not been deployed
-        self.check_tasks_on_node(cluster_id, 'controller', 'controller')
-        self.ssh_manager.execute_on_remote(
-            ip=self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-                cluster_id,
-                ['controller'])[0]['ip'],
-            cmd='pgrep neutron',
-            assert_ec_equal=[1]
-        )
-
-        self.show_step(14) # 'custom_graph' tasks are called on compute
-        self.show_step(15) # 'compute' role has not been deployed
-        self.check_tasks_on_node(cluster_id, 'compute', 'compute')
-        self.ssh_manager.execute_on_remote(
-            ip=self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-                cluster_id,
-                ['compute'])[0]['ip'],
-            cmd='pgrep nova-compute',
-            assert_ec_equal=[1]
-        )
-
-        self.show_step(16) # 'custom_graph' tasks are called on cinder
-        self.show_step(17) # 'cinder' role has not been deployed
-        self.check_tasks_on_node(cluster_id, 'cinder', 'cinder')
-        self.ssh_manager.execute_on_remote(
-            ip=self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-                cluster_id,
-                ['cinder'])[0]['ip'],
-            cmd='pgrep cinder',
-            assert_ec_equal=[1]
-        )
-
-        self.show_step(18) # Create snapshot
-        self.env.make_snapshot('default_graph_leakage')
-
-    @test(depends_on=[pre_provision_ubuntu_slaves_3],
-          groups=['custom_graph', 'graph_merge', 'default_is_from_puppet'])
-    @log_snapshot_after_test
-    def default_is_from_puppet(self):
-        """Verify that the default graph is generated from
-        tasks in /etc/puppet
-
-        Scenario:
-            1. Revert snapshot 'pre_provision_ubuntu_slaves_3'
-            2. Create cluster
-            3. Download deployment graph
-            4. Fetch all tasks from /etc/puppet
-            5. Verify that tasks in deployment graph are
-               from /etc/puppet
-
-        Duration 30m
-        """
-        self.show_step(1) # Revert snapshot 'pre_provision_ubuntu_slaves_3'
-        self.env.revert_snapshot('pre_provision_ubuntu_slaves_3')
-
-        self.show_step(2) # Create cluster
-        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__)
-        rel_id = self.fuel_web.get_cluster_release_id(cluster_id)
-        self.move_ubuntu_target_image(rel_id, cluster_id)
-
-        self.show_step(3) # Download deployment graph
-        rel_tasks = self.fuel_web.client.get_release_tasks(rel_id)[3]['tasks']
-        release_tasks = set([task['task_name'] for task in rel_tasks])
-
-        self.show_step(4) # Fetch all tasks from /etc/puppet
-        tasks_cmd = ('find /etc/puppet -name "*.yaml" -print0|'
-                     'xargs -0 grep -oh '
-                     '"name: [^(/()]*" ' # To avoid /(primary-)?rabbitmq/
-                     '| awk -F" " \'{print $2}\' |sort -u|uniq')
-        puppet_tasks = set([name.strip() for name in
-                            self.ssh_manager.execute_on_remote(
-                                ip=self.ssh_manager.admin_ip,
-                                cmd=tasks_cmd)['stdout'] if
-                            name.strip() != ''])
-
-        self.show_step(5) # tasks in deployment graph are from /etc/puppet
-        # Exclude the fuel-0x node names that come from
-        # /etc/puppet/modules/cobbler/examples/nodes.yaml
-        tasks = [x for x in puppet_tasks - release_tasks
-                 if 'fuel-0' not in x]
-        assert_equal(tasks, [],
-                     'Not all tasks from /etc/puppet are in the release')
-
-    @test(depends_on=[pre_provision_ubuntu_slaves_3],
-          groups=['custom_graph', 'graph_merge',
-                  'tasks_merge_cluster_and_release'])
-    @log_snapshot_after_test
-    def tasks_merge_cluster_and_release(self):
-        """Verify custom graph merging from release and cluster tasks
-
-        Scenario:
-            1. Revert snapshot 'pre_provision_ubuntu_slaves_3'
-            2. Create cluster
-            3. Upload 'custom_graph' tasks to release
-            4. Upload 'custom_graph' tasks to cluster
-            5. Download 'custom_graph' deployment graph
-            6.
Verify that 'custom_graph' is a merge of - release and cluster graphs. - 7. Create snapshot 'tasks_diff' - - Duration 30m - Snapshot merge_cluster_and_release - """ - self.show_step(1) # Revert snapshot - self.env.revert_snapshot('pre_provision_ubuntu_slaves_3') - graph_type = 'custom_graph' - - self.show_step(2) # Create cluster - cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__) - rel_id = self.fuel_web.get_cluster_release_id(cluster_id) - self.move_ubuntu_target_image(rel_id, cluster_id) - - self.show_step(3) # Upload 'custom_graph' tasks to release - rel_tasks_filename = 'release_custom_tasks.yaml' - local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__), - 'config_templates', - rel_tasks_filename) - with open(local_tasks_file, 'r') as yaml_file: - release_tasks_yaml_data = yaml.load(yaml_file) - upload_tasks_path = '/tmp/{}'.format(rel_tasks_filename) - - self.ssh_manager.upload_to_remote( - ip=self.ssh_manager.admin_ip, - source=local_tasks_file, - target=upload_tasks_path) - upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \ - '{graph_type} -f {path}'.format( - cluster_id=cluster_id, - graph_type=graph_type, - path=upload_tasks_path - ) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(4) # Upload 'custom_graph' tasks to cluster - c_tasks_filename = 'custom_graph_tasks.yaml' - local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__), - 'config_templates', - c_tasks_filename) - with open(local_tasks_file, 'r') as yaml_file: - cluster_tasks_yaml_data = yaml.load(yaml_file) - upload_tasks_path = '/tmp/{}'.format(rel_tasks_filename) - - self.ssh_manager.upload_to_remote( - ip=self.ssh_manager.admin_ip, - source=local_tasks_file, - target=upload_tasks_path) - upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \ - '{graph_type} -f {path}'.format( - cluster_id=cluster_id, - graph_type=graph_type, - path=upload_tasks_path - ) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(5) # Download 'custom_graph' deployment graph - custom_tasks = \ - self.fuel_web.client.get_custom_cluster_deployment_tasks( - cluster_id, - graph_type) - - self.show_step(6) # 'custom_graph' is a merge of release and cluster. - generated_names = set([t['task_name'] for t in custom_tasks]) - uploaded_names = set( - [t['id'] for t in release_tasks_yaml_data] + - [t['id'] for t in cluster_tasks_yaml_data]) - diff = generated_names - uploaded_names - assert_equal(diff, set([]), 'Tasks are not result of merge!') - - self.show_step(7) # Create snapshot 'tasks_diff' - self.env.make_snapshot('tasks_diff') - - @test(depends_on=[pre_provision_ubuntu_slaves_3], - groups=['custom_graph', 'graph_isolation', - 'two_custom_graphs_interfere']) - @log_snapshot_after_test - def two_custom_graphs_interfere(self): - """Verify that two custom graphs do not interfere with each other. - - Scenario: - 1. Revert snapshot 'pre_provision_ubuntu_slaves_3' - 2. Create cluster - 3. Add 1 node with controller role - 4. Add 1 node with compute role - 5. Add 1 node with storage role - 6. Provision cluster - 7. Upload 'custom_graph' tasks to release - 8. Upload 'yaql_graph' tasks to release - 9. Run 'custom_graph' deployment. - 10. Run 'yaql_graph' deployment. - 11. Verify that 'yaql_graph' tasks are called on controller - 12. Verify that 'yaql_graph' tasks are called on compute - 13. Verify that 'yaql_graph' tasks are called on cinder - 14. 
Create snapshot `two_custom_graphs_interfere`
-
-        Duration 30m
-        Snapshot two_custom_graphs_interfere
-        """
-        self.show_step(1) # Revert snapshot 'pre_provision_ubuntu_slaves_3'
-        self.env.revert_snapshot('pre_provision_ubuntu_slaves_3')
-
-        self.show_step(2) # Create cluster
-        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__)
-        rel_id = self.fuel_web.get_cluster_release_id(cluster_id)
-        self.move_ubuntu_target_image(rel_id, cluster_id)
-
-        self.show_step(3) # Add 1 node with controller role
-        self.show_step(4) # Add 1 node with compute role
-        self.show_step(5) # Add 1 node with storage role
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['cinder'],
-            }
-        )
-
-        self.show_step(6) # Provision cluster
-        self.fuel_web.provisioning_cluster_wait(cluster_id)
-        self.env.check_slaves_are_ready()
-        self.show_step(7) # Upload 'custom_graph' tasks to release
-        graph_type = 'custom_graph'
-        tasks_filename = 'custom_graph_tasks.yaml'
-        local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__),
-                                        'config_templates',
-                                        tasks_filename)
-        upload_tasks_path = '/tmp/{}'.format(tasks_filename)
-        self.ssh_manager.upload_to_remote(
-            ip=self.ssh_manager.admin_ip,
-            source=local_tasks_file,
-            target=upload_tasks_path)
-        upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \
-                           '{graph_type} -f {path}'.format(
-                               cluster_id=cluster_id,
-                               graph_type=graph_type,
-                               path=upload_tasks_path
-                           )
-        self.ssh_manager.execute_on_remote(
-            ip=self.ssh_manager.admin_ip,
-            cmd=upload_tasks_cmd)
-
-        self.show_step(8) # Upload 'yaql_graph' tasks to release
-        graph_type = 'yaql_graph'
-        tasks_filename = 'custom_yaql_tasks.yaml'
-        local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__),
-                                        'config_templates',
-                                        tasks_filename)
-        upload_tasks_path = '/tmp/{}'.format(tasks_filename)
-        self.ssh_manager.upload_to_remote(
-            ip=self.ssh_manager.admin_ip,
-            source=local_tasks_file,
-            target=upload_tasks_path)
-        upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \
-                           '{graph_type} -f {path}'.format(
-                               cluster_id=cluster_id,
-                               graph_type=graph_type,
-                               path=upload_tasks_path
-                           )
-        self.ssh_manager.execute_on_remote(
-            ip=self.ssh_manager.admin_ip,
-            cmd=upload_tasks_cmd)
-
-        self.show_step(9) # Run 'custom_graph' deployment.
-        self.fuel_web.deploy_custom_graph_wait(cluster_id, 'custom_graph')
-
-        self.show_step(10) # Run 'yaql_graph' deployment.
-        self.fuel_web.deploy_custom_graph_wait(cluster_id, graph_type)
-
-        # NOTE(akostrikov)
-        # Verify that the yaql tasks uploaded along with the custom graph
-        # tasks are not called on the first run, because they are isolated
-        # in another graph, but are called on the second run, because the
-        # current approach to checking node states exposes the new state
-        # to the tasks.
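        # A minimal sketch of the kind of yaql-conditioned task the note
        # above refers to. This is an assumption about the content of
        # custom_yaql_tasks.yaml, not a verbatim copy; the Fuel task format
        # allows a `condition` with a yaql expression so that the task
        # re-triggers when the observed node state changes:
        #
        #     - id: yaql_task_on_all_nodes
        #       type: shell
        #       version: 2.0.0
        #       role: ['/.*/']
        #       condition:
        #         yaql_exp: 'changed($.network_metadata)'
        #       parameters:
        #         cmd: echo 1 >> /tmp/yaql_task_on_all_nodes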
-        self.show_step(11) # 'yaql_graph' tasks are called on controller
-        controller_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            cluster_id, ['controller'])[0]
-        check_yaql_cmd = 'ls /tmp/yaql_task_on_all_nodes'
-        self.ssh_manager.execute_on_remote(
-            ip=controller_node['ip'],
-            cmd=check_yaql_cmd,
-            assert_ec_equal=[0]) # Explicit exit code for success
-
-        self.show_step(12) # 'yaql_graph' tasks are called on compute
-        compute_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            cluster_id, ['compute'])[0]
-        check_yaql_cmd = 'ls /tmp/yaql_task_on_all_nodes'
-        self.ssh_manager.execute_on_remote(
-            ip=compute_node['ip'],
-            cmd=check_yaql_cmd,
-            assert_ec_equal=[0]) # Explicit exit code for success
-
-        self.show_step(13) # 'yaql_graph' tasks are called on cinder
-        cinder_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            cluster_id, ['cinder'])[0]
-        check_yaql_cmd = 'ls /tmp/yaql_task_on_all_nodes'
-        self.ssh_manager.execute_on_remote(
-            ip=cinder_node['ip'],
-            cmd=check_yaql_cmd,
-            assert_ec_equal=[0]) # Explicit exit code for success
-
-        self.show_step(14) # Create snapshot `two_custom_graphs_interfere`
-        self.env.make_snapshot('two_custom_graphs_interfere')
-
-    @test(depends_on=[pre_provision_ubuntu_slaves_3],
-          groups=['custom_graph', 'custom_graph_master_node'])
-    @log_snapshot_after_test
-    def master_node_tasks(self):
-        """Verify task execution and ordering on the master node
-
-        Scenario:
-            1. Revert snapshot 'pre_provision_ubuntu_slaves_3'
-            2. Create cluster
-            3. Upload 'master_node' tasks
-            4. Run 'master_node' tasks
-            5. Verify that tasks are executed in the correct order
-            6. Create snapshot
-
-        Duration 30m
-        Snapshot custom_graph_master_node
-        """
-        self.show_step(1) # Revert snapshot 'pre_provision_ubuntu_slaves_3'
-        self.env.revert_snapshot('pre_provision_ubuntu_slaves_3')
-
-        self.show_step(2) # Create cluster
-        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__)
-        rel_id = self.fuel_web.get_cluster_release_id(cluster_id)
-        self.move_ubuntu_target_image(rel_id, cluster_id)
-
-        self.show_step(3) # Upload 'master_node' tasks
-        graph_type = 'master_node'
-        tasks_filename = 'master_node_tasks.yaml'
-        local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__),
-                                        'config_templates',
-                                        tasks_filename)
-        upload_tasks_path = '/tmp/{}'.format(tasks_filename)
-        self.ssh_manager.upload_to_remote(
-            ip=self.ssh_manager.admin_ip,
-            source=local_tasks_file,
-            target=upload_tasks_path)
-        upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \
-                           '{graph_type} -f {path}'.format(
-                               cluster_id=cluster_id,
-                               graph_type=graph_type,
-                               path=upload_tasks_path
-                           )
-        self.ssh_manager.execute_on_remote(
-            ip=self.ssh_manager.admin_ip,
-            cmd=upload_tasks_cmd)
-
-        self.show_step(4) # Run 'master_node' deployment
-        self.fuel_web.deploy_custom_graph_wait(cluster_id, graph_type)
-
-        self.show_step(5) # Tasks should be executed in the correct order
-        check_cmd = 'cat /tmp/master_task'
-        tasks_order = self.ssh_manager.execute_on_remote(
-            ip=self.ssh_manager.admin_ip,
-            cmd=check_cmd)['stdout_str']
-        actual_result = ''.join([s for s in tasks_order.split()
-                                 if s.isdigit()])
-        expected_result = '123'
-        assert_equal(actual_result, expected_result,
-                     'Task ordering error: {actual} != {expected}'
-                     .format(actual=actual_result,
-                             expected=expected_result))
-
-        self.show_step(6) # Create snapshot
-        self.env.make_snapshot('custom_graph_master_node')
-
-    @test(depends_on=[pre_provision_ubuntu_slaves_3],
-          groups=['custom_graph', 'custom_graph_edges'])
-
@log_snapshot_after_test - def custom_yaql_expression_tasks(self): - """Verify yaql expressions are working in custom graph - - Scenario: - 1. Revert snapshot 'pre_provision_ubuntu_slaves_3' - 2. Create cluster - 3. Add 1 node with controller role - 4. Add 1 node with compute role - 5. Add 1 node with storage role - 6. Create custom graph 'yaql_graph' - 7. Upload tasks to 'yaql_graph' - 8. Provision the cluster - 9. Deploy the cluster - 10. Re-deploy the cluster - 11. Check yaql on controller - 12. Check yaql on compute - 13. Check yaql on cinder - - Duration 30m - """ - self.show_step(1) # Revert snapshot 'pre_provision_ubuntu_slaves_3' - self.env.revert_snapshot('pre_provision_ubuntu_slaves_3') - graph_type = 'yaql_graph' - - self.show_step(2) # Create cluster - cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__) - rel_id = self.fuel_web.get_cluster_release_id(cluster_id) - self.move_ubuntu_target_image(rel_id, cluster_id) - - self.show_step(3) # Add 1 node with controller role - self.show_step(4) # Add 1 node with compute role - self.show_step(5) # Add 1 node with storage role - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'], - } - ) - - self.show_step(6) # Create custom graph 'yaql_graph' - self.show_step(7) # Upload tasks to 'yaql_graph' - tasks_filename = 'custom_yaql_tasks.yaml' - local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__), - 'config_templates', - tasks_filename) - upload_tasks_path = '/tmp/{}'.format(tasks_filename) - self.ssh_manager.upload_to_remote( - ip=self.ssh_manager.admin_ip, - source=local_tasks_file, - target=upload_tasks_path) - upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \ - '{graph_type} -f {path}'.format( - cluster_id=cluster_id, - graph_type=graph_type, - path=upload_tasks_path - ) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(8) # Provision the cluster - self.fuel_web.provisioning_cluster_wait(cluster_id) - self.env.check_slaves_are_ready() - self.show_step(9) # Deploy the cluster - self.fuel_web.deploy_custom_graph_wait(cluster_id, graph_type) - - self.show_step(10) # Re-deploy the cluster - self.fuel_web.deploy_custom_graph_wait(cluster_id, graph_type) - - self.show_step(11) # Check yaql on controller - controller_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0] - check_yaql_cmd = 'cat /tmp/yaql_task_on_all_nodes |wc -l' - times_echoed = self.ssh_manager.execute_on_remote( - ip=controller_node['ip'], - cmd=check_yaql_cmd)['stdout'][0].strip() - assert_equal('1', times_echoed) - - self.show_step(12) # Check yaql on compute - compute_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - check_yaql_cmd = 'cat /tmp/yaql_task_on_all_nodes |wc -l' - times_echoed = self.ssh_manager.execute_on_remote( - ip=compute_node['ip'], - cmd=check_yaql_cmd)['stdout'][0].strip() - assert_equal('1', times_echoed) - - self.show_step(13) # Check yaql on cinder - cinder_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['cinder'])[0] - check_yaql_cmd = 'cat /tmp/yaql_task_on_all_nodes |wc -l' - times_echoed = self.ssh_manager.execute_on_remote( - ip=cinder_node['ip'], - cmd=check_yaql_cmd)['stdout'][0].strip() - assert_equal('1', times_echoed) - - @test(depends_on=[pre_provision_ubuntu_slaves_3], - groups=['custom_graph', 'graph_meta']) - @log_snapshot_after_test - def 
information_at_graphs_handler(self): - """Get info of api handlers - - Scenario: - 1. Revert snapshot 'pre_provision_ubuntu_slaves_3' - 2. Create cluster - 3. Add 1 node with controller role - 4. Add 1 node with compute role - 5. Add 1 node with storage role - 6. Provision cluster - 7. Upload 'custom_graph' tasks to release - 8. Upload 'yaql_graph' tasks to release - 9. Verify list shows 'default' tasks - 10. Verify list shows 'custom' tasks - 11. Verify list shows 'yaql' tasks - - Duration 30m - """ - self.show_step(1) # Revert snapshot 'pre_provision_ubuntu_slaves_3' - self.env.revert_snapshot('pre_provision_ubuntu_slaves_3') - - self.show_step(2) # Create cluster - cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__) - rel_id = self.fuel_web.get_cluster_release_id(cluster_id) - self.move_ubuntu_target_image(rel_id, cluster_id) - - self.show_step(3) # Add 1 node with controller role - self.show_step(4) # Add 1 node with compute role - self.show_step(5) # Add 1 node with storage role - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'], - } - ) - - self.show_step(6) # Create cluster - self.fuel_web.provisioning_cluster_wait(cluster_id) - self.env.check_slaves_are_ready() - self.show_step(7) # Upload 'custom_graph' tasks to release - graph_type = 'custom_graph' - tasks_filename = 'custom_graph_tasks.yaml' - local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__), - 'config_templates', - tasks_filename) - upload_tasks_path = '/tmp/{}'.format(tasks_filename) - self.ssh_manager.upload_to_remote( - ip=self.ssh_manager.admin_ip, - source=local_tasks_file, - target=upload_tasks_path) - upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \ - '{graph_type} -f {path}'.format( - cluster_id=cluster_id, - graph_type=graph_type, - path=upload_tasks_path - ) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(8) # Upload 'yaql_graph' tasks to release - graph_type = 'yaql_graph' - tasks_filename = 'custom_yaql_tasks.yaml' - local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__), - 'config_templates', - tasks_filename) - upload_tasks_path = '/tmp/{}'.format(tasks_filename) - self.ssh_manager.upload_to_remote( - ip=self.ssh_manager.admin_ip, - source=local_tasks_file, - target=upload_tasks_path) - upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \ - '{graph_type} -f {path}'.format( - cluster_id=cluster_id, - graph_type=graph_type, - path=upload_tasks_path - ) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=upload_tasks_cmd) - - self.show_step(9) # Verify list shows 'default' tasks - check_default_cmd = 'fuel2 graph list -e {c_id}|grep default'.format( - c_id=cluster_id) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=check_default_cmd) - - self.show_step(10) # Verify list shows 'custom' tasks - check_custom_cmd = 'fuel2 graph list -e {c_id}|grep custom'.format( - c_id=cluster_id) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=check_custom_cmd) - - self.show_step(11) # Verify list shows 'yaql' tasks - check_yaql_cmd = 'fuel2 graph list -e {c_id}|grep yaql'.format( - c_id=cluster_id) - self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=check_yaql_cmd) diff --git a/fuelweb_test/tests/tests_deployments/__init__.py b/fuelweb_test/tests/tests_deployments/__init__.py deleted file mode 100644 index 
e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_tun/__init__.py b/fuelweb_test/tests/tests_deployments/tests_neutron_tun/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_1.py b/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_1.py deleted file mode 100644 index 73a4a2674..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_1.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging - -from proboscis import test - -from fuelweb_test import settings -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_tun_group_1"]) -class HaTunGroup1(TestBasic): - """This class implements part of Acceptance tests - Deployment with - NeutronTUN network provider. - - Includes: - 1. 3 controllers + operation system roles. - 2. External DNS, NTP, Ceph for images and RadosGW for objects. - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["tun_controller_base_os"]) - @log_snapshot_after_test - def tun_controller_base_os(self): - """Deploy 3 controllers with base_os using Neutron Tun - - Scenario: - 1. Create new environment - 2. Choose Neutron, tunnelling segmentation - 3. Add 3 controller+operating system - 4. Add 2 compute - 5. Add 1 cinder - 6. Verify networks - 7. Deploy the environment - 8. Verify networks - 9. Run OSTF tests - - Duration XXXm - Snapshot tun_controller_base_os - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'tenant': 'TunBaseOS', - 'user': 'TunBaseOS', - 'password': 'TunBaseOS', - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'base-os'], - 'slave-02': ['controller', 'base-os'], - 'slave-03': ['controller', 'base-os'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'], - } - ) - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("tun_controller_base_os") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["tun_ceph_for_images_and_objects"]) - @log_snapshot_after_test - def tun_ceph_for_images_and_objects(self): - """Deployment with 3 controllers, NeutronTUN, - with Ceph for images and RadosGW - - Scenario: - 1. Create new environment - 2. Choose Neutron VxLAN - 3. Choose Ceph for images - 4. Choose Ceph RadosGW for objects - 5. Add 3 controller - 6. 
Add 2 compute - 7. Add 1 cinder - 8. Add 3 ceph nodes - 9. Change default dns server to any 2 public dns servers - to the 'Host OS DNS Servers' on Settings tab - 10. Change default ntp servers to any 2 public ntp servers - to the 'Host OS NTP Servers' on Settings tab - 11. Verify networks - 12. Deploy the environment - 13. Verify networks - 14. Run OSTF tests - - Duration XXXm - Snapshot tun_ceph_images_rados_objects - """ - self.env.revert_snapshot("ready_with_9_slaves") - - if len(settings.EXTERNAL_DNS) < 2: - logging.warning("Less than 2 DNS servers was configured!") - - if len(settings.EXTERNAL_NTP) < 2: - logging.warning("Less than 2 NTP servers was configured!") - - data = { - 'tenant': 'TunCephImagesObjects', - 'user': 'TunCephImagesObjects', - 'password': 'TunCephImagesObjects', - - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - - 'dns_list': settings.EXTERNAL_DNS, - 'ntp_list': settings.EXTERNAL_NTP, - - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': True, - 'objects_ceph': True - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=['controller']) - vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id) - for node in ctrls: - checkers.external_dns_check(node['ip']) - checkers.external_ntp_check(node['ip'], vrouter_vip) - - self.env.make_snapshot("tun_ceph_images_rados_objects") diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_2.py b/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_2.py deleted file mode 100644 index cf0832b9b..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_2.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_tun_group_2"]) -class HaTunGroup2(TestBasic): - """This class implements part of Acceptance tests - Deployment with - NeutronTUN network provider. - - Includes: - 1. Ceph for all and separated operation system node - 2. 
Ceph for all, untag networks and changed openstack credentials - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["tun_ha_ceph_base_os"]) - @log_snapshot_after_test - def tun_ha_ceph_base_os(self): - """Deploy 3 controllers, 1 base_os, 2 computes - and 3 ceph nodes with Neutron Tun - - Scenario: - 1. Create new environment - 2. Choose Neutron TUN - 3. Choose Ceph for all - 4. Add 3 controller - 5. Add 2 compute - 6. Add 3 ceph - 7. Add 1 Operating System nodes - 8. Verify networks - 9. Deploy the environment - 10. Verify networks - 11. Run OSTF tests - - Duration XXXm - Snapshot tun_ha_ceph_base_os - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'tenant': 'TunBaseOS', - 'user': 'TunBaseOS', - 'password': 'TunBaseOS', - - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ephemeral_ceph': True, - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['base-os'], - } - ) - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("tun_ha_ceph_base_os") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["tun_ceph_all"]) - @log_snapshot_after_test - def tun_ceph_all(self): - """Deployment with 3 controllers, NeutronVxLAN, - with Ceph for volumes and images, ephemeral and Rados GW for objects - - Scenario: - 1. Create new environment - 2. Choose Neutron, VxLAN - 3. Choose Ceph for volumes and images, ceph for ephemeral - and Rados GW for objects - 4. Add 3 controller - 5. Add 2 compute - 6. Add 3 ceph nodes - 7. Untag management and storage networks - and move them to separate interfaces - 8. Verify networks - 9. Start deployment - 10. Verify networks - 11. 
Run OSTF - - Duration XXXm - Snapshot tun_ceph_all - """ - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - - 'tenant': 'TunCephAll', - 'user': 'TunCephAll', - 'password': 'TunCephAll', - - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ephemeral_ceph': True, - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - } - ) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("tun_ceph_all") diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_3.py b/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_3.py deleted file mode 100644 index 505287bf8..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_tun/test_ha_tun_group_3.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging - -from proboscis import test - -from fuelweb_test import settings -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_tun_group_3"]) -class HaTunGroup3(TestBasic): - """This class implements part of Acceptance tests - Deployment with - NeutronTUN network provider. - - Includes: - 1. No storage for volumes, Ceph for Images and ephemeral, changed - partitioning for ceph vdc, changed public network mask. - 2. 5 controllers, Ceph for ephemeral volumes, changed vdc partition on - Ceph nodes and changed public CIDR from /24 to /25 - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["tun_no_volumes_ceph_for_images_and_ephemeral"]) - @log_snapshot_after_test - def tun_no_volumes_ceph_for_images_and_ephemeral(self): - """Deployment with 3 controllers, NeutronVxLAN, - with no storage for volumes and ceph for images and ephemeral - - Scenario: - 1. Create new environment - 2. Choose Neutron, VxLAN - 3. Uncheck cinder for volumes and choose ceph for images, - ceph for ephemeral - 4. Change ceph replication factor to 2 - 5. Add 3 controller - 6. Add 2 compute - 7. Add 2 ceph nodes - 8. Change default disks partitioning for ceph nodes for 'vdc' - 9. Change default dns server to any 2 public dns servers - to the 'Host OS DNS Servers' on Settings tab - 10. 
Change default ntp servers to any 2 public ntp servers - to the 'Host OS NTP Servers' on Settings tab - 11. Change default public net mask from /24 to /25 - 12. Verify networks - 13. Deploy cluster - 14. Verify networks - 15. Run OSTF - - Duration 180m - Snapshot tun_no_volumes_ceph_for_images_and_ephemeral - """ - self.env.revert_snapshot("ready_with_9_slaves") - - if len(settings.EXTERNAL_DNS) < 2: - logging.warning("Less than 2 DNS servers was configured!") - - if len(settings.EXTERNAL_NTP) < 2: - logging.warning("Less than 2 NTP servers was configured!") - - data = { - 'tenant': 'TunNoVolumesCeph', - 'user': 'TunNoVolumesCeph', - 'password': 'TunNoVolumesCeph', - - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - - 'dns_list': settings.EXTERNAL_DNS, - 'ntp_list': settings.EXTERNAL_NTP, - - 'volumes_lvm': False, - 'volumes_ceph': False, - 'images_ceph': True, - 'objects_ceph': False, - 'ephemeral_ceph': True, - 'osd_pool_size': '2' - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - } - ) - - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.fuel_web.update_network_cidr(cluster_id, 'public') - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=['controller']) - vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id) - for node in ctrls: - checkers.external_dns_check(node['ip']) - checkers.external_ntp_check(node['ip'], vrouter_vip) - - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("tun_no_volumes_ceph_for_images_and_ephemeral") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["tun_5_ctrl_ceph_ephemeral"]) - @log_snapshot_after_test - def tun_5_ctrl_ceph_ephemeral(self): - """Deployment with 5 controllers, NeutronTUN, - with Ceph RBD for ephemeral volumes - - Scenario: - 1. Create new environment - 2. Choose Neutron, tunnelling segmentation - 3. Choose Ceph RBD for ephemeral volumes - and uncheck Cinder LVM over iSCSI for volumes - 4. Add 5 controllers - 5. Add 1 compute - 6. Add 3 ceph - 7. Change default disks partitioning for ceph nodes for vdc - 8. Change public default mask from /24 to /25 - 9. Verify networks - 10. Deploy the environment - 11. Verify networks - 12. 
Run OSTF tests - - Duration XXXm - Snapshot tun_5_ctrl_ceph_ephemeral - """ - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - - 'tenant': 'TunCephEphemeral', - 'user': 'TunCephEphemeral', - 'password': 'TunCephEphemeral', - - 'volumes_lvm': False, - 'ephemeral_ceph': True, - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['compute'], - } - ) - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.fuel_web.update_network_cidr(cluster_id, 'public') - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - - for ceph in ceph_nodes: - # TODO: add pool size check - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("tun_5_ctrl_ceph_ephemeral") diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/__init__.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_1.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_1.py deleted file mode 100644 index d18c080a6..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_1.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_vlan_group_1"]) -class HaVlanGroup1(TestBasic): - """HaVlanGroup1.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["cinder_ceph_for_images"]) - @log_snapshot_after_test - def cinder_ceph_for_images(self): - """Deployment with 3 controllers, NeutronVLAN, - with Ceph for images and other disk configuration - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceph for images - 4. Add 3 controller - 5. Add 2 compute - 6. Add 1 cinder - 7. Add 3 ceph - 8. Change disk configuration for both Ceph nodes. 
- Change 'Ceph' volume for vdc - 9. Verify networks - 10. Deploy the environment - 11. Verify networks - 12. Run OSTF tests - - Duration 180m - Snapshot cinder_ceph_for_images - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': True, - 'tenant': 'cindercephforimages', - 'user': 'cindercephforimages', - 'password': 'cindercephforimages', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - self.fuel_web.verify_network(cluster_id) - - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("cinder_ceph_for_images") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ceph_for_volumes_swift"]) - @log_snapshot_after_test - def ceph_for_volumes_swift(self): - """Deployment with 5 controllers, NeutronVLAN, with Ceph for volumes - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceph for volumes - 4. Add 5 controller - 5. Add 2 compute - 6. Add 2 ceph nodes - 7. Change default partitioning scheme for both ceph nodes for 'vdc' - 8. Change ceph replication factor to 2 - 9. Verify networks - 10. Deploy cluster - 11. Verify networks - 12. 
Run OSTF tests - - Duration 180m - Snapshot ceph_for_volumes_swift - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': False, - 'tenant': 'cephforvolumesswift', - 'user': 'cephforvolumesswift', - 'password': 'cephforvolumesswift', - 'osd_pool_size': "2", - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['compute'], - 'slave-07': ['compute'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ceph_for_volumes_swift") diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_2.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_2.py deleted file mode 100644 index b57fc309c..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_2.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_vlan_group_2"]) -class HaVlanGroup2(TestBasic): - """HaVlanGroup2.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["cinder_ceph_for_ephemeral"]) - @log_snapshot_after_test - def cinder_ceph_for_ephemeral(self): - """Deployment with 3 controllers, NeutronVLAN, with Ceph for ephemeral - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose cinder for volumes and Ceph for ephemeral - 4. Add 3 controller - 5. Add 2 compute - 6. Add 1 cinder - 7. Add 3 ceph - 8. Verify networks - 9. Deploy the environment - 10. Verify networks - 11. 
Run OSTF tests - - Duration 180m - Snapshot cinder_ceph_for_ephemeral - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': True, - 'ephemeral_ceph': True, - 'tenant': 'cindercephephemeral', - 'user': 'cindercephephemeral', - 'password': 'cindercephephemeral', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - - self.fuel_web.verify_network(cluster_id) - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("cinder_ceph_for_ephemeral") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["cinder_ceph_for_images_ephemeral"]) - @log_snapshot_after_test - def cinder_ceph_for_images_ephemeral(self): - """Deployment with 3 controllers, NeutronVLAN, with Ceph for - images and ephemeral - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceph for images and ceph for ephemeral - 4. Add 3 controller - 5. Add 2 compute - 6. Add 1 cinder - 7. Add 3 ceph - 8. Untag management and storage networks and move them to separate - interfaces - 9. Verify networks - 10. Deploy the environment - 11. Verify networks - 12. Run OSTF tests - - Duration 180m - Snapshot cinder_ceph_for_images_ephemeral - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'tenant': 'cindercephimagesephemeral', - 'user': 'cindercephimagesephemeral', - 'password': 'cindercephimagesephemeral', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - - self.fuel_web.verify_network(cluster_id) - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("cinder_ceph_for_images_ephemeral") diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_3.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_3.py deleted file mode 100644 index 9e1ffe01e..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_3.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['ha_vlan_group_3']) -class HaVlanGroup3(TestBasic): - """HaVlanGroup3.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["no_storage_for_volumes_swift"]) - @log_snapshot_after_test - def no_storage_for_volumes_swift(self): - """Deployment with 3 controllers, - NeutronVLAN with no storage for volumes and swift - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Uncheck cinder for volumes - 4. Add 3 controller - 5. Add 2 compute - 6. Change public net mask from /24 to /25 - 7. Verify networks - 8. Deploy the environment - 9. Verify networks - 10. Run OSTF tests - - Duration: 180 min - Snapshot: no_storage_for_volumes_swift - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - data = { - 'volumes_lvm': False, - 'volumes_ceph': False, - 'images_ceph': False - } - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(4) - self.show_step(5) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - - self.show_step(6) - self.fuel_web.update_network_cidr(cluster_id, 'public') - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id) - self.env.make_snapshot('no_storage_for_volumes_swift') - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ceph_volumes_ephemeral"]) - @log_snapshot_after_test - def ceph_volumes_ephemeral(self): - """Deployment with 3 controllers, NeutronVLAN, - with Ceph for volumes and ephemeral - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceph for volumes and Ceph for ephemeral - 4. Change openstack username, password, tenant - 5. Add 3 controller - 6. Add 2 compute - 7. Add 3 ceph nodes - 8. Change default management net mask from /24 to /25 - 9. Verify networks - 10. Start deployment - 11. Verify networks - 12. 
Run OSTF - - Duration: 180m - Snapshot: ceph_volumes_ephemeral - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': False, - 'ephemeral_ceph': True, - 'tenant': 'hagroup3', - 'password': 'hagroup3', - 'user': 'hagroup3' - } - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'] - } - ) - - self.show_step(8) - self.fuel_web.update_network_cidr(cluster_id, 'management') - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(11) - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.check_ceph_status(cluster_id) - - self.show_step(12) - self.fuel_web.run_ostf(cluster_id) - self.env.make_snapshot('ceph_volumes_ephemeral') diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_4.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_4.py deleted file mode 100644 index 8ad0b2ba7..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_4.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['ha_vlan_group_4']) -class HaVlanGroup4(TestBasic): - """HaVlanGroup4.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["four_controllers"]) - @log_snapshot_after_test - def four_controllers(self): - """Deployment with 4 controllers, NeutronVLAN, - and a non-default disk configuration - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Add 4 controller - 4. Add 2 compute - 5. Add 3 cinder - 6. Change disk configuration for all Cinder nodes. - Change 'Cinder' volume for vdc - 7. Verify networks - 8. Deploy the environment - 9. Verify networks - 10. Check disk configuration - 11. Run OSTF tests - - Note: "An odd number of controllers is recommended by default, - but nothing states that an even number of controllers cannot be - deployed, so this case has to be checked as well." The quorum - arithmetic behind that recommendation is sketched below.
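Editorial aside (not part of the retired file): the note above is really a quorum question. Corosync/Pacemaker and Galera, which typically provide HA for the controller set, require a majority of votes, floor(n / 2) + 1, so an even controller count survives no more failures than the next smaller odd one. A minimal plain-Python sketch of the arithmetic:

def quorum(n):
    # Votes needed for a majority in an n-node cluster.
    return n // 2 + 1

def tolerated_failures(n):
    # Nodes that may fail while the cluster keeps quorum.
    return n - quorum(n)

assert tolerated_failures(3) == 1
assert tolerated_failures(4) == 1  # a 4th controller adds no extra tolerance
assert tolerated_failures(5) == 2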
- - Duration: 180 min - Snapshot: four_controllers - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - self.show_step(1, initialize=True) - self.show_step(2) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - ) - - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['compute'], - 'slave-06': ['compute'], - 'slave-07': ['cinder'], - 'slave-08': ['cinder'], - 'slave-09': ['cinder'], - } - ) - self.show_step(6) - cinders = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=cluster_id, - roles=['cinder'], - role_status='pending_roles' - ) - - for node in cinders: - cinder_image_size = self.fuel_web.update_node_partitioning(node) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - for cinder in cinders: - checkers.check_cinder_image_size(cinder['ip'], cinder_image_size) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id) - self.env.make_snapshot('four_controllers') - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ceph_rados_gw_no_storage_volumes"]) - @log_snapshot_after_test - def ceph_rados_gw_no_storage_volumes(self): - """Deployment with 3 controllers, NeutronVLAN, with no storage for - volumes and ceph for images and Rados GW - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Uncheck cinder storage for volumes and choose ceph - for images and Rados GW - 4. Change openstack username, password, tenant - 5. Add 3 controller - 6. Add 2 compute - 7. Add 3 ceph nodes - 8. Change storage net mask /24 to /25 - 9. Verify networks - 10. Start deployment - 11. Verify networks - 12. 
Run OSTF - - Duration: 180 min - Snapshot: ceph_rados_gw_no_storage_volumes - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - data = { - 'volumes_lvm': False, - 'images_ceph': True, - 'objects_ceph': True, - 'tenant': 'hagroup4', - 'user': 'hagroup4', - 'password': 'hagroup4' - } - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'] - } - ) - self.show_step(8) - self.fuel_web.update_network_cidr(cluster_id, 'storage') - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(11) - self.fuel_web.verify_network(cluster_id) - - self.show_step(12) - self.fuel_web.run_ostf(cluster_id) - self.env.make_snapshot('ceph_rados_gw_no_storage_volumes') diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_5.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_5.py deleted file mode 100644 index ef4928c9e..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_5.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_vlan_group_5"]) -class HaVlanGroup5(TestBasic): - """HaVlanGroup5.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ceph_for_volumes_images_ephemeral_rados"]) - @log_snapshot_after_test - def ceph_for_volumes_images_ephemeral_rados(self): - """Deployment with 3 controllers, NeutronVLAN, - with Ceph for volumes and images, ephemeral and Rados GW for objects - - Scenario: - 1. Create environment using fuel-qa - 2. Choose Neutron, VLAN - 3. Choose Ceph for volumes and images, - ceph for ephemeral and Rados GW for objects - 4. Add 3 controller - 5. Add 2 compute - 6. Add 3 ceph nodes - 7. Untag all networks and move them to separate interfaces - 8. Verify networks - 9. Deploy cluster - 10. Verify networks - 11. 
Run OSTF - - Duration 180m - Snapshot ceph_for_volumes_images_ephemeral_rados - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'tenant': 'cephvolumesimagesephemeralrados', - 'user': 'cephvolumesimagesephemeralrados', - 'password': 'cephvolumesimagesephemeralrados' - } - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'] - } - ) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ceph_for_volumes_images_ephemeral_rados") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["cinder_ceph_for_images_ephemeral_rados"]) - @log_snapshot_after_test - def cinder_ceph_for_images_ephemeral_rados(self): - """Deployment with 3 controllers, NeutronVLAN, with cinder for volumes - and ceph for images, ephemeral and Rados GW for objects - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose cinder for volumes and ceph for images, ceph for - ephemeral and Rados GW for objects - 4. Add 3 controller - 5. Add 2 compute - 6. Add 3 ceph nodes - 7. Add 1 cinder node - 8. Change default public net mask from /24 to /25 - 9. Change default partitioning for ceph and cinder nodes for vdc - 10. Change default dns server to any 2 public dns servers to the - 'Host OS DNS Servers' on Settings tab - 11. Change default ntp servers to any 2 public ntp servers to the - 'Host OS NTP Servers' on Settings tab - 12. Verify networks - 13. Deploy cluster - 14. Verify networks - 15. 
Run OSTF - - Duration 180m - Snapshot cinder_ceph_for_images_ephemeral_rados - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'tenant': 'cindercephforimagesephemeralrados', - 'user': 'cindercephforimagesephemeralrados', - 'password': 'cindercephforimagesephemeralrados', - 'ntp_list': settings.EXTERNAL_NTP, - 'dns_list': settings.EXTERNAL_DNS - } - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['cinder'] - } - ) - self.show_step(8) - self.fuel_web.update_network_cidr(cluster_id, 'public') - - self.show_step(9) - self.show_step(10) - self.show_step(11) - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - cinder_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['cinder'], - role_status='pending_roles') - for cinder_node in cinder_nodes: - cinder_image_size = self.fuel_web.\ - update_node_partitioning(cinder_node, node_role='cinder') - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - self.show_step(13) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.show_step(14) - self.fuel_web.verify_network(cluster_id) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - for cinder in cinder_nodes: - checkers.check_cinder_image_size(cinder['ip'], cinder_image_size) - - self.show_step(15) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados") - - -@test(groups=["ha_5_contr_rados"]) -class Ha5ContrRados(TestBasic): - """Ha5ContrRados.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_all], - groups=["deploy_5_contr_rados_delete"]) - @log_snapshot_after_test - def deploy_5_contr_rados_delete(self): - """Deployment with 5 controllers, NeutronVLAN, - with Ceph for volumes and images, Rados GW for objects - - Scenario: - 1. Create environment 5 controller, 2 ceph Rados GW for objects, - 2 compute, Neutron VLAN. - 2. Change default disks partitioning for ceph nodes for 'vdc' - 3. Change default dns server to any 2 public dns servers to the - 'Host OS DNS Servers' on Settings tab - 4. Change default ntp servers to any 2 public ntp servers to the - 'Host OS NTP Servers' on Settings tab - 5. Verify networks - 6. Deploy cluster - 7. Verify networks - 8. Run OSTF - 9. 
Delete env - - Duration 180m - Snapshot deploy_5_contr_rados_delete - """ - - self.env.revert_snapshot("ready_with_all_slaves") - - data = { - 'volumes_lvm': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'osd_pool_size': "2", - 'tenant': 'deploy_5_contr_rados_delete', - 'user': 'deploy_5_contr_rados_delete', - 'password': 'deploy_5_contr_rados_delete', - 'ntp_list': settings.EXTERNAL_NTP, - 'dns_list': settings.EXTERNAL_DNS - } - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['compute'], - 'slave-09': ['compute'] - } - ) - - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.show_step(9) - self.fuel_web.delete_env_wait(cluster_id=cluster_id) - self.env.make_snapshot("deploy_5_contr_rados_delete") diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_6.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_6.py deleted file mode 100644 index 05b59cb4a..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_6.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_vlan_group_6"]) -class HaVlanGroup6(TestBasic): - """HaVlanGroup6.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ceph_for_images_ephemeral_rados"]) - @log_snapshot_after_test - def ceph_for_images_ephemeral_rados(self): - """Deployment with 3 controllers, NeutronVLAN, with no storage for - volumes and ceph for images, ephemeral and Rados GW for objects - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. 
Uncheck cinder for volumes and choose ceph for images, - ceph for ephemeral and Rados GW for objects - 4. Add 3 controller - 5. Add 2 compute - 6. Add 3 ceph nodes - 7. Verify networks - 8. Change default disks partitioning for ceph nodes for 'vdc' - 9. Change default dns server to any 2 public dns servers to the - 'Host OS DNS Servers' on Settings tab - 10. Change default ntp servers to any 2 public ntp servers to the - 'Host OS NTP Servers' on Settings tab - 11. Deploy cluster - 12. Verify networks - 13. Run OSTF - - Duration 180m - Snapshot ceph_for_images_ephemeral_rados - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': False, - 'volumes_ceph': False, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'tenant': 'cephforimagesephemeralrados', - 'user': 'cephforimagesephemeralrados', - 'password': 'cephforimagesephemeralrados', - 'ntp_list': settings.EXTERNAL_NTP, - 'dns_list': settings.EXTERNAL_DNS - } - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'] - } - ) - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.show_step(9) - self.show_step(10) - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.show_step(11) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.show_step(13) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ceph_for_images_ephemeral_rados") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ceph_for_volumes_images_ephemeral"]) - @log_snapshot_after_test - def ceph_for_volumes_images_ephemeral(self): - """Deployment with 5 controllers, NeutronVLAN, - with Ceph for volumes and images, ephemeral - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceph for volumes and images, ceph for ephemeral - 4. Add 5 controller - 5. Add 2 compute - 6. Add 2 ceph nodes - 7. Change ceph replication factor to 2 - 8. Change management net default mask from /24 to /25 - 9. Change default disk partitioning for ceph nodes for vdc - 10. Verify networks - 11. Deploy changes - 12. Verify networks - 13. 
Run OSTF - - Duration 180m - Snapshot ceph_for_volumes_images_ephemeral - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'osd_pool_size': "2", - 'tenant': 'cephforvolumesimagesephemeral', - 'user': 'cephforvolumesimagesephemeral', - 'password': 'cephforvolumesimagesephemeral' - } - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['compute'], - 'slave-07': ['compute'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - self.show_step(8) - self.fuel_web.update_network_cidr(cluster_id, 'management') - - self.show_step(9) - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.show_step(13) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ceph_for_volumes_images_ephemeral") diff --git a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_7.py b/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_7.py deleted file mode 100644 index f1876496b..000000000 --- a/fuelweb_test/tests/tests_deployments/tests_neutron_vlan/test_ha_vlan_group_7.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_vlan_group_7"]) -class HaVlanGroup7(TestBasic): - """HaVlanGroup7.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ceph_for_images"]) - @log_snapshot_after_test - def ceph_for_images(self): - """Deployment with 3 controllers, NeutronVLAN, - with no storage for volumes and ceph for images - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Uncheck cinder for volumes and choose ceph for images - 4. Add 3 controller - 5. Add 2 compute - 6. 
Add 3 ceph nodes - 7. Change default disks partitioning for ceph nodes for 'vdc' - 8. Change default dns server to any 2 public dns servers to the - 'Host OS DNS Servers' on Settings tab - 9. Change default ntp servers to any 2 public ntp servers to the - 'Host OS NTP Servers' on Settings tab - 10. Untag management and storage networks - and move them to separate interfaces - 11. Verify networks - 12. Deploy cluster - 13. Verify networks - 14. Run OSTF - - Duration 180m - Snapshot ceph_for_images - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': False, - 'volumes_ceph': False, - 'images_ceph': True, - 'osd_pool_size': "3", - 'tenant': 'cephforimages', - 'user': 'cephforimages', - 'password': 'cephforimages', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - 'ntp_list': settings.EXTERNAL_NTP, - 'dns_list': settings.EXTERNAL_DNS - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'] - } - ) - self.fuel_web.verify_network(cluster_id) - - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.verify_network(cluster_id) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ceph_for_images") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["ha_vlan_operating_system"]) - @log_snapshot_after_test - def ha_vlan_operating_system(self): - """Deployment with 3 controllers, NeutronVlan, with Operating System - - Scenario: - 1. Create new environment - 2. Choose Neutron Vlan - 3. Add 3 controller - 4. Add 2 compute - 5. Add 1 Operating System node - 6. Verify networks - 7. Deploy the environment - 8. Verify networks - 9. 
Run OSTF tests - - Duration 180m - Snapshot ha_vlan_operating_system - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'tenant': 'operatingsystem', - 'user': 'operatingsystem', - 'password': 'operatingsystem', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['compute'], - 'slave-07': ['base-os'] - } - ) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ha_vlan_operating_system") diff --git a/fuelweb_test/tests/tests_extra_computes/__init__.py b/fuelweb_test/tests/tests_extra_computes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_extra_computes/base_extra_computes.py b/fuelweb_test/tests/tests_extra_computes/base_extra_computes.py deleted file mode 100644 index f2ec97417..000000000 --- a/fuelweb_test/tests/tests_extra_computes/base_extra_computes.py +++ /dev/null @@ -1,575 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division -import re - -from devops.error import TimeoutError -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from proboscis import asserts - -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import TestBasic - - -class ExtraComputesBase(TestBasic): - """Extra computes tests base""" - - def check_slaves_are_ready(self): - devops_nodes = [node for node in self.env.d_env.nodes().slaves - if node.driver.node_active(node)] - - for node in devops_nodes: - ip = self.fuel_web.get_node_ip_by_devops_name(node.name) - try: - self.wait_for_slave_provision(ip) - except TimeoutError: - asserts.assert_true( - tcp_ping(ip, 22), - 'Node {0} has not become online ' - 'after revert'.format(node.name)) - logger.debug('Node {0} became online.'.format(node.name)) - return True - - @staticmethod - def wait_for_slave_provision(node_ip, timeout=10 * 60): - """Wait for a target node provision. - - :param node_ip: IP address of target node. - :param timeout: Timeout for wait function. - """ - wait(lambda: tcp_ping(node_ip, 22), - timeout=timeout, timeout_msg="Node didn't appear on the network") - - @staticmethod - def wait_for_slave_network_down(node_ip, timeout=10 * 20): - """Wait for a target node's network to go down. - - :param node_ip: IP address of target node. - :param timeout: Timeout for wait function.
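Editorial aside (illustration only, not the devops library's implementation): the wait/tcp_ping pair these helpers lean on boils down to polling a TCP port until a deadline. A self-contained Python 3 equivalent of that pattern:

import socket
import time

def tcp_ping(host, port, conn_timeout=3):
    # True if a TCP connection to host:port can be established.
    try:
        with socket.create_connection((host, port), timeout=conn_timeout):
            return True
    except OSError:
        return False

def wait(predicate, interval=5, timeout=600, timeout_msg='Timed out'):
    # Poll predicate() every `interval` seconds until it is truthy.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise TimeoutError(timeout_msg)

Usage mirrors the helpers above: wait(lambda: tcp_ping('10.109.0.2', 22), timeout=10 * 60) blocks until the slave answers on SSH (the IP here is invented for illustration).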
- """ - wait(lambda: (not tcp_ping(node_ip, 22)), interval=1, - timeout=timeout, timeout_msg="Node doesn't gone offline") - - def warm_restart_nodes(self, devops_nodes): - logger.info('Reboot (warm restart) nodes ' - '{0}'.format([n.name for n in devops_nodes])) - self.warm_shutdown_nodes(devops_nodes) - self.warm_start_nodes(devops_nodes) - - def warm_shutdown_nodes(self, devops_nodes): - logger.info('Shutting down (warm) nodes ' - '{0}'.format([n.name for n in devops_nodes])) - for node in devops_nodes: - ip = self.fuel_web.get_node_ip_by_devops_name(node.name) - logger.debug('Shutdown node {0}'.format(node.name)) - self.ssh_manager.execute(ip, '/sbin/shutdown -Ph now & exit') - - for node in devops_nodes: - ip = self.fuel_web.get_node_ip_by_devops_name(node.name) - logger.info('Wait a {0} node offline status'.format(node.name)) - try: - self.wait_for_slave_network_down(ip) - except TimeoutError: - asserts.assert_false( - tcp_ping(ip, 22), - 'Node {0} has not become ' - 'offline after warm shutdown'.format(node.name)) - node.destroy() - - def warm_start_nodes(self, devops_nodes): - logger.info('Starting nodes ' - '{0}'.format([n.name for n in devops_nodes])) - for node in devops_nodes: - node.start() - for node in devops_nodes: - ip = self.fuel_web.get_node_ip_by_devops_name(node.name) - try: - self.wait_for_slave_provision(ip) - except TimeoutError: - asserts.assert_true( - tcp_ping(ip, 22), - 'Node {0} has not become online ' - 'after warm start'.format(node.name)) - logger.info('Node {0} became online.'.format(node.name)) - - @staticmethod - def connect_extra_compute_image(slave): - """Upload extra compute image into a target node. - - :param slave: Target node name. - """ - path = settings.EXTRA_COMP_IMAGE_PATH + settings.EXTRA_COMP_IMAGE - - def find_system_drive(node): - drives = node.disk_devices - for drive in drives: - if drive.device == 'disk' and 'system' in drive.volume.name: - return drive - raise Exception('Can not find suitable volume to proceed') - - system_disk = find_system_drive(slave) - vol_path = system_disk.volume.get_path() - - try: - system_disk.volume.upload(path) - except Exception as e: - logger.error(e) - logger.debug("Volume path: {0}".format(vol_path)) - logger.debug("Image path: {0}".format(path)) - - def verify_image_connected(self, ip, types='rh'): - """Check that correct image connected to a target node system volume. - - :param ip: Remote node ip to proceed. - :param types: rh or ol - """ - if types is 'rh': - cmd = "cat /etc/redhat-release" - else: - cmd = "cat /etc/oracle-release" - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg="Image doesn't connected") - - def register_rh_subscription(self, ip): - """Register RH subscription. - - :param ip: Remote node ip to proceed. 
- """ - reg_command = ( - "/usr/sbin/subscription-manager register " - "--username={0} --password={1}".format( - settings.RH_LICENSE_USERNAME, - settings.RH_LICENSE_PASSWORD) - ) - - if settings.RH_SERVER_URL: - reg_command += " --serverurl={0}".format( - settings.RH_SERVER_URL) - - if settings.RH_REGISTERED_ORG_NAME: - reg_command += " --org={0}".format( - settings.RH_REGISTERED_ORG_NAME) - - if settings.RH_RELEASE: - reg_command += " --release={0}".format( - settings.RH_RELEASE) - - if settings.RH_ACTIVATION_KEY: - reg_command += " --activationkey={0}".format( - settings.RH_ACTIVATION_KEY) - - if settings.RH_POOL_HASH: - self.ssh_manager.execute_on_remote( - ip, reg_command, err_msg='RH registration failed') - reg_pool_cmd = ("/usr/sbin/subscription-manager " - "attach --pool={0}".format(settings.RH_POOL_HASH)) - self.ssh_manager.execute_on_remote( - ip, reg_pool_cmd, - err_msg='Can not attach node to subscription pool') - else: - cmd = reg_command + " --auto-attach" - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='RH registration with auto-attaching failed') - - def enable_extra_compute_repos(self, ip, types='rh'): - """Enable requested family mirrors on a target node. - - :param ip: Remote node ip for proceed. - :param types: rh or ol - """ - if types is 'rh': - cmd = ( - "yum-config-manager --enable rhel-{0}-server-optional-rpms &&" - " yum-config-manager --enable rhel-{0}-server-extras-rpms &&" - " yum-config-manager --enable rhel-{0}-server-rh-common-rpms" - .format(settings.RH_MAJOR_RELEASE)) - else: - cmd = ("yum-config-manager --enable ol{0}_addons && " - "yum-config-manager --enable ol{0}_optional_latest" - .format(settings.OL_MAJOR_RELEASE)) - - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Enabling requested family repos failed') - - def set_hostname(self, ip, types='rh', host_number=1): - """Set hostname with domain for a target node. - - :param host_number: Node index nubmer (1 by default). - :param types: rh or ol - :param ip: Remote node ip for proceed. - """ - hostname = "{0}-{1}.test.domain.local".format(types, host_number) - cmd = ("sysctl kernel.hostname={0} && " - "echo '{0}' > /etc/hostname".format(hostname)) - - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Setting up hostname for node failed') - return hostname - - def puppet_apply(self, puppets, ip): - """Apply list of puppets on a target node. - - :param puppets: of puppets. - :param ip: Remote node ip for proceed. - """ - logger.debug("Applying puppets...") - for puppet in puppets: - logger.debug('Applying: {0}'.format(puppet)) - self.ssh_manager.execute_on_remote( - ip, - 'puppet apply -vd -l /var/log/puppet.log {0}'.format(puppet), - err_msg='Puppet run failed. Task: {0}'.format(puppet)) - - def apply_first_part_puppet(self, ip): - """Apply first part of puppet modular tasks on target node. - - :param ip: Remote node ip for proceed. - """ - first_puppet_run = [ - "/etc/puppet/modules/osnailyfacter/modular/hiera/hiera.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "hiera/override_configuration.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "netconfig/reserved_ports.pp", - "/etc/puppet/modules/osnailyfacter/modular/fuel_pkgs/fuel_pkgs.pp", - "/etc/puppet/modules/osnailyfacter/modular/globals/globals.pp", - "/etc/puppet/modules/osnailyfacter/modular/tools/tools.pp" - ] - - self.puppet_apply(first_puppet_run, ip) - - def apply_networking_puppet(self, ip): - """Apply networking puppet on a target node. 
- - Puppet task will executed in screen to prevent disconnections while - interfaces configuring. - - :param ip: Remote node ip for proceed. - """ - iface_check = "test -f /etc/sysconfig/network-scripts/ifcfg-eth0" - result = self.ssh_manager.execute(ip, iface_check) - if result['exit_code'] == 0: - remove_iface = "rm -f /etc/sysconfig/network-scripts/ifcfg-eth0" - self.ssh_manager.execute_on_remote(ip, remove_iface) - prep = "screen -dmS netconf" - self.ssh_manager.execute_on_remote(ip, prep, - err_msg='Can not create screen') - - net_puppet = ('screen -r netconf -p 0 -X stuff ' - '$"puppet apply -vd -l /var/log/puppet.log ' - '/etc/puppet/modules/osnailyfacter/modular/' - 'netconfig/netconfig.pp && touch ~/success ^M"') - self.ssh_manager.execute_on_remote( - ip, net_puppet, - err_msg='Can not create screen with netconfig task') - - def check_netconfig_success(self, ip, timeout=10 * 20): - """Check that netconfig.pp modular task is succeeded. - - :param ip: Remote node ip for proceed. - :param timeout: Timeout for wait function. - """ - - def file_checker(target_ip): - cmd = "test -f ~/success" - result = self.ssh_manager.execute(target_ip, cmd) - logger.debug(result) - if result['exit_code'] != 0: - return False - else: - return True - wait(lambda: file_checker(ip), timeout=timeout, - timeout_msg='Netconfig puppet task unsuccessful') - - def apply_last_part_puppet(self, ip, ceph=False): - """Apply final part of puppet modular tasks on a target node. - - :param ip: Remote node ip for proceed. - """ - last_puppet_run = [ - "/etc/puppet/modules/osnailyfacter/modular/firewall/firewall.pp", - "/etc/puppet/modules/osnailyfacter/modular/hosts/hosts.pp", - "/etc/puppet/modules/osnailyfacter/modular/roles/compute.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "openstack-network/common-config.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "openstack-network/plugins/ml2.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "openstack-network/agents/l3.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "openstack-network/agents/metadata.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "openstack-network/compute-nova.pp", - "/etc/puppet/modules/osnailyfacter/modular/" - "roles/enable_compute.pp", - "/etc/puppet/modules/osnailyfacter/modular/dns/dns-client.pp", - "/etc/puppet/modules/osnailyfacter/modular/netconfig/" - "configure_default_route.pp" - ] - - if ceph: - last_puppet_run.append("/etc/puppet/modules/osnailyfacter/" - "modular/ceph/ceph_compute.pp") - last_puppet_run.append("/etc/puppet/modules/osnailyfacter/modular/" - "ntp/ntp-client.pp") - - self.puppet_apply(last_puppet_run, ip) - - def backup_required_information(self, ip, target_ip, node=1, ceph=False): - """Back up required information for compute from target node. - - :param ip: Remote Fuel master node ip. - :param target_ip: Target node ip to back up from. - :param node: Node number - :param ceph: Enabled or disabled Ceph storage. - """ - - logger.debug('Target node ip: {0}'.format(target_ip)) - cmd = ("cd ~/ && mkdir rh_backup-{1}; " - "scp -r {0}:/root/.ssh rh_backup-{1}/. 
; " - "scp {0}:/etc/astute.yaml rh_backup-{1}/ ; " - "scp -r {0}:/var/lib/astute/nova rh_backup-{1}/" - .format(target_ip, node)) - if ceph: - cmd += (" ; scp -r {0}:/var/lib/astute/ceph rh_backup-{1}/" - .format(target_ip, node)) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not back up required information from node') - logger.debug("Backed up ssh-keys and astute.yaml") - - @staticmethod - def clean_string(string, twice=True): - """Clean string of redundant characters. - - :param string: String. - :param twice: Boolean. Use function twice or not. - :return: - """ - k = str(string) - pattern = "^\s+|\[|\]|\n|,|'|\r|\s+$" - res = re.sub(pattern, '', k) - if twice: - res = res.strip('/\\n') - # NOTE(freerunner): Using sub twice to collect key without extra - # whitespaces. - res = re.sub(pattern, '', res) - res = res.strip('/\\n') - return res - - def restore_information(self, ip, remote_admin_ip, ceph=False, node=1): - - """Restore information on a target node. - - :param ip: Remote node ip. - :param remote_admin_ip: Remote admin node for proceed. - """ - - cmd = "cat ~/rh_backup-{0}/.ssh/authorized_keys".format(node) - result = self.ssh_manager.execute_on_remote( - remote_admin_ip, cmd, - err_msg='Can not get backed up ssh key.') - key = result['stdout'] - - key = self.clean_string(key) - - cmd = "mkdir ~/.ssh; echo '{0}' >> ~/.ssh/authorized_keys".format(key) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not recover ssh key for node') - - cmd = "cd ~/rh_backup-{2} && scp astute.yaml {0}@{1}:/etc/.".format( - settings.EXTRA_COMP_IMAGE_USER, ip, node) - logger.debug("Restoring astute.yaml for node with ip {0}".format(ip)) - self.ssh_manager.execute_on_remote( - remote_admin_ip, cmd, err_msg='Can not restore astute.yaml') - - cmd = "mkdir -p /var/lib/astute" - logger.debug("Prepare node for restoring nova ssh-keys") - self.ssh_manager.execute_on_remote(ip, cmd, - err_msg='Preparation failed') - - cmd = ( - "cd ~/rh_backup-{2} && scp -r nova {0}@{1}:/var/lib/astute/.". - format(settings.EXTRA_COMP_IMAGE_USER, ip, node) - ) - logger.debug("Restoring nova ssh-keys") - self.ssh_manager.execute_on_remote( - remote_admin_ip, cmd, err_msg='Can not restore ssh-keys for nova') - - if ceph: - cmd = ( - "cd ~/rh_backup-{2} && scp -r ceph {0}@{1}:/var/lib/astute/." - .format(settings.EXTRA_COMP_IMAGE_USER, ip, node) - ) - logger.debug("Restoring ceph ssh-keys") - self.ssh_manager.execute_on_remote( - remote_admin_ip, cmd, - err_msg='Can not restore ssh-keys for ceph') - - def install_yum_components(self, ip): - """Install required yum components on a target node. - - :param ip: Remote node ip for proceed. - """ - cmd = "yum install yum-utils yum-priorities -y" - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not install required yum components.') - - def set_repo_for_perestroika(self, ip): - """Set Perestroika repos. - - :param ip: Remote node ip for proceed. - """ - repo = settings.PERESTROIKA_REPO - cmd = ("curl {0}".format(repo)) - - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Perestroika repos unavailable from node.') - - cmd = ("echo '[mos]\n" - "name=mos\n" - "type=rpm-md\n" - "baseurl={0}\n" - "gpgcheck=0\n" - "enabled=1\n" - "priority=5' >" - "/etc/yum.repos.d/mos.repo && " - "yum clean all".format(repo)) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not create config file for repo') - - def check_hiera_installation(self, ip): - """Check hiera installation on node. - - :param ip: Remote node ip for proceed. 
- """ - cmd = "yum list installed | grep hiera" - logger.debug('Checking hiera installation...') - result = self.ssh_manager.execute(ip, cmd) - if result['exit_code'] == 0: - cmd = "yum remove hiera -y" - logger.debug('Found existing installation of hiera. Removing...') - result = self.ssh_manager.execute(ip, cmd) - asserts.assert_equal(result['exit_code'], 0, 'Can not remove ' - 'hiera') - cmd = "ls /etc/hiera" - logger.debug('Checking hiera files for removal...') - result = self.ssh_manager.execute(ip, cmd) - if result['exit_code'] == 0: - logger.debug('Found redundant hiera files. Removing...') - cmd = "rm -rf /etc/hiera" - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not remove hiera files') - - def check_rsync_installation(self, ip): - """Check rsync installation on node. - - :param ip: Remote node ip for proceed. - """ - cmd = "yum list installed | grep rsync" - logger.debug("Checking rsync installation...") - result = self.ssh_manager.execute(ip, cmd) - if result['exit_code'] != 0: - logger.debug("Rsync is not found. Installing rsync...") - cmd = "yum clean all && yum install rsync -y" - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not install rsync on node.') - - def remove_old_compute_services(self, ip, hostname): - """Remove old redundant services which was removed from services base. - - :param ip: Remote node ip for proceed. - :param hostname: Old compute hostname. - """ - cmd = ("source ~/openrc && for i in $(nova service-list | " - "awk '/{:s}/{{print $2}}'); do nova service-delete $i; " - "done".format(hostname)) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not remove old nova computes') - - cmd = ("source ~/openrc && for i in $(neutron agent-list | " - "awk '/{:s}/{{print $2}}'); do neutron agent-delete $i; " - "done".format(hostname)) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not remove old neutron agents') - - def install_ruby_puppet(self, ip): - """Install ruby and puppet on a target node. - - :param ip: Remote node ip for proceed. - """ - puppet_install_cmd = "yum install puppet ruby -y" - self.ssh_manager.execute_on_remote( - ip, puppet_install_cmd, - err_msg='Ruby and puppet installation failed') - - def rsync_puppet_modules(self, master_node_ip, ip): - """Rsync puppet modules from remote node to node with specified ip. - - :param master_node_ip: Remote node ip for proceed. - :param ip: IP address of a target node where to sync. - """ - cmd = ("rsync -avz /etc/puppet/modules/* " - "{0}@{1}:/etc/puppet/modules/". - format(settings.EXTRA_COMP_IMAGE_USER, ip)) - self.ssh_manager.execute_on_remote( - master_node_ip, cmd, err_msg='Rsync puppet modules failed') - - def save_node_hostname(self, ip): - """Save hostname of a node. - - :param ip: Remote node ip for proceed. - :return: Node hostname. - """ - cmd = "hostname" - result = self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not get hostname for remote') - nodename = self.clean_string(result['stdout'][0], twice=False) - return nodename - - def backup_hosts_file(self, ip, target_ip): - """Backing up hosts file - - :param ip: Remote node ip to backup to. - :param target_ip: Node ip to backup from. 
- """ - cmd = "cd ~/ && scp {0}:/etc/hosts .".format(target_ip) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not save hosts file from remote') - - def prepare_hosts_file(self, ip, old_host, new_host): - cmd = "cd ~/ && sed -i 's/{0}/{1}/g' hosts".format(old_host, new_host) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not prepare hosts file.') - - def restore_hosts_file(self, ip, target_ip): - """Restore host file - - :param ip: Node ip to restore from. - :param target_ip: Node ip to restore to. - """ - cmd = "cd ~/ && scp hosts {0}:/etc/".format(target_ip) - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not restore hosts file.') - - def clean_custom_repos(self, ip): - """Remove custom repo files - - :param ip: Node ip to clean - """ - cmd = "cd /etc/yum.repos.d && rm -f *mir* && yum clean all" - self.ssh_manager.execute_on_remote( - ip, cmd, err_msg='Can not clean custom repos.') diff --git a/fuelweb_test/tests/tests_extra_computes/test_ol_basic_actions.py b/fuelweb_test/tests/tests_extra_computes/test_ol_basic_actions.py deleted file mode 100644 index c79fd52a7..000000000 --- a/fuelweb_test/tests/tests_extra_computes/test_ol_basic_actions.py +++ /dev/null @@ -1,336 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_extra_computes.base_extra_computes \ - import ExtraComputesBase - - -@test(groups=["ol", "ol.ha_one_controller", "ol.basic"]) -class OlHaOneController(ExtraComputesBase): - """OL-based compute HA One Controller basic test""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_ol_compute_ha_one_controller_tun"]) - @log_snapshot_after_test - def deploy_ol_compute_ha_one_controller_tun(self): - """Deploy OL-based compute in HA One Controller mode - with Neutron VXLAN - - Scenario: - 1. Check required image. - 2. Revert snapshot 'ready_with_3_slaves'. - 3. Create a Fuel cluster. - 4. Update cluster nodes with required roles. - 5. Deploy the Fuel cluster. - 6. Run OSTF. - 7. Backup astute.yaml and ssh keys from compute. - 8. Boot compute with OL image. - 9. Prepare node for Puppet run. - 10. Execute modular tasks for compute. - 11. Run OSTF. - - Duration: 150m - Snapshot: deploy_ol_compute_ha_one_controller_tun - - """ - self.show_step(1, initialize=True) - logger.debug('Check MD5 sum of OL 7 image') - check_image = checkers.check_image( - settings.EXTRA_COMP_IMAGE, - settings.EXTRA_COMP_IMAGE_MD5, - settings.EXTRA_COMP_IMAGE_PATH) - asserts.assert_true(check_image, - 'Provided image is incorrect. 
' - 'Please, check image path and md5 sum of it.') - - self.show_step(2) - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(3) - logger.debug('Create Fuel cluster OL-based compute tests') - data = { - 'volumes_lvm': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'admin', - 'user': 'admin', - 'password': 'admin' - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions( - cluster_vip, data['user'], data['password'], data['tenant']) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.show_step(7) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0] - logger.debug('Got node: {0}'.format(compute)) - target_node = self.fuel_web.get_devops_node_by_nailgun_node( - compute) - logger.debug('DevOps Node: {0}'.format(target_node)) - target_node_ip = compute['ip'] - controller_ip = controller['ip'] - logger.debug('Acquired ip: {0} for node: {1}'.format( - target_node_ip, target_node.name)) - - old_hostname = self.save_node_hostname(target_node_ip) - - self.backup_required_information(self.ssh_manager.admin_ip, - target_node_ip) - - self.show_step(8) - - target_node.destroy() - asserts.assert_false(target_node.driver.node_active(node=target_node), - 'Target node still active') - self.connect_extra_compute_image(target_node) - target_node.start() - asserts.assert_true(target_node.driver.node_active(node=target_node), - 'Target node did not start') - self.wait_for_slave_provision(target_node_ip) - self.verify_image_connected(target_node_ip, types='ol') - - self.show_step(9) - - self.restore_information(target_node_ip, self.ssh_manager.admin_ip) - - self.set_hostname(target_node_ip, types='ol') - self.clean_custom_repos(target_node_ip) - self.install_yum_components(target_node_ip) - self.enable_extra_compute_repos(target_node_ip, types='ol') - self.set_repo_for_perestroika(target_node_ip) - self.check_hiera_installation(target_node_ip) - self.install_ruby_puppet(target_node_ip) - self.check_rsync_installation(target_node_ip) - - self.rsync_puppet_modules(self.ssh_manager.admin_ip, target_node_ip) - - self.show_step(10) - self.apply_first_part_puppet(target_node_ip) - self.apply_networking_puppet(target_node_ip) - self.check_netconfig_success(target_node_ip) - self.apply_last_part_puppet(target_node_ip) - - self.remove_old_compute_services(controller_ip, old_hostname) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.env.make_snapshot("ready_ha_one_controller_with_ol_compute", - is_make=True) - - -@test(groups=['ol', 'ol.failover_group']) -class OlFailoverGroup(ExtraComputesBase): - """Failover tests for OL-based computes""" - - @test(depends_on_groups=['deploy_ol_compute_ha_one_controller_tun'], - groups=['check_ol_warm_reboot']) - @log_snapshot_after_test - def check_ol_warm_reboot(self): - """Resume VM after warm 
reboot of OL-based compute - - Scenario: - 1. Revert environment with OL-compute. - 2. Check that services are ready. - 3. Boot VM on compute and check its connectivity via floating ip. - 4. Warm reboot OL-based compute. - 5. Verify VM connectivity via floating ip after successful reboot - and VM resume action. - - Duration: 20m - Snapshot: check_ol_warm_reboot - """ - - self.show_step(1) - self.env.revert_snapshot('ready_ha_one_controller_with_ol_compute', - skip_timesync=True, skip_slaves_check=True) - self.check_slaves_are_ready() - logger.debug('All slaves online.') - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.debug('Cluster up and ready.') - - self.show_step(3) - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - asserts.assert_equal(len(controllers), 1, - 'Environment does not have 1 controller node, ' - 'found {} nodes!'.format(len(controllers))) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - target_node = self.fuel_web.get_devops_node_by_nailgun_node( - compute) - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - vm = os_conn.create_server_for_migration( - neutron=True, label=net_label) - vm_floating_ip = os_conn.assign_floating_ip(vm) - logger.info('Trying to get vm via tcp.') - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), - timeout=120, - timeout_msg='Can not ping instance ' - 'by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip)) - self.show_step(4) - self.warm_restart_nodes([target_node]) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.info('All cluster services up and ' - 'running after compute reboot.') - - self.show_step(5) - asserts.assert_equal( - os_conn.get_instance_detail(vm).status, "ACTIVE", - "Instance did not reach active state after compute back online, " - "current state is {0}".format( - os_conn.get_instance_detail(vm).status)) - logger.info('Spawned VM is ACTIVE. Trying to ' - 'access it via ip: {0}'.format(vm_floating_ip.ip)) - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), - timeout=120, - timeout_msg='Can not ping instance ' - 'by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible. Deleting it.') - os_conn.delete_instance(vm) - os_conn.verify_srv_deleted(vm) - - self.env.make_snapshot("check_ol_warm_reboot") - - @test(depends_on_groups=['deploy_ol_compute_ha_one_controller_tun'], - groups=['check_ol_hard_reboot']) - @log_snapshot_after_test - def check_ol_hard_reboot(self): - """Resume VM after hard reboot of OL-based compute - - Scenario: - 1. Revert environment with OL-compute. - 2. Check that services are ready. - 3. Boot VM on compute and check its connectivity via floating ip. - 4. Hard reboot OL-based compute. - 5. Verify VM connectivity via floating ip after successful reboot - and VM resume action. 
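Both reboot tests end with the same "instance is ACTIVE and reachable again" check; condensed into one helper it would look roughly like this (a sketch reusing os_conn, asserts, wait and tcp_ping exactly as imported in this module):

def assert_vm_alive(os_conn, vm, floating_ip, timeout=120):
    # The instance must come back ACTIVE once the compute is online...
    status = os_conn.get_instance_detail(vm).status
    asserts.assert_equal(
        status, "ACTIVE",
        "Instance did not reach active state, "
        "current state is {0}".format(status))
    # ...and must answer on SSH via its floating ip again.
    wait(lambda: tcp_ping(floating_ip, 22), timeout=timeout,
         timeout_msg="Can not ping instance by "
                     "floating ip {0}".format(floating_ip))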
- - Duration: 20m - Snapshot: check_ol_hard_reboot - """ - - self.show_step(1) - self.env.revert_snapshot('ready_ha_one_controller_with_ol_compute', - skip_timesync=True, skip_slaves_check=True) - self.check_slaves_are_ready() - logger.debug('All slaves online.') - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.debug('Cluster up and ready.') - - self.show_step(3) - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - asserts.assert_equal(len(controllers), 1, - 'Environment does not have 1 controller node, ' - 'found {} nodes!'.format(len(controllers))) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - target_node = self.fuel_web.get_devops_node_by_nailgun_node( - compute) - target_node_ip = self.fuel_web.get_node_ip_by_devops_name( - target_node.name) - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - vm = os_conn.create_server_for_migration( - neutron=True, label=net_label) - vm_floating_ip = os_conn.assign_floating_ip(vm) - logger.info('Trying to get vm via tcp.') - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), - timeout=120, - timeout_msg='Can not ping instance ' - 'by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip)) - self.show_step(4) - target_node.destroy() - asserts.assert_false(target_node.driver.node_active(node=target_node), - 'Target node still active') - target_node.start() - asserts.assert_true(target_node.driver.node_active(node=target_node), - 'Target node did not start') - self.wait_for_slave_provision(target_node_ip) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.info('All cluster services up and ' - 'running after compute hard reboot.') - - self.show_step(5) - asserts.assert_equal( - os_conn.get_instance_detail(vm).status, "ACTIVE", - "Instance did not reach active state after compute back online, " - "current state is {0}".format( - os_conn.get_instance_detail(vm).status)) - logger.info('Spawned VM is ACTIVE. Trying to ' - 'access it via ip: {0}'.format(vm_floating_ip.ip)) - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), - timeout=120, - timeout_msg='Can not ping instance ' - 'by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible. Deleting it.') - os_conn.delete_instance(vm) - os_conn.verify_srv_deleted(vm) - - self.env.make_snapshot("check_ol_hard_reboot") diff --git a/fuelweb_test/tests/tests_extra_computes/test_ol_migration.py b/fuelweb_test/tests/tests_extra_computes/test_ol_migration.py deleted file mode 100644 index c174e2cb0..000000000 --- a/fuelweb_test/tests/tests_extra_computes/test_ol_migration.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_extra_computes import base_extra_computes - - -@test(groups=['ol.migration']) -class OlHAOneControllerMigration(base_extra_computes.ExtraComputesBase): - """OL-based compute HA migration test""" - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["check_vm_migration_ol_ha_one_controller_tun"]) - @log_snapshot_after_test - def check_vm_migration_ol_ha_one_controller_tun(self): - """Deploy environment with OL computes - - Scenario: - 1. Check required image. - 2. Revert snapshot 'ready_with_5_slaves'. - 3. Create a Fuel cluster. - 4. Update cluster nodes with required roles. - 5. Deploy the Fuel cluster. - 6. Run OSTF. - 7. Backup astute.yaml and ssh keys from one of computes. - 8. Boot computes with OL image. - 9. Prepare node for Puppet run. - 10. Execute modular tasks for compute. - 11. Run OSTF. - - - Duration: 150m - Snapshot: check_vm_migration_ol_ha_one_controller_tun - - """ - self.show_step(1) - logger.debug('Check MD5 sum of OL 7 image') - check_image = checkers.check_image( - settings.EXTRA_COMP_IMAGE, - settings.EXTRA_COMP_IMAGE_MD5, - settings.EXTRA_COMP_IMAGE_PATH) - asserts.assert_true(check_image, - 'Provided image is incorrect. 
' - 'Please, check image path and md5 sum of it.') - - self.show_step(2) - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(3) - logger.debug('Create Fuel cluster OL-based compute tests') - data = { - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'OlHAMigration', - 'user': 'OlHAMigration', - 'password': 'OlHAMigration', - 'volumes_ceph': True, - 'ephemeral_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'osd_pool_size': "1" - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'], - 'slave-04': ['ceph-osd'], - } - ) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions( - cluster_vip, data['user'], data['password'], data['tenant']) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.show_step(7) - compute_one = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - controller_ip = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0]['ip'] - logger.debug('Got node: {0}'.format(compute_one)) - target_node_one = self.fuel_web.get_devops_node_by_nailgun_node( - compute_one) - logger.debug('DevOps Node: {0}'.format(target_node_one)) - target_node_one_ip = compute_one['ip'] - logger.debug('Acquired ip: {0} for node: {1}'.format( - target_node_one_ip, target_node_one.name)) - - compute_two = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[1] - logger.debug('Got node: {0}'.format(compute_two)) - target_node_two = self.fuel_web.get_devops_node_by_nailgun_node( - compute_two) - logger.debug('DevOps Node: {0}'.format(target_node_two)) - target_node_two_ip = compute_two['ip'] - logger.debug('Acquired ip: {0} for node: {1}'.format( - target_node_two_ip, target_node_two.name)) - - old_hostname_one = self.save_node_hostname(target_node_one_ip) - old_hostname_two = self.save_node_hostname(target_node_two_ip) - - self.backup_required_information(self.ssh_manager.admin_ip, - target_node_one_ip, ceph=True) - self.backup_required_information(self.ssh_manager.admin_ip, - target_node_two_ip, ceph=True, - node=2) - self.backup_hosts_file(self.ssh_manager.admin_ip, controller_ip) - - self.show_step(8) - - target_node_one.destroy() - target_node_two.destroy() - asserts.assert_false( - target_node_one.driver.node_active(node=target_node_one), - 'Target node still active') - asserts.assert_false( - target_node_two.driver.node_active(node=target_node_two), - 'Target node still active') - self.connect_extra_compute_image(target_node_one) - self.connect_extra_compute_image(target_node_two) - target_node_one.start() - asserts.assert_true( - target_node_one.driver.node_active(node=target_node_one), - 'Target node did not start') - self.wait_for_slave_provision(target_node_one_ip) - target_node_two.start() - asserts.assert_true( - target_node_two.driver.node_active(node=target_node_two), - 'Target node did not start') - self.wait_for_slave_provision(target_node_two_ip) - self.verify_image_connected(target_node_one_ip, types='ol') - self.verify_image_connected(target_node_two_ip, types='ol') - - self.show_step(9) - - self.restore_information(target_node_one_ip, 
- self.ssh_manager.admin_ip, ceph=True) - self.restore_information(target_node_two_ip, - self.ssh_manager.admin_ip, ceph=True, node=2) - - new_host_one = self.set_hostname(target_node_one_ip, types='ol') - self.clean_custom_repos(target_node_one_ip) - self.install_yum_components(target_node_one_ip) - self.enable_extra_compute_repos(target_node_one_ip, types='ol') - self.set_repo_for_perestroika(target_node_one_ip) - self.check_hiera_installation(target_node_one_ip) - self.install_ruby_puppet(target_node_one_ip) - self.check_rsync_installation(target_node_one_ip) - - new_host_two = self.set_hostname(target_node_two_ip, host_number=2, - types='ol') - self.clean_custom_repos(target_node_two_ip) - self.install_yum_components(target_node_two_ip) - self.enable_extra_compute_repos(target_node_two_ip, types='ol') - self.set_repo_for_perestroika(target_node_two_ip) - self.check_hiera_installation(target_node_two_ip) - self.install_ruby_puppet(target_node_two_ip) - self.check_rsync_installation(target_node_two_ip) - - self.rsync_puppet_modules(self.ssh_manager.admin_ip, - target_node_one_ip) - self.rsync_puppet_modules(self.ssh_manager.admin_ip, - target_node_two_ip) - self.prepare_hosts_file(self.ssh_manager.admin_ip, old_hostname_one, - new_host_one) - self.prepare_hosts_file(self.ssh_manager.admin_ip, old_hostname_two, - new_host_two) - self.restore_hosts_file(self.ssh_manager.admin_ip, target_node_one_ip) - self.restore_hosts_file(self.ssh_manager.admin_ip, target_node_two_ip) - - self.show_step(10) - self.apply_first_part_puppet(target_node_one_ip) - self.apply_first_part_puppet(target_node_two_ip) - self.apply_networking_puppet(target_node_one_ip) - self.apply_networking_puppet(target_node_two_ip) - self.check_netconfig_success(target_node_one_ip) - self.apply_last_part_puppet(target_node_one_ip, ceph=True) - self.check_netconfig_success(target_node_two_ip) - self.apply_last_part_puppet(target_node_two_ip, ceph=True) - - self.remove_old_compute_services(controller_ip, old_hostname_one) - self.remove_old_compute_services(controller_ip, old_hostname_two) - - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=6) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.env.make_snapshot("check_vm_migration_ol_ha_one_controller_tun") diff --git a/fuelweb_test/tests/tests_extra_computes/test_rh_basic_actions.py b/fuelweb_test/tests/tests_extra_computes/test_rh_basic_actions.py deleted file mode 100644 index 7e5756755..000000000 --- a/fuelweb_test/tests/tests_extra_computes/test_rh_basic_actions.py +++ /dev/null @@ -1,332 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
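# --- Editorial sketch (not part of the original patch) ----------------------
# The failover tests in this file repeatedly use wait()/tcp_ping() from
# devops.helpers.helpers to block until a VM's floating IP accepts SSH
# connections. Below is a minimal, standard-library-only sketch of that
# polling pattern; the real fuel-devops helpers have richer signatures,
# so treat this as an illustration of the idiom, not the actual
# implementation.
import socket
import time


def tcp_ping(host, port, conn_timeout=5):
    """Return True if a TCP connection to host:port can be established."""
    try:
        sock = socket.create_connection((host, port), timeout=conn_timeout)
        sock.close()
        return True
    except socket.error:
        return False


def wait(predicate, timeout=120, interval=5, timeout_msg='Timed out'):
    """Poll predicate() until it returns a truthy value; raise on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise AssertionError(timeout_msg)

# Usage, mirroring the tests below:
#   wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120,
#        timeout_msg='Can not ping instance by floating ip ...')
# ----------------------------------------------------------------------------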
- -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_extra_computes.base_extra_computes \ - import ExtraComputesBase - - -@test(groups=["rh", "rh.ha_one_controller", "rh.basic"]) -class RhHaOneController(ExtraComputesBase): - """RH-based compute HA One Controller basic test""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["deploy_rh_compute_ha_one_controller_tun"]) - @log_snapshot_after_test - def deploy_rh_compute_ha_one_controller_tun(self): - """Deploy RH-based compute in HA One Controller mode - with Neutron VXLAN - - Scenario: - 1. Check required image. - 2. Revert snapshot 'ready_with_3_slaves'. - 3. Create a Fuel cluster. - 4. Update cluster nodes with required roles. - 5. Deploy the Fuel cluster. - 6. Run OSTF. - 7. Backup astute.yaml and ssh keys from compute. - 8. Boot compute with RH image. - 9. Prepare node for Puppet run. - 10. Execute modular tasks for compute. - 11. Run OSTF. - - Duration: 150m - Snapshot: deploy_rh_compute_ha_one_controller_tun - - """ - self.show_step(1, initialize=True) - logger.debug('Check MD5 sum of RH 7 image') - check_image = checkers.check_image( - settings.EXTRA_COMP_IMAGE, - settings.EXTRA_COMP_IMAGE_MD5, - settings.EXTRA_COMP_IMAGE_PATH) - asserts.assert_true(check_image, - 'Provided image is incorrect. ' - 'Please, check image path and md5 sum of it.') - - self.show_step(2) - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(3) - logger.debug('Create Fuel cluster RH-based compute tests') - data = { - 'volumes_lvm': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'admin', - 'user': 'admin', - 'password': 'admin' - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions( - cluster_vip, data['user'], data['password'], data['tenant']) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.show_step(7) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0] - logger.debug('Got node: {0}'.format(compute)) - target_node = self.fuel_web.get_devops_node_by_nailgun_node( - compute) - logger.debug('DevOps Node: {0}'.format(target_node)) - target_node_ip = compute['ip'] - controller_ip = controller['ip'] - logger.debug('Acquired ip: {0} for node: {1}'.format( - target_node_ip, target_node.name)) - - old_hostname = self.save_node_hostname(target_node_ip) - - self.backup_required_information(self.ssh_manager.admin_ip, - target_node_ip) - - self.show_step(8) - - target_node.destroy() - asserts.assert_false(target_node.driver.node_active(node=target_node), - 'Target node still 
active') - self.connect_extra_compute_image(target_node) - target_node.start() - asserts.assert_true(target_node.driver.node_active(node=target_node), - 'Target node did not start') - self.wait_for_slave_provision(target_node_ip) - self.verify_image_connected(target_node_ip) - - self.show_step(9) - - self.restore_information(target_node_ip, self.ssh_manager.admin_ip) - - self.set_hostname(target_node_ip) - if not settings.CENTOS_DUMMY_DEPLOY: - self.register_rh_subscription(target_node_ip) - self.install_yum_components(target_node_ip) - if not settings.CENTOS_DUMMY_DEPLOY: - self.enable_extra_compute_repos(target_node_ip) - self.set_repo_for_perestroika(target_node_ip) - self.check_hiera_installation(target_node_ip) - self.install_ruby_puppet(target_node_ip) - self.check_rsync_installation(target_node_ip) - - self.rsync_puppet_modules(self.ssh_manager.admin_ip, target_node_ip) - - self.show_step(10) - self.apply_first_part_puppet(target_node_ip) - self.apply_networking_puppet(target_node_ip) - self.check_netconfig_success(target_node_ip) - self.apply_last_part_puppet(target_node_ip) - - self.remove_old_compute_services(controller_ip, old_hostname) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.env.make_snapshot("ready_ha_one_controller_with_rh_compute", - is_make=True) - - -@test(groups=['rh', 'rh.failover_group']) -class RhFailoverGroup(ExtraComputesBase): - """Failover tests for RH-based computes""" - - @test(depends_on_groups=['deploy_rh_compute_ha_one_controller_tun'], - groups=['check_rh_warm_reboot']) - @log_snapshot_after_test - def check_rh_warm_reboot(self): - """Check that resumed VM is working properly after warm reboot of - RH-based compute - - Scenario: - 1. Revert environment with RH-compute. - 2. Check that services are ready. - 3. Boot VM on compute and check its connectivity via floating ip. - 4. Warm reboot RH-based compute. - 5. Verify VM connectivity via floating ip after successful reboot - and VM resume action. 
- - Duration 20m - Snapshot check_rh_warm_reboot - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('ready_ha_one_controller_with_rh_compute', - skip_timesync=True, skip_slaves_check=True) - self.check_slaves_are_ready() - logger.debug('All slaves online.') - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.debug('Cluster up and ready.') - - self.show_step(3) - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - asserts.assert_equal(len(controllers), 1, - 'Environment does not have 1 controller node, ' - 'found {} nodes!'.format(len(controllers))) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - target_node = self.fuel_web.get_devops_node_by_nailgun_node( - compute) - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - vm = os_conn.create_server_for_migration( - neutron=True, label=net_label) - vm_floating_ip = os_conn.assign_floating_ip(vm) - logger.info('Trying to get vm via tcp.') - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip)) - self.show_step(4) - self.warm_restart_nodes([target_node]) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.info('All cluster services up and ' - 'running after compute reboot.') - - self.show_step(5) - asserts.assert_equal( - os_conn.get_instance_detail(vm).status, "ACTIVE", - "Instance did not reach active state after compute back online, " - "current state is {0}".format( - os_conn.get_instance_detail(vm).status)) - logger.info('Spawned VM is ACTIVE. Trying to ' - 'access it via ip: {0}'.format(vm_floating_ip.ip)) - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible. Deleting it.') - os_conn.delete_instance(vm) - os_conn.verify_srv_deleted(vm) - - @test(depends_on_groups=['deploy_rh_compute_ha_one_controller_tun'], - groups=['check_rh_hard_reboot']) - @log_snapshot_after_test - def check_rh_hard_reboot(self): - """Check that resumed VM is working properly after hard reboot of - RH-based compute - - Scenario: - 1. Revert environment with RH-compute. - 2. Check that services are ready. - 3. Boot VM on compute and check its connectivity via floating ip. - 4. Hard reboot RH-based compute. - 5. Verify VM connectivity via floating ip after successful reboot - and VM resume action. 
- - Duration 20m - Snapshot check_rh_hard_reboot - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('ready_ha_one_controller_with_rh_compute', - skip_timesync=True, skip_slaves_check=True) - self.check_slaves_are_ready() - logger.debug('All slaves online.') - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.debug('Cluster up and ready.') - - self.show_step(3) - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - asserts.assert_equal(len(controllers), 1, - 'Environment does not have 1 controller node, ' - 'found {} nodes!'.format(len(controllers))) - compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - target_node = self.fuel_web.get_devops_node_by_nailgun_node( - compute) - target_node_ip = self.fuel_web.get_node_ip_by_devops_name( - target_node.name) - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - vm = os_conn.create_server_for_migration( - neutron=True, label=net_label) - vm_floating_ip = os_conn.assign_floating_ip(vm) - logger.info('Trying to get vm via tcp.') - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip)) - self.show_step(4) - target_node.destroy() - asserts.assert_false(target_node.driver.node_active(node=target_node), - 'Target node still active') - target_node.start() - asserts.assert_true(target_node.driver.node_active(node=target_node), - 'Target node did not start') - self.wait_for_slave_provision(target_node_ip) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - logger.info('All cluster services up and ' - 'running after compute hard reboot.') - - self.show_step(5) - asserts.assert_equal( - os_conn.get_instance_detail(vm).status, "ACTIVE", - "Instance did not reach active state after compute back online, " - "current state is {0}".format( - os_conn.get_instance_detail(vm).status)) - logger.info('Spawned VM is ACTIVE. Trying to ' - 'access it via ip: {0}'.format(vm_floating_ip.ip)) - wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(vm_floating_ip.ip)) - logger.info('VM is accessible. Deleting it.') - os_conn.delete_instance(vm) - os_conn.verify_srv_deleted(vm) diff --git a/fuelweb_test/tests/tests_extra_computes/test_rh_migration.py b/fuelweb_test/tests/tests_extra_computes/test_rh_migration.py deleted file mode 100644 index 2abba3edc..000000000 --- a/fuelweb_test/tests/tests_extra_computes/test_rh_migration.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import os_actions -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_extra_computes.base_extra_computes \ - import ExtraComputesBase - - -@test(groups=['rh.migration']) -class RhHAOneControllerMigration(ExtraComputesBase): - """RH-based compute HA migration test""" - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["check_vm_migration_rh_ha_one_controller_tun"]) - @log_snapshot_after_test - def check_vm_migration_rh_ha_one_controller_tun(self): - """Deploy environment with RH and Ubuntu computes in HA mode with - neutron VXLAN - - Scenario: - 1. Check required image. - 2. Revert snapshot 'ready_with_5_slaves'. - 3. Create a Fuel cluster. - 4. Update cluster nodes with required roles. - 5. Deploy the Fuel cluster. - 6. Run OSTF. - 7. Backup astute.yaml and ssh keys from one of computes. - 8. Boot compute with RH image. - 9. Prepare node for Puppet run. - 10. Execute modular tasks for compute. - 11. Run OSTF. - - - Duration: 150m - Snapshot: check_vm_migration_rh_ha_one_controller_tun - - """ - self.show_step(1, initialize=True) - logger.debug('Check MD5 sum of RH 7 image') - check_image = checkers.check_image( - settings.EXTRA_COMP_IMAGE, - settings.EXTRA_COMP_IMAGE_MD5, - settings.EXTRA_COMP_IMAGE_PATH) - asserts.assert_true(check_image, - 'Provided image is incorrect. ' - 'Please, check image path and md5 sum of it.') - - self.show_step(2) - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(3) - logger.debug('Create Fuel cluster RH-based compute tests') - data = { - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'RhHAMigration', - 'user': 'RhHAMigration', - 'password': 'RhHAMigration', - 'volumes_ceph': True, - 'ephemeral_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'osd_pool_size': "1" - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'], - 'slave-04': ['ceph-osd'], - } - ) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - cluster_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions( - cluster_vip, data['user'], data['password'], data['tenant']) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.show_step(7) - compute_one = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[0] - controller_ip = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0]['ip'] - logger.debug('Got node: {0}'.format(compute_one)) - target_node_one = self.fuel_web.get_devops_node_by_nailgun_node( - compute_one) - logger.debug('DevOps Node: {0}'.format(target_node_one)) - target_node_one_ip = compute_one['ip'] - logger.debug('Acquired ip: {0} for node: {1}'.format( - target_node_one_ip, target_node_one.name)) - - compute_two = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute'])[1] - 
logger.debug('Got node: {0}'.format(compute_two)) - target_node_two = self.fuel_web.get_devops_node_by_nailgun_node( - compute_two) - logger.debug('DevOps Node: {0}'.format(target_node_two)) - target_node_two_ip = compute_two['ip'] - logger.debug('Acquired ip: {0} for node: {1}'.format( - target_node_two_ip, target_node_two.name)) - - old_hostname_one = self.save_node_hostname(target_node_one_ip) - old_hostname_two = self.save_node_hostname(target_node_two_ip) - - self.backup_required_information(self.ssh_manager.admin_ip, - target_node_one_ip, ceph=True) - self.backup_required_information(self.ssh_manager.admin_ip, - target_node_two_ip, ceph=True, - node=2) - self.backup_hosts_file(self.ssh_manager.admin_ip, controller_ip) - - self.show_step(8) - - target_node_one.destroy() - target_node_two.destroy() - asserts.assert_false( - target_node_one.driver.node_active(node=target_node_one), - 'Target node still active') - asserts.assert_false( - target_node_two.driver.node_active(node=target_node_two), - 'Target node still active') - self.connect_extra_compute_image(target_node_one) - self.connect_extra_compute_image(target_node_two) - target_node_one.start() - asserts.assert_true( - target_node_one.driver.node_active(node=target_node_one), - 'Target node did not start') - self.wait_for_slave_provision(target_node_one_ip) - target_node_two.start() - asserts.assert_true( - target_node_two.driver.node_active(node=target_node_two), - 'Target node did not start') - self.wait_for_slave_provision(target_node_two_ip) - self.verify_image_connected(target_node_one_ip) - self.verify_image_connected(target_node_two_ip) - - self.show_step(9) - - self.restore_information(target_node_one_ip, - self.ssh_manager.admin_ip, ceph=True) - self.restore_information(target_node_two_ip, - self.ssh_manager.admin_ip, ceph=True, node=2) - - new_host_one = self.set_hostname(target_node_one_ip) - if not settings.CENTOS_DUMMY_DEPLOY: - self.register_rh_subscription(target_node_one_ip) - self.install_yum_components(target_node_one_ip) - if not settings.CENTOS_DUMMY_DEPLOY: - self.enable_extra_compute_repos(target_node_one_ip) - self.set_repo_for_perestroika(target_node_one_ip) - self.check_hiera_installation(target_node_one_ip) - self.install_ruby_puppet(target_node_one_ip) - self.check_rsync_installation(target_node_one_ip) - - new_host_two = self.set_hostname(target_node_two_ip, host_number=2) - if not settings.CENTOS_DUMMY_DEPLOY: - self.register_rh_subscription(target_node_two_ip) - self.install_yum_components(target_node_two_ip) - if not settings.CENTOS_DUMMY_DEPLOY: - self.enable_extra_compute_repos(target_node_two_ip) - self.set_repo_for_perestroika(target_node_two_ip) - self.check_hiera_installation(target_node_two_ip) - self.install_ruby_puppet(target_node_two_ip) - self.check_rsync_installation(target_node_two_ip) - - self.rsync_puppet_modules(self.ssh_manager.admin_ip, - target_node_one_ip) - self.rsync_puppet_modules(self.ssh_manager.admin_ip, - target_node_two_ip) - self.prepare_hosts_file(self.ssh_manager.admin_ip, old_hostname_one, - new_host_one) - self.prepare_hosts_file(self.ssh_manager.admin_ip, old_hostname_two, - new_host_two) - self.restore_hosts_file(self.ssh_manager.admin_ip, target_node_one_ip) - self.restore_hosts_file(self.ssh_manager.admin_ip, target_node_two_ip) - - self.show_step(10) - self.apply_first_part_puppet(target_node_one_ip) - self.apply_first_part_puppet(target_node_two_ip) - self.apply_networking_puppet(target_node_one_ip) - self.apply_networking_puppet(target_node_two_ip) - 
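        # Editorial note: networking puppet is applied and verified
        # (check_netconfig_success below) before the final puppet phase,
        # presumably because a broken network config would cut off SSH
        # access to the node mid-conversion.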
self.check_netconfig_success(target_node_one_ip) - self.apply_last_part_puppet(target_node_one_ip, ceph=True) - self.check_netconfig_success(target_node_two_ip) - self.apply_last_part_puppet(target_node_two_ip, ceph=True) - - self.remove_old_compute_services(controller_ip, old_hostname_one) - self.remove_old_compute_services(controller_ip, old_hostname_two) - - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=6) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - self.env.make_snapshot("ready_ha_one_controller_with_rh_computes") diff --git a/fuelweb_test/tests/tests_ibp/__init__.py b/fuelweb_test/tests/tests_ibp/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_ibp/test_ibp.py b/fuelweb_test/tests/tests_ibp/test_ibp.py deleted file mode 100644 index 11e980fc0..000000000 --- a/fuelweb_test/tests/tests_ibp/test_ibp.py +++ /dev/null @@ -1,296 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test.helpers.checkers import check_package_version -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["test_ibp"]) -class IBPTest(TestBasic): - """IBP test.""" # TODO(vshypyguzov) documentation - - def check_node_packages(self, node_name, pkg_list): - node_ip = self.fuel_web.get_nailgun_node_by_base_name(node_name)['ip'] - cmd = "dpkg-query -W -f='${Package}'\r" - node_pkgs = self.ssh_manager.execute_on_remote( - node_ip, - cmd)['stdout_str'].splitlines() - node_pkgs = set(node_pkgs) - logger.debug('Node packages are: {}'.format(node_pkgs)) - assert_true( - pkg_list.issubset(node_pkgs), - 'Not all packages are present on node.' - ' Missing packages: {}'.format(pkg_list - node_pkgs) - ) - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["check_mcollective_version"]) - @log_snapshot_after_test - def check_mcollective_version(self): - """Check mcollective package version on bootstrap and provisioned node - - Scenario: - 1. Check mcollective version on bootstrap - 2. Create cluster - 3. Add one node to cluster - 4. Provision nodes - 5. 
Check mcollective version on node - - Duration 5m - """ - self.env.revert_snapshot("ready_with_1_slaves", skip_timesync=True) - self.show_step(1) - - node = self.env.d_env.get_node(name__in=["slave-01"]) - _ip = self.fuel_web.get_nailgun_node_by_devops_node(node)['ip'] - check_package_version(_ip, 'mcollective', '2.3.3', 'ge') - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__ - ) - pkg_list = self.fuel_web.get_cluster_ibp_packages(cluster_id) - logger.debug('Cluster IBP packages: {}'.format(pkg_list)) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - } - ) - - self.show_step(4) - self.fuel_web.provisioning_cluster_wait(cluster_id) - - self.show_step(5) - check_package_version(_ip, 'mcollective', '2.3.3', 'ge') - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["check_ibp_default_package_list"]) - @log_snapshot_after_test - def check_ibp_default_package_list(self): - """Provision one node with default package list - - Scenario: - 1. Create cluster - 2. Add one node to cluster - 3. Provision nodes - 4. Check that all default packages are installed on the node - - Duration 60m - Snapshot check_ibp_default_package_list - - """ - self.env.revert_snapshot("ready_with_1_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__ - ) - pkg_list = self.fuel_web.get_cluster_ibp_packages(cluster_id) - logger.debug('Cluster IBP packages: {}'.format(pkg_list)) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - } - ) - - self.show_step(3) - self.fuel_web.provisioning_cluster_wait(cluster_id) - - self.show_step(4) - self.check_node_packages('slave-01', pkg_list) - - self.env.make_snapshot("check_ibp_default_package_list") - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["check_ibp_add_package"]) - @log_snapshot_after_test - def check_ibp_add_package(self): - """Add package to package list and provision one node. Check that - added package is installed. - - Scenario: - 1. Create cluster - 2. Add one package to the initial packages list - 3. Add one node to cluster - 4. Provision nodes - 5. 
Check that all packages including added one are installed - - Duration 60m - Snapshot check_ibp_add_package - - """ - self.env.revert_snapshot("ready_with_1_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__ - ) - self.show_step(2) - pkg_list = self.fuel_web.get_cluster_ibp_packages(cluster_id) - logger.debug( - 'Cluster IBP packages before update: {}'.format(sorted(pkg_list)) - ) - pkg_to_add = 'lynx' - assert_true( - pkg_to_add not in pkg_list, - message='{} is already present in package list'.format(pkg_to_add) - ) - logger.debug( - 'Adding {} to the initial packages list'.format(pkg_to_add) - ) - pkg_list.add(pkg_to_add) - pkg_list = self.fuel_web.update_cluster_ibp_packages( - cluster_id, pkg_list) - logger.debug( - 'Cluster IBP packages after update: {}'.format(sorted(pkg_list)) - ) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - } - ) - - self.show_step(4) - self.fuel_web.provisioning_cluster_wait(cluster_id) - - self.show_step(5) - self.check_node_packages('slave-01', pkg_list) - - self.env.make_snapshot("check_ibp_add_package") - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["check_ibp_remove_package"]) - @log_snapshot_after_test - def check_ibp_remove_package(self): - """Remove package from package list and provision one node. Check that - removed package is not installed. - - Scenario: - 1. Create cluster - 2. Remove one package from the initial packages list - 3. Add one node to cluster - 4. Provision nodes - 5. Check that all packages besides removed are installed - - Duration 60m - Snapshot check_ibp_remove_package - - """ - self.env.revert_snapshot("ready_with_1_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__ - ) - self.show_step(2) - pkg_list = self.fuel_web.get_cluster_ibp_packages(cluster_id) - logger.debug( - 'Cluster IBP packages before update: {}'.format(sorted(pkg_list)) - ) - pkg_for_removal = pkg_list.pop() - logger.debug('Removing {} from the initial packages list'.format( - pkg_for_removal)) - - pkg_list = self.fuel_web.update_cluster_ibp_packages( - cluster_id, pkg_list) - logger.debug( - 'Cluster IBP packages after update: {}'.format(sorted(pkg_list)) - ) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - } - ) - - self.show_step(4) - self.fuel_web.provisioning_cluster_wait(cluster_id) - - self.show_step(5) - self.check_node_packages('slave-01', pkg_list) - - self.env.make_snapshot("check_ibp_remove_package") - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["check_ibp_add_wrong_package"]) - @log_snapshot_after_test - def check_ibp_add_wrong_package(self): - """Add package with wrong name to package list and provision one node. - Check that provision ends with error. - - Scenario: - 1. Create cluster - 2. Add one package to the initial packages list - 3. Add one node to cluster - 4. Provision nodes - 5. 
Check that provisioning ends with error - - Duration 60m - Snapshot check_ibp_add_wrong_package - - """ - self.env.revert_snapshot("ready_with_1_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__ - ) - self.show_step(2) - default_pkg_list = self.fuel_web.get_cluster_ibp_packages(cluster_id) - logger.debug( - 'Cluster IBP packages before update: {}'.format( - sorted(default_pkg_list) - ) - ) - logger.debug('Adding non-existent-pckg to the initial packages list') - default_pkg_list.add('non-existent-pckg') - pkg_list = self.fuel_web.update_cluster_ibp_packages( - cluster_id, default_pkg_list) - default_pkg_list.remove('non-existent-pckg') - logger.debug( - 'Cluster IBP packages after update: {}'.format(sorted(pkg_list)) - ) - - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - } - ) - - self.show_step(4) - task = self.fuel_web.client.provision_nodes(cluster_id) - - self.show_step(5) - self.fuel_web.assert_task_failed(task) - - self.env.make_snapshot("check_ibp_add_wrong_package") diff --git a/fuelweb_test/tests/tests_lcm/__init__.py b/fuelweb_test/tests/tests_lcm/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_lcm/base_lcm_test.py b/fuelweb_test/tests/tests_lcm/base_lcm_test.py deleted file mode 100644 index bdd544ccb..000000000 --- a/fuelweb_test/tests/tests_lcm/base_lcm_test.py +++ /dev/null @@ -1,857 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
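# --- Editorial sketch (not part of the original patch) ----------------------
# Puppet writes last_run_report.yaml with Ruby type tags such as
# !ruby/object:Puppet::Transaction::Report, which stock PyYAML refuses to
# load. The module below handles this by registering a multi-constructor
# that maps any !ruby/object:* tag to a plain mapping and !ruby/sym to a
# plain string. A self-contained demonstration of the same trick follows;
# the sample document is illustrative, not a real Puppet report.
import yaml


def construct_ruby_object(loader, suffix, node):
    # Load any !ruby/object:<Class> node as an ordinary dict.
    return loader.construct_yaml_map(node)


def construct_ruby_sym(loader, node):
    # Load !ruby/sym symbols as ordinary strings.
    return loader.construct_yaml_str(node)


yaml.add_multi_constructor(u"!ruby/object:", construct_ruby_object)
yaml.add_constructor(u"!ruby/sym", construct_ruby_sym)

sample = u"""
--- !ruby/object:Puppet::Transaction::Report
status: !ruby/sym changed
resource_statuses: {}
"""
report = yaml.load(sample)
assert report['status'] == 'changed'
# ----------------------------------------------------------------------------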
- -import fileinput -import os - -from proboscis import asserts -from proboscis import test -import yaml - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -# NOTE: Setup yaml to work with puppet report -def construct_ruby_object(loader, suffix, node): - """Define a specific constructor""" - return loader.construct_yaml_map(node) - - -def construct_ruby_sym(loader, node): - """Define a specific multi constructor""" - return loader.construct_yaml_str(node) - - -TASKS_BLACKLIST = [ - "pre_hiera_config", - "reboot_provisioned_nodes", - "hiera", - "configure_default_route", - "netconfig", - "upload_provision_data"] - - -SETTINGS_SKIPLIST = ( - "dns_list", - "ntp_list", - "repo_setup" -) - - -class DeprecatedFixture(Exception): - def __init__(self, msg): - super(DeprecatedFixture, self).__init__(msg) - - -class LCMTestBasic(TestBasic): - """LCMTestBasic.""" # TODO documentation - - def __init__(self): - super(LCMTestBasic, self).__init__() - yaml.add_multi_constructor(u"!ruby/object:", construct_ruby_object) - yaml.add_constructor(u"!ruby/sym", construct_ruby_sym) - - @staticmethod - def node_roles(node): - """Compose a string that represents all roles assigned to given node - - :param node: dict, node data - :return: str - """ - return "_".join(sorted(node["roles"])) - - # FIXME: after implementation of the main functional of PROD-2510 - @staticmethod - def get_nodes_tasks(node_id): - """ - :param node_id: an integer number of node id - :return: a set of deployment tasks for corresponding node - """ - tasks = set() - ssh = SSHManager() - - result = ssh.execute_on_remote(ssh.admin_ip, "ls /var/log/astute") - filenames = [filename.strip() for filename in result['stdout']] - - for filename in filenames: - ssh.download_from_remote( - ssh.admin_ip, - destination="/var/log/astute/{0}".format(filename), - target="/tmp/{0}".format(filename)) - - data = fileinput.FileInput( - files=["/tmp/{0}".format(filename) for filename in filenames], - openhook=fileinput.hook_compressed) - for line in data: - if "Task time summary" in line \ - and "node {}".format(node_id) in line: - # FIXME: define an exact search of task - task_name = line.split("Task time summary: ")[1].split()[0] - check = any([excluded_task in task_name - for excluded_task in TASKS_BLACKLIST]) - if check: - continue - tasks.add(task_name) - return tasks - - @staticmethod - def get_task_type(tasks, task_id): - """Get task type - - :param tasks: a list of dictionaries with task description - :param task_id: a string, name of deployment task - :return: a string of task type or a boolean value "False" - """ - for task in tasks: - if task.get('id', '') == task_id: - return task.get('type', False) - return False - - @staticmethod - def get_puppet_report(node): - """Get puppet run report from corresponding node - - :param node: a dictionary with node description - :return: a dictionary with puppet report data - """ - ssh = SSHManager() - ip = node['ip'] - report_file = "/var/lib/puppet/state/last_run_report.yaml" - asserts.assert_true(ssh.isfile_on_remote(ip, report_file), - 'File {!r} not found on node {!r}' - .format(report_file, node['id'])) - with ssh.open_on_remote(ip, report_file) as f: - data = yaml.load(f) - 
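        # Editorial note: the report file is removed once read, presumably
        # so that re-running a task produces a fresh last_run_report.yaml
        # rather than a stale one.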
ssh.rm_rf_on_remote(ip, report_file) - return data - - @staticmethod - def load_fixture(deployment_type, role, idmp=True): - """Load fixture for corresponding kind of deployment - - :param deployment_type: a string, name of the deployment kind - :param role: a string, node role - :param idmp: bool, indicates whether idempotency or ensurability - fixture is loaded - :return: a dictionary with loaded fixture data - """ - subdir = "idempotency" if idmp else "ensurability" - fixture_path = os.path.join( - os.path.dirname(__file__), "fixtures", - deployment_type, subdir, "{}.yaml".format(role)) - with open(fixture_path) as f: - fixture = yaml.load(f) - - default_attrs = {"no_puppet_run": False, - "type": "puppet", - "skip": []} - - # NOTE: Populate fixture with default values - for task in fixture['tasks']: - task_name, task_attrs = task.items()[0] - if task_attrs is None: - task_attrs = {} - - for default_attr, default_value in default_attrs.items(): - if default_attr not in task_attrs: - task_attrs[default_attr] = default_value - - task[task_name] = task_attrs - return fixture - - def get_fixture_relevance(self, actual_tasks, fixture): - """Get fixture relevance between actual deployment tasks - and tasks from fixture files - - :param actual_tasks: a list of actual tasks - :param fixture: a dictionary with fixture data - :return: a tuple of task sets - """ - actual_tasks = set(actual_tasks) - fixture_tasks = set([i.keys()[0] for i in fixture["tasks"]]) - tasks_description = self.env.admin_actions.get_tasks_description() - - extra_actual_tasks = actual_tasks.difference(fixture_tasks) - extra_fixture_tasks = fixture_tasks.difference(actual_tasks) - - # NOTE: in ideal case we need to avoid tasks with wrong types - wrong_types = {} - for task in fixture["tasks"]: - task_name, attrs = task.items()[0] - expected_type = self.get_task_type(tasks_description, task_name) - if not expected_type: - logger.error("No type or no such task {!r}".format(task_name)) - else: - if expected_type != attrs["type"]: - wrong_types.update({task_name: expected_type}) - - logger.info("Actual tasks {}contain extra tasks: {}" - .format("" if extra_actual_tasks else "don't ", - extra_actual_tasks)) - logger.info("Fixture tasks {}contain extra tasks: {}" - .format("" if extra_fixture_tasks else "don't ", - extra_fixture_tasks)) - - return extra_actual_tasks, extra_fixture_tasks, wrong_types - - def define_pr_ctrl(self): - """Define primary controller - - :return: dict, node info - """ - devops_pr_controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - pr_ctrl = self.fuel_web.get_nailgun_node_by_devops_node( - devops_pr_controller) - return pr_ctrl - - def check_extra_tasks(self, slave_nodes, deployment, idmp=True, ha=False): - """Check existing extra tasks regarding to fixture and actual task - or tasks with a wrong type - - :param slave_nodes: a list of nailgun nodes - :param deployment: a string, name of the deployment kind - :param idmp: bool, indicates whether idempotency or ensurability - fixture is checked - :param ha: bool, indicates ha mode is enabled or disabled - :return: a list with nodes for which extra tasks regarding to fixture - and actual task or tasks with a wrong type were found - """ - result = {'extra_actual_tasks': {}, - 'extra_fixture_tasks': {}, - 'wrong_types': {}, - 'failed_tasks': {}} - - pr_ctrl = self.define_pr_ctrl() if ha else {} - for node in slave_nodes: - node_roles = self.node_roles(node) - if node.get('name') == pr_ctrl.get('name', None): - node_roles = 
'primary-' + node_roles - node_ref = "{}_{}".format(node["id"], node_roles) - fixture = self.load_fixture(deployment, node_roles, idmp) - node_tasks = self.get_nodes_tasks(node["id"]) - extra_actual_tasks, extra_fixture_tasks, wrong_types = \ - self.get_fixture_relevance(node_tasks, fixture) - result['extra_actual_tasks'][node_ref] = extra_actual_tasks - result['extra_fixture_tasks'][node_ref] = extra_fixture_tasks - result['wrong_types'][node_ref] = wrong_types - result['failed_tasks'][node_ref] = \ - extra_actual_tasks | \ - extra_fixture_tasks | \ - set([task for task in wrong_types.keys()]) - - logger.warning("Uncovered deployment tasks:\n{}" - .format(yaml.dump(result, default_flow_style=False))) - failed_nodes = [node_refs - for node_refs, failed_tasks in - result['failed_tasks'].items() - if failed_tasks] - return failed_nodes - - def generate_fixture(self, node_refs, cluster_id, slave_nodes, ha=False): - """Generate fixture with description of task idempotency - - :param node_refs: a string, refs to nailgun node - :param cluster_id: an integer, number of cluster id - :param slave_nodes: a list of nailgun nodes - :param ha: bool, indicates ha mode is enabled or disabled - :return: None - """ - result = {} - pr_ctrl = self.define_pr_ctrl() if ha else {} - for node in slave_nodes: - node_roles = self.node_roles(node) - if node.get('name') == pr_ctrl.get('name', None): - node_roles = 'primary-' + node_roles - node_ref = "{}_{}".format(node["id"], node_roles) - if node_ref not in node_refs: - logger.debug('Node {!r} was skipped because the current ' - 'fixtures are actual for deployment tasks which ' - 'are executed on this node'.format(node_ref)) - continue - node_tasks = self.get_nodes_tasks(node["id"]) - tasks_description = self.env.admin_actions.get_tasks_description() - tasks = [] - - for task in node_tasks: - task_type = self.get_task_type(tasks_description, task) - if task_type != "puppet": - logger.info("Skip checking of {!r} task,it is not puppet" - .format(task)) - tasks.append({task: {"type": task_type}}) - continue - - self.fuel_web.execute_task_on_node(task, node["id"], - cluster_id) - - try: - report = self.get_puppet_report(node) - except AssertionError: - # NOTE: in ideal case we need to avoid puppet - # tasks with "no_puppet_run": True - tasks.append({task: {"no_puppet_run": True}}) - msg = ("Unexpected no_puppet_run for task: {}" - .format(task)) - logger.info(msg) - continue - - failed = False - task_resources = [] - - for res_name, res_stats in report['resource_statuses'].items(): - if res_stats['changed']: - failed = True - msg = ("Non-idempotent task {!r}, resource: {}" - .format(task, res_name)) - logger.error(msg) - task_resources.append(res_name) - - if failed: - tasks.append({ - task: {"skip": task_resources} - }) - else: - tasks.append({ - task: None - }) - logger.info( - "Task {!r} on node {!r} was executed successfully" - .format(task, node['id'])) - - result.update( - { - node_ref: { - "role": node_roles, - "tasks": tasks - } - } - ) - - logger.info("Generated fixture:\n{}" - .format(yaml.dump(result, default_flow_style=False))) - - @staticmethod - def _parse_settings(settings): - """Select only values and their types from settings - - :param settings: dict, (env or node) settings - :return: dict, settings in short format - """ - parsed = {} - for group in settings: - if group in SETTINGS_SKIPLIST: - continue - parsed[group] = {} - for attr, params in settings[group].items(): - if attr in SETTINGS_SKIPLIST: - continue - try: - parsed[group][attr] = { - 
'value': params['value'], - 'type': params['type'] - } - except KeyError: - logger.debug("Do not include {} setting as it doesn't " - "have value".format(params['label'])) - if not parsed[group]: - logger.debug("Do not include {} group as it doesn't have " - "settings with values".format(group)) - del parsed[group] - return parsed - - @staticmethod - def _get_settings_difference(settings1, settings2): - """Select values and/or groups of set1 that are not present in set2 - - :param settings1: dict, group of dicts - :param settings2: dict, group of dicts - :return: dict, set1 items not present in set2 - """ - diff = {} - new_groups = set(settings1) - set(settings2) - if new_groups: - diff.update([(g, settings1[g]) for g in new_groups]) - for group in settings1: - if group in new_groups: - continue - new_params = set(settings1[group]) - set(settings2[group]) - if new_params: - diff[group] = {} - diff[group].update( - [(s, settings1[group][s]) for s in new_params]) - return diff - - def _cmp_settings(self, settings, fixtures): - """Compare current and stored settings - - Return values and/or groups of settings that are new, comparing to - what is stored in fixtures. - Return values and/or groups of settings in fixtures that are outdated, - comparing to what is available in the cluster under test. - - :param settings: dict, current settings in short format - :param fixtures: dict, stored settings in short format - :return: tuple, (new settings, outdated settings) pair - """ - new_s = self._get_settings_difference(settings, fixtures) - outdated_f = self._get_settings_difference(fixtures, settings) - return new_s, outdated_f - - def get_cluster_settings(self, cluster_id): - """Get cluster settings and return them in short format - - :param cluster_id: int, ID of the cluster under test - :return: dict, cluster settings in short format - """ - settings = self.fuel_web.client.get_cluster_attributes( - cluster_id)['editable'] - return self._parse_settings(settings) - - def get_nodes_settings(self, cluster_id): - """Get node settings and return them in short format - - :param cluster_id: int, ID of the cluster under test - :return: dict, node settings in short format - """ - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - - node_settings = {} - for node in nodes: - node_attrs = self.fuel_web.client.get_node_attributes(node['id']) - roles = self.node_roles(node) - node_settings[roles] = self._parse_settings(node_attrs) - return node_settings - - @staticmethod - def load_settings_fixtures(deployment): - """Load stored settings for the given cluster configuration - - :param deployment: str, name of cluster configuration - (e.g. 
1_ctrl_1_cmp_1_cinder) - :return: tuple, (cluster, nodes) pair of stored settings - """ - f_path = os.path.join(os.path.dirname(__file__), "fixtures", - deployment, "ensurability", "{}") - - with open(f_path.format("cluster_settings.yaml")) as f: - cluster_fixture = yaml.load(f) - with open(f_path.format("nodes_settings.yaml")) as f: - nodes_fixture = yaml.load(f) - - return cluster_fixture, nodes_fixture - - def check_cluster_settings_consistency(self, settings, fixtures): - """Check if stored cluster settings require update - - :param settings: dict, settings of the cluster under test - :param fixtures: dict, stored cluster settings - :return: tuple, (new settings, outdated settings) pair; this indicates - whether fixtures require update - """ - return self._cmp_settings(settings, fixtures) - - def check_nodes_settings_consistency(self, settings, fixtures): - """Check if stored node settings require update - - :param settings: dict, node settings of the cluster under test - :param fixtures: dict, stored node settings - :return: tuple, (new settings, outdated settings) pair; this indicates - whether fixtures require update - """ - new_settings = {} - outdated_fixtures = {} - for node in fixtures: - new_s, outdated_f = self._cmp_settings( - settings[node], fixtures[node]) - if new_s: - new_settings[node] = new_s - if outdated_f: - outdated_fixtures[node] = outdated_f - return new_settings, outdated_fixtures - - def check_settings_consistency(self, deployment, cluster_id): - """Check if settings fixtures are up to date. - - :param cluster_id: int, env under test - :param deployment: str, name of env configuration under test - :return: None - """ - cluster_f, nodes_f = self.load_settings_fixtures(deployment) - cluster_s = self.get_cluster_settings(cluster_id) - nodes_s = self.get_nodes_settings(cluster_id) - - consistency = {} - new_cluster_s, old_cluster_f = \ - self.check_cluster_settings_consistency(cluster_s, cluster_f) - new_nodes_s, old_nodes_f = \ - self.check_nodes_settings_consistency(nodes_s, nodes_f) - - consistency["fixtures"] = { - 'old_cluster_fixtures': old_cluster_f, - 'old_nodes_fixtures': old_nodes_f - } - consistency["settings"] = { - 'new_cluster_settings': new_cluster_s, - 'new_nodes_settings': new_nodes_s - } - - nonconsistent = False - if new_cluster_s or new_nodes_s.values(): - logger.info( - "Settings fixtures require update as new options are " - "available now for configuring an environment\n{}".format( - yaml.safe_dump(consistency["settings"], - default_flow_style=False)) - ) - nonconsistent = True - if old_cluster_f or old_nodes_f.values(): - logger.info( - "Settings fixtures require update as some options are no " - "longer available for configuring an environment\n{}".format( - yaml.safe_dump(consistency["fixtures"], - default_flow_style=False)) - ) - nonconsistent = True - if nonconsistent: - self.generate_settings_fixture(cluster_id) - msg = ('Please update setting fixtures in the repo ' - 'according to generated data') - raise DeprecatedFixture(msg) - - def generate_settings_fixture(self, cluster_id): - """Get environment and nodes settings, and print them to console. 
-
-    def generate_settings_fixture(self, cluster_id):
-        """Get environment and node settings and print them to the console.
-
-        :param cluster_id: int, ID of the cluster under test
-        :return: None
-        """
-        cluster_s = self.get_cluster_settings(cluster_id)
-        nodes_s = self.get_nodes_settings(cluster_id)
-
-        logger.info("Generated environment settings fixture:\n{}".format(
-            yaml.safe_dump(cluster_s, default_flow_style=False)))
-        logger.info("Generated nodes settings fixture:\n{}".format(
-            yaml.safe_dump(nodes_s, default_flow_style=False)))
-
-
-@test(groups=['deploy_lcm_environment'])
-class SetupLCMEnvironment(LCMTestBasic):
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=['lcm_deploy_1_ctrl_1_cmp_1_cinder'])
-    @log_snapshot_after_test
-    def lcm_deploy_1_ctrl_1_cmp_1_cinder(self):
-        """Create cluster with Cinder
-
-        Scenario:
-            1. Revert snapshot "ready_with_3_slaves"
-            2. Create cluster
-            3. Add 1 controller node
-            4. Add 1 compute node
-            5. Add 1 cinder node
-            6. Deploy cluster
-            7. Check extra deployment tasks
-
-        Duration 180m
-        Snapshot: "lcm_deploy_1_ctrl_1_cmp_1_cinder"
-        """
-        deployment = '1_ctrl_1_cmp_1_cinder'
-        snapshotname = 'lcm_deploy_{}'.format(deployment)
-        self.check_run(snapshotname)
-        self.show_step(1)
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        self.show_step(2)
-        segment_type = NEUTRON_SEGMENT['tun']
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                'net_segment_type': segment_type
-            }
-        )
-        self.show_step(3)
-        self.show_step(4)
-        self.show_step(5)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['cinder']
-            }
-        )
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-        self.show_step(7)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment)
-        if node_refs:
-            logger.info('Generating a new fixture . . .')
-            self.generate_fixture(node_refs, cluster_id, slave_nodes)
-            msg = ('Please update idempotency fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-        self.env.make_snapshot(snapshotname, is_make=True)
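The idempotency fixtures that check_extra_tasks consults are per-role YAML files; their shape, abridged from fixtures/1_ctrl_1_cmp_1_cinder/idempotency/cinder.yaml as deleted later in this patch (only a few representative entries shown):

    roles:
      cinder
    tasks:
      - update_hosts: null          # plain puppet task, expected fully idempotent
      - rsync_core_puppet:
          type: sync                # non-puppet task, matched by type
      - plugins_setup_repositories:
          no_puppet_run: true       # task not expected to run puppet at all
      - top-role-cinder:
          skip:
            - Service[cinder-volume]   # known non-idempotent resource, ignored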
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=['lcm_deploy_1_ctrl_1_cmp_1_mongo'])
-    @log_snapshot_after_test
-    def lcm_deploy_1_ctrl_1_cmp_1_mongo(self):
-        """Create cluster with Ceilometer
-
-        Scenario:
-            1. Revert snapshot "ready_with_3_slaves"
-            2. Create cluster
-            3. Add 1 controller node
-            4. Add 1 compute node
-            5. Add 1 mongo node
-            6. Deploy cluster
-            7. Check extra deployment tasks
-
-        Duration 180m
-        Snapshot: "lcm_deploy_1_ctrl_1_cmp_1_mongo"
-        """
-        deployment = '1_ctrl_1_cmp_1_mongo'
-        snapshotname = 'lcm_deploy_{}'.format(deployment)
-        self.check_run(snapshotname)
-        self.show_step(1)
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        self.show_step(2)
-        segment_type = NEUTRON_SEGMENT['vlan']
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                'ceilometer': True,
-                'net_segment_type': segment_type
-            }
-        )
-        self.show_step(3)
-        self.show_step(4)
-        self.show_step(5)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['mongo']
-            }
-        )
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-        self.show_step(7)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment)
-        if node_refs:
-            logger.info('Generating a new fixture . . .')
-            self.generate_fixture(node_refs, cluster_id, slave_nodes)
-            msg = ('Please update idempotency fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-        self.env.make_snapshot(snapshotname, is_make=True)
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
-          groups=['lcm_deploy_1_ctrl_1_cmp_3_ceph'])
-    @log_snapshot_after_test
-    def lcm_deploy_1_ctrl_1_cmp_3_ceph(self):
-        """Create cluster with Ceph
-
-        Scenario:
-            1. Revert snapshot "ready_with_5_slaves"
-            2. Create cluster
-            3. Add 1 controller node
-            4. Add 1 compute node
-            5. Add 3 ceph-osd nodes
-            6. Deploy cluster
-            7. Check extra deployment tasks
-
-        Duration 240m
-        Snapshot: "lcm_deploy_1_ctrl_1_cmp_3_ceph"
-        """
-        deployment = '1_ctrl_1_cmp_3_ceph'
-        snapshotname = 'lcm_deploy_{}'.format(deployment)
-        self.check_run(snapshotname)
-        self.show_step(1)
-        self.env.revert_snapshot("ready_with_5_slaves")
-
-        self.show_step(2)
-        segment_type = NEUTRON_SEGMENT['tun']
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                'volumes_lvm': False,
-                'volumes_ceph': True,
-                'images_ceph': True,
-                'objects_ceph': True,
-                'net_segment_type': segment_type
-            }
-        )
-        self.show_step(3)
-        self.show_step(4)
-        self.show_step(5)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['ceph-osd'],
-                'slave-04': ['ceph-osd'],
-                'slave-05': ['ceph-osd']
-            }
-        )
-
-        self.show_step(6)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-        self.show_step(7)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment)
-        if node_refs:
-            logger.info('Generating a new fixture . . .')
-            self.generate_fixture(node_refs, cluster_id, slave_nodes)
-            msg = ('Please update idempotency fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-        self.env.make_snapshot(snapshotname, is_make=True)
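Every lcm_deploy_* case in this class is the same life cycle with a different role map and settings payload; distilled into one sketch (illustrative refactoring only, not code from the patch; the helper name _deploy_and_check and the slaves parameter are hypothetical):

    def _deploy_and_check(self, deployment, slaves, settings, nodes, ha=False):
        # Shared skeleton of the lcm_deploy_* tests above and below
        # (show_step bookkeeping omitted for brevity).
        snapshotname = 'lcm_deploy_{}'.format(deployment)
        self.check_run(snapshotname)            # skip if the snapshot already exists
        self.env.revert_snapshot('ready_with_{}_slaves'.format(slaves))
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=settings)
        self.fuel_web.update_nodes(cluster_id, nodes)   # per-scenario role map
        self.fuel_web.deploy_cluster_wait(cluster_id)
        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        node_refs = self.check_extra_tasks(slave_nodes, deployment, ha=ha)
        if node_refs:
            # Stale idempotency fixtures: regenerate them, then fail the test
            # so the repository copy gets updated.
            self.generate_fixture(node_refs, cluster_id, slave_nodes, ha=ha)
            raise DeprecatedFixture('Please update idempotency fixtures in the '
                                    'repo according to generated fixtures')
        self.env.make_snapshot(snapshotname, is_make=True)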
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
-          groups=['lcm_deploy_3_ctrl_3_cmp_ceph_sahara'])
-    @log_snapshot_after_test
-    def lcm_deploy_3_ctrl_3_cmp_ceph_sahara(self):
-        """Create cluster with Sahara, Ceilometer, Ceph in HA mode
-
-        Scenario:
-            1. Revert snapshot "ready_with_9_slaves"
-            2. Create cluster
-            3. Add 3 controller nodes with the mongo role
-            4. Add 3 compute nodes with the ceph-osd role
-            5. Deploy cluster
-            6. Check extra deployment tasks
-
-        Duration 240m
-        Snapshot: "lcm_deploy_3_ctrl_3_cmp_ceph_sahara"
-        """
-        deployment = '3_ctrl_3_cmp_ceph_sahara'
-        snapshotname = 'lcm_deploy_{}'.format(deployment)
-        self.check_run(snapshotname)
-        self.show_step(1)
-        self.env.revert_snapshot("ready_with_9_slaves")
-
-        self.show_step(2)
-        segment_type = NEUTRON_SEGMENT['tun']
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                'ceilometer': True,
-                'sahara': True,
-                'volumes_lvm': False,
-                'volumes_ceph': True,
-                'images_ceph': True,
-                'objects_ceph': True,
-                'net_segment_type': segment_type
-            }
-        )
-        self.show_step(3)
-        self.show_step(4)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller', 'mongo'],
-                'slave-02': ['controller', 'mongo'],
-                'slave-03': ['controller', 'mongo'],
-                'slave-04': ['compute', 'ceph-osd'],
-                'slave-05': ['compute', 'ceph-osd'],
-                'slave-06': ['compute', 'ceph-osd']
-            }
-        )
-
-        self.show_step(5)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-        self.show_step(6)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment, ha=True)
-        if node_refs:
-            logger.info('Generating a new fixture . . .')
-            self.generate_fixture(node_refs, cluster_id, slave_nodes, ha=True)
-            msg = ('Please update idempotency fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-        self.env.make_snapshot(snapshotname, is_make=True)
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
-          groups=['lcm_deploy_1_ctrl_1_cmp_1_ironic'])
-    @log_snapshot_after_test
-    def lcm_deploy_1_ctrl_1_cmp_1_ironic(self):
-        """Deploy cluster with Ironic
-
-        Scenario:
-            1. Create cluster
-            2. Add 1 controller node
-            3. Add 1 compute node
-            4. Add 1 ironic node
-            5. Deploy cluster
-            6. Check extra deployment tasks
-
-        Duration 180m
-        Snapshot: lcm_deploy_1_ctrl_1_cmp_1_ironic
-        """
-        deployment = '1_ctrl_1_cmp_1_ironic'
-        snapshotname = 'lcm_deploy_{}'.format(deployment)
-        self.check_run(snapshotname)
-
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        self.show_step(1)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=DEPLOYMENT_MODE,
-            settings={
-                'net_segment_type': NEUTRON_SEGMENT['vlan'],
-                'ironic': True,
-            }
-        )
-
-        self.show_step(2)
-        self.show_step(3)
-        self.show_step(4)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute'],
-                'slave-03': ['ironic'],
-            }
-        )
-
-        self.show_step(5)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-        self.show_step(6)
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment)
-        if node_refs:
-            logger.info('Generating a new fixture . .
.') - self.generate_fixture(node_refs, cluster_id, slave_nodes) - msg = ('Please update idempotency fixtures in the repo ' - 'according to generated fixtures') - raise DeprecatedFixture(msg) - self.env.make_snapshot(snapshotname, is_make=True) diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cinder.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cinder.yaml deleted file mode 100644 index 962e8b95d..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cinder.yaml +++ /dev/null @@ -1,63 +0,0 @@ -tasks: -- update_hosts: - resources: [] -- clear_nodes_info: - type: skipped -- copy_keys_ceph: - type: copy_files -- globals: - resources: [] -- fuel_pkgs: - resources: [] -- tools: - resources: [] -- enable_cinder_volume_service: - resources: [] -- rsync_core_puppet: - type: sync -- cgroups: - resources: [] -- upload_nodes_info: - type: skipped -- copy_keys: - type: copy_files -- override_configuration: - resources: [] -- setup_repositories: - resources: [] -- dns-client: - resources: [] -- allocate_hugepages: - resources: [] -- plugins_setup_repositories: - no_puppet_run: true -- ssl-keys-saving: - no_puppet_run: true -- upload_configuration: - type: upload_file -- firewall: - resources: [] -- top-role-cinder: - resources: - - Service[cinder-volume] -- logging: - resources: [] -- sync_time: - type: shell -- plugins_rsync: - resources: - - Exec[sync_time_shell] -- connectivity_tests: - resources: [] -- configuration_symlink: - type: shell -- hosts: - resources: [] -- copy_haproxy_keys: - type: copy_files -- ntp-client: - resources: [] -- ssl-add-trust-chain: - no_puppet_run: true -- reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cluster_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cluster_settings.yaml deleted file mode 100644 index 700beac9d..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/cluster_settings.yaml +++ /dev/null @@ -1,421 +0,0 @@ -access: - email: - type: text - value: admin_upd@localhost - password: - type: password - value: admin - tenant: - type: text - value: admin - user: - type: text - value: admin -additional_components: - ceilometer: - type: checkbox - value: false - heat: - type: hidden - value: false - ironic: - type: checkbox - value: false - mongo: - type: checkbox - value: false - murano: - type: checkbox - value: false - murano-cfapi: - type: checkbox - value: false - murano_glance_artifacts_plugin: - type: checkbox - value: false - sahara: - type: checkbox - value: false -common: - auth_key: - type: hidden - value: '' - auto_assign_floating_ip: - type: checkbox - value: true - debug: - type: checkbox - value: false - libvirt_type: - type: radio - value: qemu - nova_quota: - type: checkbox - value: true - propagate_task_deploy: - type: hidden - value: false - puppet_debug: - type: checkbox - value: false - resume_guests_state_on_host_boot: - type: checkbox - value: false - task_deploy: - type: hidden - value: true - use_cow_images: - type: checkbox - value: true -corosync: - group: - type: text - value: 226.94.1.1 - port: - type: text - value: '12000' - verified: - type: checkbox - value: false -external_mongo: - hosts_ip: - type: text - value: '' - mongo_db_name: - type: text - value: ceilometer - mongo_password: - type: password - value: ceilometer - mongo_replset: - type: text - 
value: '' - mongo_user: - type: text - value: ceilometer -kernel_params: - kernel: - type: text - value: console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset -murano_settings: - murano_repo_url: - type: text - value: http://storage.apps.openstack.org/ -neutron_advanced_configuration: - neutron_dvr: - type: checkbox - value: false - neutron_l2_pop: - type: checkbox - value: false - neutron_l3_ha: - type: checkbox - value: false - neutron_qos: - type: checkbox - value: false -operator_user: - authkeys: - type: textarea - value: '' - homedir: - type: text - value: /home/fueladmin - name: - type: text - value: fueladmin - password: - type: password - value: xalFdhQSGrB7xgdPrPiM3vZm - sudo: - type: textarea - value: 'ALL=(ALL) NOPASSWD: ALL' -provision: - method: - type: hidden - value: image - packages: - type: textarea - value: 'acl - - anacron - - bash-completion - - bridge-utils - - bsdmainutils - - build-essential - - cloud-init - - curl - - daemonize - - debconf-utils - - gdisk - - grub-pc - - hpsa-dkms - - hwloc - - i40e-dkms - - linux-firmware - - linux-firmware-nonfree - - linux-headers-generic-lts-trusty - - linux-image-generic-lts-trusty - - lvm2 - - mcollective - - mdadm - - multipath-tools - - multipath-tools-boot - - nailgun-agent - - nailgun-mcagents - - network-checker - - ntp - - openssh-client - - openssh-server - - puppet - - python-amqp - - ruby-augeas - - ruby-ipaddress - - ruby-json - - ruby-netaddr - - ruby-openstack - - ruby-shadow - - ruby-stomp - - telnet - - ubuntu-minimal - - ubuntu-standard - - uuid-runtime - - vim - - virt-what - - vlan - - ' -public_network_assignment: - assign_to_all_nodes: - type: checkbox - value: false -public_ssl: - cert_data: - type: file - value: - content: '-----BEGIN CERTIFICATE----- - - MIIC7TCCAdUCAgPoMA0GCSqGSIb3DQEBBQUAMDwxHjAcBgNVBAsMFU1pcmFudGlz - - IEZ1ZWwtUUEgVGVhbTEaMBgGA1UEAwwRcHVibGljLmZ1ZWwubG9jYWwwHhcNMTYw - - NDE5MTkxMTU1WhcNMjYwNDE3MTkxMTU1WjA8MR4wHAYDVQQLDBVNaXJhbnRpcyBG - - dWVsLVFBIFRlYW0xGjAYBgNVBAMMEXB1YmxpYy5mdWVsLmxvY2FsMIIBIjANBgkq - - hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoZBouZH+0S1jPYy+FxvNAkdGxsNVzsOI - - g7OybWx+DIskdRvONwrCFFtvP2InKJowPCebGcCqDqGF2zgFLmA9yQN/05A9f8bX - - hFrtjfNb/YYJxDE4itSYNgSzSfnitii7AJme9UBw94s0p3749irGTB++ZhcPzwdg - - Nx0Ymk2uFFNU18YxSx8PAk2w73a36t61E0P++MT6sYIM1GAx+9pm9Ddrj5r0b/M7 - - ikHGIUuB7M6t3mNHUveld+ZyXjaONMHZI5WQ16AMZwtHunUu/42k+o6RSS4h+zT8 - - ZiWW5cxZVLn6xqJkDkXMDdsS7PrveSuODq3LuaG4fwRpf1u2hqvyuwIDAQABMA0G - - CSqGSIb3DQEBBQUAA4IBAQBfAjtVxKItKMFAQl/EufHjk4rBpRiaHGLH2CIJHWJ1 - - i+z7gI5XazzwMCprOxsCUrJUpr8ChobenyebNPJSnDI0R0z8ZTX6kTNk7A2ZFVrp - - lL5TlpwhdtUjWxF3Coi+w694MbyLmJ4pA6QZTYVqSilZZ1cncLNA+Fc97STfLukK - - wqjwCYovRVjUn4jLRjy2kcw89060xxZopVpkY9cPfg0P+PICo/eS4EunQ5rd/EDV - - 7DBfCbzthArBjF8/72J8PYhqwEc+i5PDkn2CNIXoT0coxC9YAHJ+zFHgxHnKa0/q - - TPlvi+wJKrrSnXb5Oc34tVOxDF/WQjNuve8vHg7hvaIM - - -----END CERTIFICATE----- - - -----BEGIN PRIVATE KEY----- - - MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChkGi5kf7RLWM9 - - jL4XG80CR0bGw1XOw4iDs7JtbH4MiyR1G843CsIUW28/YicomjA8J5sZwKoOoYXb - - OAUuYD3JA3/TkD1/xteEWu2N81v9hgnEMTiK1Jg2BLNJ+eK2KLsAmZ71QHD3izSn - - fvj2KsZMH75mFw/PB2A3HRiaTa4UU1TXxjFLHw8CTbDvdrfq3rUTQ/74xPqxggzU - - YDH72mb0N2uPmvRv8zuKQcYhS4Hszq3eY0dS96V35nJeNo40wdkjlZDXoAxnC0e6 - - dS7/jaT6jpFJLiH7NPxmJZblzFlUufrGomQORcwN2xLs+u95K44Orcu5obh/BGl/ - - W7aGq/K7AgMBAAECggEAI6RyFg5JQuhabmewP/TYI1qKGKtbMMQeR7/K6yz2GdpQ - - bq11rtrmugr53efPb7ukTIEITIQegB/OIfCX5AVDXCczef7mMlwxi3dr1NcNQj7h - - xLB/ItGHytL6oqVICJuvtZAuaziOM244bYMrdVM2b/DI1kjcKfYcmcwHc3MTplPq - - 
Nh+L5u2ue6bYvT+XRF4KrwuKmKuyJghyMeoiLI9JupkKw79ZB/l0Mh8vmxKMPj8g - - MNxoJbwoTkjQxuJELmet+ysBg2KT+gJEirfRmZiouDxx8Zukd8O6YvnlsOiRFokX - - 05r33fna1z5IBpGnwe+rn6pQaeXflSd6dqotoBp4QQKBgQDLrhAdsZnDXDpYuOv+ - - ITMpc33J4NW7yE+26ebzWkLYRUW5i7YDCtJdFi5pHCeA3+QD3RlYiinQlzcy3i3R - - 4Uv4riuKyDbgaw9sYOhmgluhPKDuznEWXomloEVu8jFrHg3TKY2v/GCcv99N5grQ - - Jg9rScFpZXkTj23KzqHf23uTEQKBgQDLENH7QzoNsBv6eS7kElBx3BQWNa0dhXab - - kRXo62/++tIDGMkzaq38hsjcAZi8uZDZY0QJTmBMdZN3LLBln5C2g8Y6Eym5ITvf - - pxkMUK0++MygbK/Vwmp+xu7XMiPNMG/E8NqQkca3F/6Ld08PAauZ8gpgoAsnjlNg - - pPUdWRCRCwKBgEiEB17bDXidjHRsGjFXVqTKZp2Ke+4oaiEgc8Zue2AOgb2GvV2l - - 67GSpSFtEa9zhvXNMSnxvuNyAwgMTFnuEaNPN1do4wjRdSNL+VIN1Vu5fz6mp2Kk - - c/NQ9YeDmQ6fG6Lzp2thum/0bCeK4IytEE5NaxwAMbRCG3/aQ4200fFRAoGAMwg5 - - HSIZ9tKpVVsbE6oemV6rlaFLrj2aPyJJFU4FyViTar/R4KAQtYPR+qhUECm6Y0d1 - - E7mkrdJmiu6qLf/ZyGR5bqLeO25Es8I0o0mrIEY6dp6Z2eiQBuhLob0yDiD8FcxJ - - wUdBX0YibD5Bmg3baEbRoNLXussj3QfXqdZ2OV0CgYEAyovcXc1ibwrwNO59yw99 - - 7zCoMFjXzZgtxn5JQDwMsdt9UKd/4nOPbbiRPL3ynr5zboDZzRxihXB5zzKjrYlE - - o4QZIWV0VgGS2eQSni3CGOsG4VhE4/9EFF7UqeA0hYkGAZMS+EKSdPpIujStD/ck - - sQ/BZiYxMSE8+synlzp3gss= - - -----END PRIVATE KEY----- - - ' - name: ca.pem - cert_source: - type: radio - value: user_uploaded - horizon: - type: checkbox - value: false - hostname: - type: text - value: public.fuel.local - services: - type: checkbox - value: false -service_user: - homedir: - type: hidden - value: /var/lib/fuel - name: - type: hidden - value: fuel - password: - type: hidden - value: WEwz5aKA0hYDrcERjX7irQzS - root_password: - type: hidden - value: r00tme - sudo: - type: hidden - value: 'ALL=(ALL) NOPASSWD: ALL' -storage: - admin_key: - type: hidden - value: AQDzghZXAAAAABAA7obspvgNjPa/HBWSOUzI1w== - bootstrap_osd_key: - type: hidden - value: AQDzghZXAAAAABAAWaiWslWwse+hsaKLzbtQFw== - ephemeral_ceph: - type: checkbox - value: false - fsid: - type: hidden - value: 4b0ab6f5-b82b-44e4-ac3a-15c76f960b82 - images_ceph: - type: checkbox - value: false - mon_key: - type: hidden - value: AQDzghZXAAAAABAAVi1udBHvkQbZbDgNnT7gXA== - objects_ceph: - type: checkbox - value: false - osd_pool_size: - type: text - value: '3' - radosgw_key: - type: hidden - value: AQDzghZXAAAAABAA8jY8KftsCK4l726rNdu/Zg== - volumes_block_device: - type: checkbox - value: true - volumes_ceph: - type: checkbox - value: false - volumes_lvm: - type: checkbox - value: false -syslog: - syslog_port: - type: text - value: '514' - syslog_server: - type: text - value: '' - syslog_transport: - type: radio - value: tcp -workloads_collector: - enabled: - type: hidden - value: false - password: - type: password - value: 8qtWdXhhY84wFoxwBbZcpq3P - tenant: - type: text - value: services - user: - type: text - value: fuel_stats_user diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/compute.yaml deleted file mode 100644 index b29497f24..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/compute.yaml +++ /dev/null @@ -1,89 +0,0 @@ -tasks: -- update_hosts: - resources: [] -- openstack-network-start: - type: skipped -- openstack-network-common-config: - resources: [] -- clear_nodes_info: - type: skipped -- openstack-network-agents-sriov: - resources: [] -- copy_keys_ceph: - type: copy_files -- globals: - resources: [] -- fuel_pkgs: - resources: [] -- openstack-network-agents-l3: - resources: [] -- openstack-network-agents-metadata: - resources: [] -- tools: - resources: [] -- 
rsync_core_puppet: - type: sync -- enable_nova_compute_service: - resources: [] -- cgroups: - resources: [] -- upload_nodes_info: - type: skipped -- copy_keys: - type: copy_files -- override_configuration: - resources: [] -- setup_repositories: - resources: [] -- dns-client: - resources: [] -- openstack-network-plugins-l2: - resources: [] -- allocate_hugepages: - resources: [] -- plugins_setup_repositories: - no_puppet_run: true -- ceph-compute: - no_puppet_run: true -- ssl-keys-saving: - no_puppet_run: true -- sriov_iommu_check: - resources: - - Exec[sriov_iommu_check] -- openstack-network-end: - type: skipped -- ceilometer-compute: - no_puppet_run: true -- upload_configuration: - type: upload_file -- firewall: - resources: [] -- logging: - resources: [] -- top-role-compute: - resources: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Nova_config[DEFAULT/resume_guests_state_on_host_boot] - - Nova_config[vnc/novncproxy_base_url] - - Service[nova-compute] -- sync_time: - type: shell -- openstack-network-compute-nova: - resources: [] -- plugins_rsync: - no_puppet_run: true -- connectivity_tests: - resources: [] -- configuration_symlink: - type: shell -- hosts: - resources: [] -- copy_haproxy_keys: - type: copy_files -- ntp-client: - resources: [] -- ssl-add-trust-chain: - no_puppet_run: true -- reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/controller.yaml deleted file mode 100644 index ba0dc3776..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/controller.yaml +++ /dev/null @@ -1,322 +0,0 @@ -tasks: -- ironic_post_swift_key: - type: shell -- openstack-haproxy-mysqld: - resources: [] -- cinder-db: - resources: [] -- dump_rabbitmq_definitions: - resources: [] -- rsync_core_puppet: - type: sync -- ssl-dns-setup: - resources: - - Exec[rsync_core_puppet_shell] -- ceilometer-controller: - no_puppet_run: true -- override_configuration: - resources: [] -- ceilometer-keystone: - no_puppet_run: true -- nova-db: - resources: [] -- workloads_collector_add: - resources: [] -- primary-openstack-network-plugins-l2: - resources: [] -- radosgw-keystone: - resources: [] -- virtual_ips: - resources: [] -- primary-dns-server: - resources: [] -- openstack-haproxy-murano: - resources: [] -- openstack-network-end: - type: skipped -- openstack-haproxy-radosgw: - resources: [] -- openstack-haproxy-swift: - resources: [] -- heat-db: - resources: [] -- openstack-haproxy-neutron: - resources: [] -- updatedb: - no_puppet_run: true -- ironic-db: - no_puppet_run: true -- plugins_rsync: - no_puppet_run: true -- ceilometer-radosgw-user: - no_puppet_run: true -- openstack-haproxy-keystone: - resources: [] -- hosts: - resources: [] -- primary-rabbitmq: - resources: [] -- primary-cluster-haproxy: - resources: [] -- openstack-network-routers: - resources: [] -- reserved_ports: - resources: [] -- controller_remaining_tasks: - resources: [] -- glance-keystone: - resources: [] -- openstack-haproxy-aodh: - resources: [] -- murano-cfapi: - no_puppet_run: true -- ironic-compute: - no_puppet_run: true -- primary-openstack-network-agents-metadata: - resources: [] -- cinder-keystone: - resources: [] -- copy_keys: - type: copy_files -- enable_rados: - no_puppet_run: true -- ntp-check: - resources: [] -- aodh-db: - no_puppet_run: true -- 
disable_keystone_service_token: - resources: [] -- umm: - resources: [] -- memcached: - resources: [] -- allocate_hugepages: - resources: [] -- openrc-delete: - resources: - - File[/root/openrc] -- plugins_setup_repositories: - no_puppet_run: true -- sahara-keystone: - no_puppet_run: true -- openstack-haproxy-sahara: - resources: [] -- ssl-keys-saving: - no_puppet_run: true -- primary-cluster: - resources: [] -- upload_cirros: - type: shell -- primary-keystone: - resources: - - File[/root/openrc] -- primary-openstack-network-agents-l3: - resources: [] -- upload_configuration: - type: upload_file -- create-cinder-types: - resources: [] -- neutron-keystone: - resources: - - Keystone_endpoint[RegionOne/neutron::network] -- logging: - resources: [] -- nova-keystone: - resources: - - Keystone_endpoint[RegionOne/nova::compute] - - Keystone_endpoint[RegionOne/novav3::computev3] -- update_hosts: - resources: [] -- ironic-keystone: - no_puppet_run: true -- connectivity_tests: - resources: [] -- swift-storage: - resources: [] -- primary-heat: - resources: - - Heat_config[keystone_authtoken/auth_uri] -- conntrackd: - resources: [] -- sahara-db: - no_puppet_run: true -- horizon: - resources: - - File[/var/lib/puppet/concat/_etc_openstack-dashboard_local_settings.py/fragments/50_local_settings.py] - - File[/etc/openstack-dashboard/local_settings.py] - - Exec[concat_/etc/openstack-dashboard/local_settings.py] -- openstack-haproxy-ceilometer: - resources: [] -- openstack-network-common-config: - resources: [] -- firewall: - resources: [] -- apache: - resources: [] -- globals: - resources: - - File[/etc/hiera/globals.yaml] -- aodh-keystone: - no_puppet_run: true -- glance: - resources: - - Glance_glare_config[DEFAULT/default_log_levels] - - Glance_registry_config[DEFAULT/default_log_levels] - - Glance_api_config[DEFAULT/default_log_levels] - - Glance_cache_config[DEFAULT/debug] - - Glance_api_config[DEFAULT/debug] - - Glance_glare_config[DEFAULT/debug] - - Glance_registry_config[DEFAULT/debug] -- tools: - resources: [] -- openstack-haproxy: - resources: [] -- cgroups: - resources: [] -- murano-cfapi-keystone: - no_puppet_run: true -- aodh: - no_puppet_run: true -- ceph_create_pools: - no_puppet_run: true -- openstack-haproxy-ironic: - no_puppet_run: true -- setup_repositories: - resources: [] -- openstack-network-routers-ha: - no_puppet_run: true -- glance-db: - resources: [] -- neutron-db: - resources: [] -- ironic_upload_images: - type: shell -- swift-rebalance-cron: - resources: [] -- primary-ceph-mon: - resources: [] -- openstack-haproxy-stats: - resources: [] -- ironic-api: - no_puppet_run: true -- primary-ceph-radosgw: - resources: [] -- dns-client: - resources: [] -- cluster-vrouter: - resources: [] -- murano-rabbitmq: - no_puppet_run: true -- api-proxy: - resources: [] -- cluster_health: - resources: [] -- heat-keystone: - resources: - - Keystone_endpoint[RegionOne/heat-cfn::cloudformation] - - Keystone_endpoint[RegionOne/heat::orchestration] -- openstack-haproxy-horizon: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/00_horizon_listen_block] - - File[/etc/haproxy/conf.d/015-horizon.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/01-horizon_horizon_balancermember_horizon] - - Exec[concat_/etc/haproxy/conf.d/015-horizon.cfg] -- openstack-network-start: - type: skipped -- clear_nodes_info: - type: skipped -- murano-db: - resources: - - Exec[clear_nodes_info_shell] -- copy_keys_ceph: - type: copy_files -- sahara: - 
no_puppet_run: true -- fuel_pkgs: - resources: [] -- swift-keystone: - resources: - - Keystone_endpoint[RegionOne/swift::object-store] - - Keystone_endpoint[RegionOne/swift_s3::s3] -- public_vip_ping: - resources: [] -- upload_nodes_info: - type: skipped -- openstack-haproxy-glance: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_080-glance-api.cfg/fragments/00_glance-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/080-glance-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_081-glance-glare.cfg/fragments/00_glance-glare_listen_block] - - File[/etc/haproxy/conf.d/080-glance-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/081-glance-glare.cfg] - - File[/etc/haproxy/conf.d/081-glance-glare.cfg] -- murano: - no_puppet_run: true -- ceph_ready_check: - type: shell -- enable_quorum: - type: shell -- openstack-haproxy-nova: - resources: - - File[/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_040-nova-api.cfg/fragments/00_nova-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_170-nova-novncproxy.cfg/fragments/00_nova-novncproxy_listen_block] - - Exec[concat_/etc/haproxy/conf.d/170-nova-novncproxy.cfg] -- openstack-network-server-config: - resources: [] -- primary-database: - resources: - - File[/root/.my.cnf] -- openstack-haproxy-cinder: - resources: - - File[/etc/haproxy/conf.d/070-cinder-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/070-cinder-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_070-cinder-api.cfg/fragments/00_cinder-api_listen_block] -- ntp-server: - resources: [] -- murano-keystone: - no_puppet_run: true -- primary-openstack-network-agents-dhcp: - resources: - - Neutron_dhcp_agent_config[DEFAULT/debug] -- openstack-haproxy-heat: - resources: [] -- primary-openstack-controller: - resources: - - Nova_config[DEFAULT/quota_driver] - - Nova_config[DEFAULT/debug] - - Nova_config[DEFAULT/default_log_levels] -- openstack-cinder: - resources: - - Cinder_config[DEFAULT/scheduler_default_filters] - - Cinder_config[DEFAULT/default_log_levels] - - Cinder_config[DEFAULT/debug] -- keystone-db: - resources: - - File[/root/.my.cnf] -- sync_time: - type: shell -- configuration_symlink: - type: shell -- openstack-network-server-nova: - resources: [] -- copy_haproxy_keys: - type: copy_files -- primary-swift-proxy: - resources: - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_account_frag-account] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/00_swift_proxy] - - File[/etc/swift/proxy-server.conf] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_object_frag-object] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_server_frag-swift_server] - - Exec[concat_/etc/swift/proxy-server.conf] - - Exec[concat_/etc/rsyncd.conf] - - File[/etc/rsyncd.conf] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_container_frag-container] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_backups_frag-swift_backups] -- openstack-network-networks: - resources: [] -- ssl-add-trust-chain: - no_puppet_run: true \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/nodes_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/nodes_settings.yaml deleted file mode 100644 index 12acfee81..000000000 --- 
a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/ensurability/nodes_settings.yaml +++ /dev/null @@ -1,51 +0,0 @@ -controller: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -cinder: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -compute: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/cinder.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/cinder.yaml deleted file mode 100644 index 6e9154fbf..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/cinder.yaml +++ /dev/null @@ -1,48 +0,0 @@ -roles: - cinder -tasks: - - update_hosts: null - - clear_nodes_info: - type: skipped - - override_configuration: null - - globals: null - - fuel_pkgs: null - - tools: null - - enable_cinder_volume_service: null - - rsync_core_puppet: - type: sync - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ssl-keys-saving: null - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - top-role-cinder: - skip: - - Service[cinder-volume] - - logging: null - - sync_time: - type: shell - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/compute.yaml deleted file mode 100644 index d3c984327..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/compute.yaml +++ /dev/null @@ -1,67 +0,0 @@ -roles: - compute -tasks: - - update_hosts: null - - openstack-network-start: - type: skipped - - openstack-network-common-config: null - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: null - - override_configuration: null - - globals: null - - fuel_pkgs: null - - openstack-network-agents-l3: null - - openstack-network-agents-metadata: null - - tools: null - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: null - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - openstack-network-plugins-l2: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ceph-compute: - no_puppet_run: true - - ssl-keys-saving: null - - sriov_iommu_check: - skip: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - logging: 
null - - top-role-compute: - skip: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Service[nova-compute] - - sync_time: - type: shell - - openstack-network-compute-nova: null - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - ceilometer-compute: - no_puppet_run: true - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/controller.yaml deleted file mode 100644 index 769f1dab8..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_cinder/idempotency/controller.yaml +++ /dev/null @@ -1,194 +0,0 @@ -roles: - controller -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: null - - cinder-db: null - - dump_rabbitmq_definitions: - skip: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: null - - ceilometer-controller: - no_puppet_run: true - - override_configuration: null - - ceilometer-keystone: - no_puppet_run: true - - nova-db: null - - workloads_collector_add: null - - primary-openstack-network-plugins-l2: null - - radosgw-keystone: null - - virtual_ips: null - - primary-dns-server: null - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: null - - openstack-haproxy-swift: null - - heat-db: null - - openstack-haproxy-neutron: null - - updatedb: - no_puppet_run: true - - ironic-db: - no_puppet_run: true - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: null - - hosts: null - - primary-rabbitmq: null - - primary-cluster-haproxy: null - - openstack-network-routers: null - - reserved_ports: null - - controller_remaining_tasks: null - - glance-keystone: null - - openstack-haproxy-aodh: null - - murano-cfapi: - no_puppet_run: true - - ironic-compute: - no_puppet_run: true - - primary-openstack-network-agents-metadata: null - - cinder-keystone: null - - copy_keys: - type: copy_files - - enable_rados: - no_puppet_run: true - - ntp-check: null - - aodh-db: - no_puppet_run: true - - disable_keystone_service_token: - no_puppet_run: true - - umm: null - - memcached: null - - allocate_hugepages: null - - openrc-delete: - skip: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: - no_puppet_run: true - - openstack-haproxy-sahara: - no_puppet_run: true - - ssl-keys-saving: null - - primary-cluster: - skip: - - Pcmk_property[no-quorum-policy] - - upload_cirros: - type: shell - - primary-keystone: - skip: - - File[/root/openrc] - - primary-openstack-network-agents-l3: null - - upload_configuration: - type: upload_file - - create-cinder-types: null - - neutron-keystone: null - - logging: null - - nova-keystone: null - - update_hosts: null - - ironic-keystone: - no_puppet_run: true - - connectivity_tests: null - - primary-heat: null - - conntrackd: null - - sahara-db: - no_puppet_run: true - - horizon: null - - openstack-haproxy-ceilometer: - no_puppet_run: true - - openstack-network-common-config: null - - firewall: null - - apache: null - - globals: null - - aodh-keystone: - no_puppet_run: true - - glance: null - - tools: null 
- - openstack-haproxy: null - - cgroups: null - - murano-cfapi-keystone: - no_puppet_run: true - - aodh: - no_puppet_run: true - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: null - - openstack-network-routers-ha: - no_puppet_run: true - - upload_murano_package: - no_puppet_run: true - - glance-db: null - - neutron-db: null - - ironic_upload_images: - type: shell - - swift-rebalance-cron: null - - primary-ceph-mon: null - - openstack-haproxy-stats: null - - ironic-api: - no_puppet_run: true - - primary-ceph-radosgw: null - - dns-client: null - - cluster-vrouter: null - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: null - - cluster_health: null - - heat-keystone: null - - openstack-haproxy-horizon: null - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - no_puppet_run: true - - copy_keys_ceph: - type: copy_files - - sahara: - no_puppet_run: true - - fuel_pkgs: null - - swift-keystone: null - - public_vip_ping: null - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: null - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: null - - openstack-network-server-config: null - - primary-database: - skip: - - File[/root/.my.cnf] - - openstack-haproxy-cinder: null - - ntp-server: null - - murano-keystone: - no_puppet_run: true - - swift-proxy_storage: null - - primary-openstack-network-agents-dhcp: null - - openstack-haproxy-heat: null - - primary-openstack-controller: null - - openstack-cinder: null - - ceph_create_pools: - no_puppet_run: true - - keystone-db: - skip: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: null - - copy_haproxy_keys: - type: copy_files - - openstack-network-networks: null - - ssl-add-trust-chain: null \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/cluster_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/cluster_settings.yaml deleted file mode 100644 index 40bf1dd4a..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/cluster_settings.yaml +++ /dev/null @@ -1,421 +0,0 @@ -access: - email: - type: text - value: admin_upd@localhost - password: - type: password - value: admin - tenant: - type: text - value: admin - user: - type: text - value: admin -additional_components: - ceilometer: - type: checkbox - value: false - heat: - type: hidden - value: false - ironic: - type: checkbox - value: true - mongo: - type: checkbox - value: false - murano: - type: checkbox - value: false - murano-cfapi: - type: checkbox - value: false - sahara: - type: checkbox - value: false -common: - auth_key: - type: hidden - value: '' - auto_assign_floating_ip: - type: checkbox - value: true - debug: - type: checkbox - value: false - libvirt_type: - type: radio - value: qemu - nova_quota: - type: checkbox - value: true - propagate_task_deploy: - type: hidden - value: false - puppet_debug: - type: checkbox - value: false - resume_guests_state_on_host_boot: - type: checkbox - value: false - task_deploy: - type: hidden - value: true - use_cow_images: - type: checkbox - value: true -corosync: - group: - type: text - value: 226.94.1.1 - port: - type: text - value: '12000' - verified: - type: checkbox - value: false -external_mongo: - hosts_ip: - type: text - 
value: '' - mongo_db_name: - type: text - value: ceilometer - mongo_password: - type: password - value: ceilometer - mongo_replset: - type: text - value: '' - mongo_user: - type: text - value: ceilometer -kernel_params: - kernel: - type: text - value: console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset -murano_settings: - murano_glance_artifacts_plugin: - type: checkbox - value: false - murano_repo_url: - type: text - value: http://storage.apps.openstack.org/ -neutron_advanced_configuration: - neutron_dvr: - type: checkbox - value: false - neutron_l2_pop: - type: checkbox - value: false - neutron_l3_ha: - type: checkbox - value: false - neutron_qos: - type: checkbox - value: false -operator_user: - authkeys: - type: textarea - value: '' - homedir: - type: text - value: /home/fueladmin - name: - type: text - value: fueladmin - password: - type: password - value: xalFdhQSGrB7xgdPrPiM3vZm - sudo: - type: textarea - value: 'ALL=(ALL) NOPASSWD: ALL' -provision: - method: - type: hidden - value: image - packages: - type: textarea - value: 'acl - - anacron - - bash-completion - - bridge-utils - - bsdmainutils - - build-essential - - cloud-init - - curl - - daemonize - - debconf-utils - - gdisk - - grub-pc - - hpsa-dkms - - hwloc - - i40e-dkms - - linux-firmware - - linux-firmware-nonfree - - linux-headers-generic-lts-trusty - - linux-image-generic-lts-trusty - - lvm2 - - mcollective - - mdadm - - multipath-tools - - multipath-tools-boot - - nailgun-agent - - nailgun-mcagents - - network-checker - - ntp - - openssh-client - - openssh-server - - puppet - - python-amqp - - ruby-augeas - - ruby-ipaddress - - ruby-json - - ruby-netaddr - - ruby-openstack - - ruby-shadow - - ruby-stomp - - telnet - - ubuntu-minimal - - ubuntu-standard - - uuid-runtime - - vim - - virt-what - - vlan - - ' -public_network_assignment: - assign_to_all_nodes: - type: checkbox - value: false -public_ssl: - cert_data: - type: file - value: - content: '-----BEGIN CERTIFICATE----- - - MIIC7TCCAdUCAgPoMA0GCSqGSIb3DQEBBQUAMDwxHjAcBgNVBAsMFU1pcmFudGlz - - IEZ1ZWwtUUEgVGVhbTEaMBgGA1UEAwwRcHVibGljLmZ1ZWwubG9jYWwwHhcNMTYw - - NDE5MTkxMTU1WhcNMjYwNDE3MTkxMTU1WjA8MR4wHAYDVQQLDBVNaXJhbnRpcyBG - - dWVsLVFBIFRlYW0xGjAYBgNVBAMMEXB1YmxpYy5mdWVsLmxvY2FsMIIBIjANBgkq - - hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoZBouZH+0S1jPYy+FxvNAkdGxsNVzsOI - - g7OybWx+DIskdRvONwrCFFtvP2InKJowPCebGcCqDqGF2zgFLmA9yQN/05A9f8bX - - hFrtjfNb/YYJxDE4itSYNgSzSfnitii7AJme9UBw94s0p3749irGTB++ZhcPzwdg - - Nx0Ymk2uFFNU18YxSx8PAk2w73a36t61E0P++MT6sYIM1GAx+9pm9Ddrj5r0b/M7 - - ikHGIUuB7M6t3mNHUveld+ZyXjaONMHZI5WQ16AMZwtHunUu/42k+o6RSS4h+zT8 - - ZiWW5cxZVLn6xqJkDkXMDdsS7PrveSuODq3LuaG4fwRpf1u2hqvyuwIDAQABMA0G - - CSqGSIb3DQEBBQUAA4IBAQBfAjtVxKItKMFAQl/EufHjk4rBpRiaHGLH2CIJHWJ1 - - i+z7gI5XazzwMCprOxsCUrJUpr8ChobenyebNPJSnDI0R0z8ZTX6kTNk7A2ZFVrp - - lL5TlpwhdtUjWxF3Coi+w694MbyLmJ4pA6QZTYVqSilZZ1cncLNA+Fc97STfLukK - - wqjwCYovRVjUn4jLRjy2kcw89060xxZopVpkY9cPfg0P+PICo/eS4EunQ5rd/EDV - - 7DBfCbzthArBjF8/72J8PYhqwEc+i5PDkn2CNIXoT0coxC9YAHJ+zFHgxHnKa0/q - - TPlvi+wJKrrSnXb5Oc34tVOxDF/WQjNuve8vHg7hvaIM - - -----END CERTIFICATE----- - - -----BEGIN PRIVATE KEY----- - - MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChkGi5kf7RLWM9 - - jL4XG80CR0bGw1XOw4iDs7JtbH4MiyR1G843CsIUW28/YicomjA8J5sZwKoOoYXb - - OAUuYD3JA3/TkD1/xteEWu2N81v9hgnEMTiK1Jg2BLNJ+eK2KLsAmZ71QHD3izSn - - fvj2KsZMH75mFw/PB2A3HRiaTa4UU1TXxjFLHw8CTbDvdrfq3rUTQ/74xPqxggzU - - YDH72mb0N2uPmvRv8zuKQcYhS4Hszq3eY0dS96V35nJeNo40wdkjlZDXoAxnC0e6 - - dS7/jaT6jpFJLiH7NPxmJZblzFlUufrGomQORcwN2xLs+u95K44Orcu5obh/BGl/ - - 
W7aGq/K7AgMBAAECggEAI6RyFg5JQuhabmewP/TYI1qKGKtbMMQeR7/K6yz2GdpQ - - bq11rtrmugr53efPb7ukTIEITIQegB/OIfCX5AVDXCczef7mMlwxi3dr1NcNQj7h - - xLB/ItGHytL6oqVICJuvtZAuaziOM244bYMrdVM2b/DI1kjcKfYcmcwHc3MTplPq - - Nh+L5u2ue6bYvT+XRF4KrwuKmKuyJghyMeoiLI9JupkKw79ZB/l0Mh8vmxKMPj8g - - MNxoJbwoTkjQxuJELmet+ysBg2KT+gJEirfRmZiouDxx8Zukd8O6YvnlsOiRFokX - - 05r33fna1z5IBpGnwe+rn6pQaeXflSd6dqotoBp4QQKBgQDLrhAdsZnDXDpYuOv+ - - ITMpc33J4NW7yE+26ebzWkLYRUW5i7YDCtJdFi5pHCeA3+QD3RlYiinQlzcy3i3R - - 4Uv4riuKyDbgaw9sYOhmgluhPKDuznEWXomloEVu8jFrHg3TKY2v/GCcv99N5grQ - - Jg9rScFpZXkTj23KzqHf23uTEQKBgQDLENH7QzoNsBv6eS7kElBx3BQWNa0dhXab - - kRXo62/++tIDGMkzaq38hsjcAZi8uZDZY0QJTmBMdZN3LLBln5C2g8Y6Eym5ITvf - - pxkMUK0++MygbK/Vwmp+xu7XMiPNMG/E8NqQkca3F/6Ld08PAauZ8gpgoAsnjlNg - - pPUdWRCRCwKBgEiEB17bDXidjHRsGjFXVqTKZp2Ke+4oaiEgc8Zue2AOgb2GvV2l - - 67GSpSFtEa9zhvXNMSnxvuNyAwgMTFnuEaNPN1do4wjRdSNL+VIN1Vu5fz6mp2Kk - - c/NQ9YeDmQ6fG6Lzp2thum/0bCeK4IytEE5NaxwAMbRCG3/aQ4200fFRAoGAMwg5 - - HSIZ9tKpVVsbE6oemV6rlaFLrj2aPyJJFU4FyViTar/R4KAQtYPR+qhUECm6Y0d1 - - E7mkrdJmiu6qLf/ZyGR5bqLeO25Es8I0o0mrIEY6dp6Z2eiQBuhLob0yDiD8FcxJ - - wUdBX0YibD5Bmg3baEbRoNLXussj3QfXqdZ2OV0CgYEAyovcXc1ibwrwNO59yw99 - - 7zCoMFjXzZgtxn5JQDwMsdt9UKd/4nOPbbiRPL3ynr5zboDZzRxihXB5zzKjrYlE - - o4QZIWV0VgGS2eQSni3CGOsG4VhE4/9EFF7UqeA0hYkGAZMS+EKSdPpIujStD/ck - - sQ/BZiYxMSE8+synlzp3gss= - - -----END PRIVATE KEY----- - - ' - name: ca.pem - cert_source: - type: radio - value: user_uploaded - horizon: - type: checkbox - value: false - hostname: - type: text - value: public.fuel.local - services: - type: checkbox - value: false -service_user: - homedir: - type: hidden - value: /var/lib/fuel - name: - type: hidden - value: fuel - password: - type: hidden - value: WEwz5aKA0hYDrcERjX7irQzS - root_password: - type: hidden - value: r00tme - sudo: - type: hidden - value: 'ALL=(ALL) NOPASSWD: ALL' -storage: - admin_key: - type: hidden - value: AQDzghZXAAAAABAA7obspvgNjPa/HBWSOUzI1w== - bootstrap_osd_key: - type: hidden - value: AQDzghZXAAAAABAAWaiWslWwse+hsaKLzbtQFw== - ephemeral_ceph: - type: checkbox - value: false - fsid: - type: hidden - value: 4b0ab6f5-b82b-44e4-ac3a-15c76f960b82 - images_ceph: - type: checkbox - value: false - mon_key: - type: hidden - value: AQDzghZXAAAAABAAVi1udBHvkQbZbDgNnT7gXA== - objects_ceph: - type: checkbox - value: false - osd_pool_size: - type: text - value: '3' - radosgw_key: - type: hidden - value: AQDzghZXAAAAABAA8jY8KftsCK4l726rNdu/Zg== - volumes_block_device: - type: checkbox - value: true - volumes_ceph: - type: checkbox - value: false - volumes_lvm: - type: checkbox - value: false -syslog: - syslog_port: - type: text - value: '514' - syslog_server: - type: text - value: '' - syslog_transport: - type: radio - value: tcp -workloads_collector: - enabled: - type: hidden - value: false - password: - type: password - value: 8qtWdXhhY84wFoxwBbZcpq3P - tenant: - type: text - value: services - user: - type: text - value: fuel_stats_user diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/compute.yaml deleted file mode 100644 index 7cc4c19cf..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/compute.yaml +++ /dev/null @@ -1,94 +0,0 @@ -tasks: - - update_hosts: - resources: [] - - openstack-network-start: - type: skipped - - openstack-network-common-config: - resources: [] - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: - resources: [] - - 
override_configuration: - resources: [] - - globals: - resources: [] - - fuel_pkgs: - resources: [] - - openstack-network-agents-l3: - resources: [] - - openstack-network-agents-metadata: - resources: [] - - tools: - resources: [] - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: - resources: [] - - cgroups: - resources: [] - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: - resources: [] - - dns-client: - resources: [] - - openstack-network-plugins-l2: - resources: [] - - allocate_hugepages: - resources: [] - - plugins_setup_repositories: - no_puppet_run: true - - ceph-compute: - no_puppet_run: true - - ssl-keys-saving: - no_puppet_run: true - - sriov_iommu_check: - resources: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: - resources: [] - - logging: - resources: [] - - top-role-compute: - resources: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Nova_config[DEFAULT/resume_guests_state_on_host_boot] - - Nova_config[vnc/novncproxy_base_url] - - Nova_config[DEFAULT/vcpu_pin_set] - - Service[nova-compute] - - sync_time: - type: shell - - openstack-network-compute-nova: - resources: - - Exec[undefine_libvirt_default_network] - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: - resources: [] - - configuration_symlink: - type: shell - - ceilometer-compute: - resources: - - Exec[configuration_symlink_shell] - - hosts: - resources: [] - - copy_haproxy_keys: - type: copy_files - - ntp-client: - resources: [] - - ssl-add-trust-chain: - no_puppet_run: true - - reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/controller.yaml deleted file mode 100644 index ffe0ea153..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/controller.yaml +++ /dev/null @@ -1,335 +0,0 @@ -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: - resources: [] - - cinder-db: - resources: [] - - dump_rabbitmq_definitions: - resources: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: - resources: - - Exec[rsync_core_puppet_shell] - - ceilometer-controller: - no_puppet_run: true - - override_configuration: - resources: [] - - ceilometer-keystone: - no_puppet_run: true - - nova-db: - resources: [] - - workloads_collector_add: - resources: [] - - primary-openstack-network-plugins-l2: - resources: [] - - radosgw-keystone: - resources: [] - - virtual_ips: - resources: [] - - primary-dns-server: - resources: [] - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: - resources: [] - - openstack-haproxy-swift: - resources: [] - - heat-db: - resources: [] - - openstack-haproxy-neutron: - resources: [] - - updatedb: - no_puppet_run: true - - ironic-db: - resources: [] - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: - resources: [] - - hosts: - resources: [] - - primary-rabbitmq: - resources: [] - - primary-cluster-haproxy: - resources: [] - - openstack-network-routers: - resources: [] - - 
reserved_ports: - resources: [] - - controller_remaining_tasks: - resources: [] - - glance-keystone: - resources: [] - - openstack-haproxy-aodh: - resources: [] - - murano-cfapi: - no_puppet_run: true - - ironic-compute: - resources: [] - - primary-openstack-network-agents-metadata: - resources: [] - - cinder-keystone: - resources: [] - - copy_keys: - type: copy_files - - enable_rados: - no_puppet_run: true - - ntp-check: - resources: [] - - aodh-db: - no_puppet_run: true - - disable_keystone_service_token: - no_puppet_run: true - - umm: - resources: [] - - memcached: - resources: [] - - allocate_hugepages: - resources: [] - - openrc-delete: - resources: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: - no_puppet_run: true - - openstack-haproxy-sahara: - no_puppet_run: true - - ssl-keys-saving: - no_puppet_run: true - - primary-cluster: - resources: [] - - upload_cirros: - type: shell - - primary-keystone: - resources: - - File[/root/openrc] - - primary-openstack-network-agents-l3: - resources: [] - - upload_configuration: - type: upload_file - - create-cinder-types: - resources: [] - - neutron-keystone: - resources: - - Keystone_endpoint[RegionOne/neutron::network] - - logging: - resources: [] - - nova-keystone: - resources: - - Keystone_endpoint[RegionOne/nova::compute] - - Keystone_endpoint[RegionOne/compute_legacy::compute_legacy] - - update_hosts: - resources: [] - - ironic-keystone: - resources: - - Keystone_endpoint[RegionOne/ironic::baremetal] - - connectivity_tests: - resources: [] - - primary-heat: - resources: - - Heat_config[keystone_authtoken/auth_uri] - - conntrackd: - resources: [] - - sahara-db: - no_puppet_run: true - - horizon: - resources: - - File[/var/lib/puppet/concat/_etc_openstack-dashboard_local_settings.py/fragments/50_local_settings.py] - - File[/etc/openstack-dashboard/local_settings.py] - - Exec[concat_/etc/openstack-dashboard/local_settings.py] - - openstack-haproxy-ceilometer: - no_puppet_run: true - - openstack-network-common-config: - resources: [] - - firewall: - resources: [] - - apache: - resources: [] - - globals: - resources: - - File[/etc/hiera/globals.yaml] - - aodh-keystone: - no_puppet_run: true - - glance: - resources: - - Glance_glare_config[DEFAULT/default_log_levels] - - Glance_registry_config[DEFAULT/default_log_levels] - - Glance_api_config[DEFAULT/default_log_levels] - - Glance_cache_config[DEFAULT/debug] - - Glance_api_config[DEFAULT/debug] - - Glance_glare_config[DEFAULT/debug] - - Glance_registry_config[DEFAULT/debug] - - tools: - resources: [] - - openstack-haproxy: - resources: [] - - cgroups: - resources: [] - - murano-cfapi-keystone: - no_puppet_run: true - - aodh: - no_puppet_run: true - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: - resources: - - File[/etc/haproxy/conf.d/180-ironic.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_180-ironic.cfg/fragments/00_ironic_listen_block] - - Exec[concat_/etc/haproxy/conf.d/180-ironic.cfg] - - setup_repositories: - resources: [] - - openstack-network-routers-ha: - no_puppet_run: true - - upload_murano_package: - no_puppet_run: true - - glance-db: - resources: [] - - neutron-db: - resources: [] - - ironic_upload_images: - type: shell - - swift-rebalance-cron: - resources: [] - - primary-ceph-mon: - resources: [] - - openstack-haproxy-stats: - resources: [] - - ironic-api: - resources: - - Ironic_config[DEFAULT/default_log_levels] - - Ironic_config[DEFAULT/debug] - - primary-ceph-radosgw: - resources: [] - - 
dns-client: - resources: [] - - cluster-vrouter: - resources: [] - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: - resources: [] - - cluster_health: - resources: [] - - heat-keystone: - resources: - - Keystone_endpoint[RegionOne/heat-cfn::cloudformation] - - Keystone_endpoint[RegionOne/heat::orchestration] - - openstack-haproxy-horizon: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/00_horizon_listen_block] - - File[/etc/haproxy/conf.d/015-horizon.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/01-horizon_horizon_balancermember_horizon] - - Exec[concat_/etc/haproxy/conf.d/015-horizon.cfg] - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - no_puppet_run: true - - copy_keys_ceph: - type: copy_files - - sahara: - no_puppet_run: true - - fuel_pkgs: - resources: [] - - swift-keystone: - resources: - - Keystone_endpoint[RegionOne/swift::object-store] - - Keystone_endpoint[RegionOne/swift_s3::s3] - - public_vip_ping: - resources: [] - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_080-glance-api.cfg/fragments/00_glance-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/080-glance-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_081-glance-glare.cfg/fragments/00_glance-glare_listen_block] - - File[/etc/haproxy/conf.d/080-glance-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/081-glance-glare.cfg] - - File[/etc/haproxy/conf.d/081-glance-glare.cfg] - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: - resources: - - File[/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_040-nova-api.cfg/fragments/00_nova-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_170-nova-novncproxy.cfg/fragments/00_nova-novncproxy_listen_block] - - Exec[concat_/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - openstack-network-server-config: - resources: [] - - primary-database: - resources: - - File[mysql-config-file] - - File[/root/.my.cnf] - - openstack-haproxy-cinder: - resources: - - File[/etc/haproxy/conf.d/070-cinder-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/070-cinder-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_070-cinder-api.cfg/fragments/00_cinder-api_listen_block] - - ntp-server: - resources: [] - - murano-keystone: - no_puppet_run: true - - swift-proxy_storage: - resources: - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/00_swift_proxy] - - File[/etc/swift/proxy-server.conf] - - Exec[concat_/etc/swift/proxy-server.conf] - - primary-openstack-network-agents-dhcp: - resources: - - Neutron_dhcp_agent_config[DEFAULT/debug] - - openstack-haproxy-heat: - no_puppet_run: true - - primary-openstack-controller: - resources: - - Nova_config[DEFAULT/quota_driver] - - Nova_config[database/max_retries] - - Nova_config[DEFAULT/debug] - - Nova_config[database/max_pool_size] - - Nova_config[database/max_overflow] - - Nova_config[DEFAULT/notify_on_state_change] - - Nova_config[DEFAULT/notification_driver] - - Nova_config[cinder/catalog_info] - - Nova_config[database/idle_timeout] - - Nova_config[DEFAULT/scheduler_default_filters] - - Nova_config[DEFAULT/allow_resize_to_same_host] - - 
Nova_config[DEFAULT/default_log_levels] - - Nova_config[DEFAULT/use_stderr] - - Nova_config[glance/api_servers] - - openstack-cinder: - resources: - - Cinder_config[DEFAULT/scheduler_default_filters] - - Cinder_config[DEFAULT/default_log_levels] - - Cinder_config[DEFAULT/debug] - - ceph_create_pools: - no_puppet_run: true - - keystone-db: - resources: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: - resources: [] - - copy_haproxy_keys: - type: copy_files - - openstack-network-networks: - resources: [] - - ssl-add-trust-chain: - no_puppet_run: true \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/ironic.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/ironic.yaml deleted file mode 100644 index 08e092132..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/ironic.yaml +++ /dev/null @@ -1,56 +0,0 @@ -tasks: - - update_hosts: - resources: [] - - clear_nodes_info: - type: skipped - - override_configuration: - resources: [] - - globals: - resources: [] - - fuel_pkgs: - resources: [] - - ironic_copy_bootstrap_key: - type: copy_files - - rsync_core_puppet: - type: sync - - cgroups: - resources: [] - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - tools: - resources: [] - - copy_deleted_nodes: - type: copy_files - - setup_repositories: - resources: [] - - allocate_hugepages: - resources: [] - - plugins_setup_repositories: - no_puppet_run: true - - ironic-conductor: - resources: [] - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: - resources: [] - - logging: - resources: [] - - sync_time: - type: shell - - plugins_rsync: - resources: - - Exec[sync_time_shell] - - connectivity_tests: - resources: [] - - configuration_symlink: - type: shell - - hosts: - resources: [] - - copy_haproxy_keys: - type: copy_files - - reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/nodes_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/nodes_settings.yaml deleted file mode 100644 index dbf35145c..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/ensurability/nodes_settings.yaml +++ /dev/null @@ -1,51 +0,0 @@ -controller: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -ironic: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -compute: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/compute.yaml deleted file mode 100644 index d3c984327..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/compute.yaml +++ /dev/null @@ -1,67 +0,0 @@ -roles: - compute -tasks: - - update_hosts: null - - openstack-network-start: - type: 
skipped - - openstack-network-common-config: null - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: null - - override_configuration: null - - globals: null - - fuel_pkgs: null - - openstack-network-agents-l3: null - - openstack-network-agents-metadata: null - - tools: null - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: null - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - openstack-network-plugins-l2: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ceph-compute: - no_puppet_run: true - - ssl-keys-saving: null - - sriov_iommu_check: - skip: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - logging: null - - top-role-compute: - skip: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Service[nova-compute] - - sync_time: - type: shell - - openstack-network-compute-nova: null - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - ceilometer-compute: - no_puppet_run: true - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/controller.yaml deleted file mode 100644 index ed3d30546..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/controller.yaml +++ /dev/null @@ -1,187 +0,0 @@ -roles: - controller -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: null - - cinder-db: null - - dump_rabbitmq_definitions: - skip: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: null - - ceilometer-controller: - no_puppet_run: true - - override_configuration: null - - ceilometer-keystone: - no_puppet_run: true - - nova-db: null - - workloads_collector_add: null - - primary-openstack-network-plugins-l2: null - - radosgw-keystone: null - - virtual_ips: null - - primary-dns-server: null - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: null - - openstack-haproxy-swift: null - - heat-db: null - - openstack-haproxy-neutron: null - - updatedb: - no_puppet_run: true - - ironic-db: null - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: null - - hosts: null - - primary-rabbitmq: null - - primary-cluster-haproxy: null - - openstack-network-routers: null - - reserved_ports: null - - controller_remaining_tasks: null - - glance-keystone: null - - openstack-haproxy-aodh: null - - murano-cfapi: - no_puppet_run: true - - ironic-compute: null - - primary-openstack-network-agents-metadata: null - - cinder-keystone: null - - copy_keys: - type: copy_files - - enable_rados: - no_puppet_run: true - - ntp-check: null - - aodh-db: - no_puppet_run: true - - disable_keystone_service_token: - no_puppet_run: true - - umm: null - - memcached: null - - allocate_hugepages: null - - openrc-delete: - skip: - - File[/root/openrc] - - 
plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: - no_puppet_run: true - - openstack-haproxy-sahara: - no_puppet_run: true - - ssl-keys-saving: null - - primary-cluster: null - - upload_cirros: - type: shell - - primary-keystone: - skip: - - File[/root/openrc] - - primary-openstack-network-agents-l3: null - - upload_configuration: - type: upload_file - - create-cinder-types: null - - neutron-keystone: null - - logging: null - - nova-keystone: null - - update_hosts: null - - ironic-keystone: null - - connectivity_tests: null - - primary-heat: null - - conntrackd: null - - sahara-db: - no_puppet_run: true - - horizon: null - - openstack-haproxy-ceilometer: - no_puppet_run: true - - openstack-network-common-config: null - - firewall: null - - apache: null - - globals: null - - aodh-keystone: - no_puppet_run: true - - glance: null - - tools: null - - openstack-haproxy: null - - cgroups: null - - murano-cfapi-keystone: - no_puppet_run: true - - aodh: - no_puppet_run: true - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: null - - setup_repositories: null - - openstack-network-routers-ha: - no_puppet_run: true - - upload_murano_package: - no_puppet_run: true - - glance-db: null - - neutron-db: null - - ironic_upload_images: - type: shell - - swift-rebalance-cron: null - - primary-ceph-mon: null - - openstack-haproxy-stats: null - - ironic-api: null - - primary-ceph-radosgw: null - - dns-client: null - - cluster-vrouter: null - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: null - - cluster_health: null - - heat-keystone: null - - openstack-haproxy-horizon: null - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - no_puppet_run: true - - copy_keys_ceph: - type: copy_files - - sahara: - no_puppet_run: true - - fuel_pkgs: null - - swift-keystone: null - - public_vip_ping: null - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: null - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: null - - openstack-network-server-config: null - - primary-database: - skip: - - File[/root/.my.cnf] - - openstack-haproxy-cinder: null - - ntp-server: null - - murano-keystone: - no_puppet_run: true - - swift-proxy_storage: null - - primary-openstack-network-agents-dhcp: null - - openstack-haproxy-heat: null - - primary-openstack-controller: null - - openstack-cinder: null - - ceph_create_pools: - no_puppet_run: true - - keystone-db: - skip: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: null - - copy_haproxy_keys: - type: copy_files - - openstack-network-networks: null - - ssl-add-trust-chain: null \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/ironic.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/ironic.yaml deleted file mode 100644 index f9ad97c6a..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_ironic/idempotency/ironic.yaml +++ /dev/null @@ -1,43 +0,0 @@ -roles: - ironic -tasks: - - update_hosts: null - - clear_nodes_info: - type: skipped - - override_configuration: null - - globals: null - - fuel_pkgs: null - - ironic_copy_bootstrap_key: - type: copy_files - - rsync_core_puppet: - type: sync - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - tools: null - - copy_deleted_nodes: - 
type: copy_files - - setup_repositories: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ironic-conductor: null - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - logging: null - - sync_time: - type: shell - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - hosts: null - - copy_haproxy_keys: - type: copy_files - - reserved_ports: null \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/cluster_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/cluster_settings.yaml deleted file mode 100644 index 700beac9d..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/cluster_settings.yaml +++ /dev/null @@ -1,421 +0,0 @@ -access: - email: - type: text - value: admin_upd@localhost - password: - type: password - value: admin - tenant: - type: text - value: admin - user: - type: text - value: admin -additional_components: - ceilometer: - type: checkbox - value: false - heat: - type: hidden - value: false - ironic: - type: checkbox - value: false - mongo: - type: checkbox - value: false - murano: - type: checkbox - value: false - murano-cfapi: - type: checkbox - value: false - murano_glance_artifacts_plugin: - type: checkbox - value: false - sahara: - type: checkbox - value: false -common: - auth_key: - type: hidden - value: '' - auto_assign_floating_ip: - type: checkbox - value: true - debug: - type: checkbox - value: false - libvirt_type: - type: radio - value: qemu - nova_quota: - type: checkbox - value: true - propagate_task_deploy: - type: hidden - value: false - puppet_debug: - type: checkbox - value: false - resume_guests_state_on_host_boot: - type: checkbox - value: false - task_deploy: - type: hidden - value: true - use_cow_images: - type: checkbox - value: true -corosync: - group: - type: text - value: 226.94.1.1 - port: - type: text - value: '12000' - verified: - type: checkbox - value: false -external_mongo: - hosts_ip: - type: text - value: '' - mongo_db_name: - type: text - value: ceilometer - mongo_password: - type: password - value: ceilometer - mongo_replset: - type: text - value: '' - mongo_user: - type: text - value: ceilometer -kernel_params: - kernel: - type: text - value: console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset -murano_settings: - murano_repo_url: - type: text - value: http://storage.apps.openstack.org/ -neutron_advanced_configuration: - neutron_dvr: - type: checkbox - value: false - neutron_l2_pop: - type: checkbox - value: false - neutron_l3_ha: - type: checkbox - value: false - neutron_qos: - type: checkbox - value: false -operator_user: - authkeys: - type: textarea - value: '' - homedir: - type: text - value: /home/fueladmin - name: - type: text - value: fueladmin - password: - type: password - value: xalFdhQSGrB7xgdPrPiM3vZm - sudo: - type: textarea - value: 'ALL=(ALL) NOPASSWD: ALL' -provision: - method: - type: hidden - value: image - packages: - type: textarea - value: 'acl - - anacron - - bash-completion - - bridge-utils - - bsdmainutils - - build-essential - - cloud-init - - curl - - daemonize - - debconf-utils - - gdisk - - grub-pc - - hpsa-dkms - - hwloc - - i40e-dkms - - linux-firmware - - linux-firmware-nonfree - - linux-headers-generic-lts-trusty - - linux-image-generic-lts-trusty - - lvm2 - - mcollective - - mdadm - - multipath-tools - - 
multipath-tools-boot - - nailgun-agent - - nailgun-mcagents - - network-checker - - ntp - - openssh-client - - openssh-server - - puppet - - python-amqp - - ruby-augeas - - ruby-ipaddress - - ruby-json - - ruby-netaddr - - ruby-openstack - - ruby-shadow - - ruby-stomp - - telnet - - ubuntu-minimal - - ubuntu-standard - - uuid-runtime - - vim - - virt-what - - vlan - - ' -public_network_assignment: - assign_to_all_nodes: - type: checkbox - value: false -public_ssl: - cert_data: - type: file - value: - content: '-----BEGIN CERTIFICATE----- - - MIIC7TCCAdUCAgPoMA0GCSqGSIb3DQEBBQUAMDwxHjAcBgNVBAsMFU1pcmFudGlz - - IEZ1ZWwtUUEgVGVhbTEaMBgGA1UEAwwRcHVibGljLmZ1ZWwubG9jYWwwHhcNMTYw - - NDE5MTkxMTU1WhcNMjYwNDE3MTkxMTU1WjA8MR4wHAYDVQQLDBVNaXJhbnRpcyBG - - dWVsLVFBIFRlYW0xGjAYBgNVBAMMEXB1YmxpYy5mdWVsLmxvY2FsMIIBIjANBgkq - - hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoZBouZH+0S1jPYy+FxvNAkdGxsNVzsOI - - g7OybWx+DIskdRvONwrCFFtvP2InKJowPCebGcCqDqGF2zgFLmA9yQN/05A9f8bX - - hFrtjfNb/YYJxDE4itSYNgSzSfnitii7AJme9UBw94s0p3749irGTB++ZhcPzwdg - - Nx0Ymk2uFFNU18YxSx8PAk2w73a36t61E0P++MT6sYIM1GAx+9pm9Ddrj5r0b/M7 - - ikHGIUuB7M6t3mNHUveld+ZyXjaONMHZI5WQ16AMZwtHunUu/42k+o6RSS4h+zT8 - - ZiWW5cxZVLn6xqJkDkXMDdsS7PrveSuODq3LuaG4fwRpf1u2hqvyuwIDAQABMA0G - - CSqGSIb3DQEBBQUAA4IBAQBfAjtVxKItKMFAQl/EufHjk4rBpRiaHGLH2CIJHWJ1 - - i+z7gI5XazzwMCprOxsCUrJUpr8ChobenyebNPJSnDI0R0z8ZTX6kTNk7A2ZFVrp - - lL5TlpwhdtUjWxF3Coi+w694MbyLmJ4pA6QZTYVqSilZZ1cncLNA+Fc97STfLukK - - wqjwCYovRVjUn4jLRjy2kcw89060xxZopVpkY9cPfg0P+PICo/eS4EunQ5rd/EDV - - 7DBfCbzthArBjF8/72J8PYhqwEc+i5PDkn2CNIXoT0coxC9YAHJ+zFHgxHnKa0/q - - TPlvi+wJKrrSnXb5Oc34tVOxDF/WQjNuve8vHg7hvaIM - - -----END CERTIFICATE----- - - -----BEGIN PRIVATE KEY----- - - MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChkGi5kf7RLWM9 - - jL4XG80CR0bGw1XOw4iDs7JtbH4MiyR1G843CsIUW28/YicomjA8J5sZwKoOoYXb - - OAUuYD3JA3/TkD1/xteEWu2N81v9hgnEMTiK1Jg2BLNJ+eK2KLsAmZ71QHD3izSn - - fvj2KsZMH75mFw/PB2A3HRiaTa4UU1TXxjFLHw8CTbDvdrfq3rUTQ/74xPqxggzU - - YDH72mb0N2uPmvRv8zuKQcYhS4Hszq3eY0dS96V35nJeNo40wdkjlZDXoAxnC0e6 - - dS7/jaT6jpFJLiH7NPxmJZblzFlUufrGomQORcwN2xLs+u95K44Orcu5obh/BGl/ - - W7aGq/K7AgMBAAECggEAI6RyFg5JQuhabmewP/TYI1qKGKtbMMQeR7/K6yz2GdpQ - - bq11rtrmugr53efPb7ukTIEITIQegB/OIfCX5AVDXCczef7mMlwxi3dr1NcNQj7h - - xLB/ItGHytL6oqVICJuvtZAuaziOM244bYMrdVM2b/DI1kjcKfYcmcwHc3MTplPq - - Nh+L5u2ue6bYvT+XRF4KrwuKmKuyJghyMeoiLI9JupkKw79ZB/l0Mh8vmxKMPj8g - - MNxoJbwoTkjQxuJELmet+ysBg2KT+gJEirfRmZiouDxx8Zukd8O6YvnlsOiRFokX - - 05r33fna1z5IBpGnwe+rn6pQaeXflSd6dqotoBp4QQKBgQDLrhAdsZnDXDpYuOv+ - - ITMpc33J4NW7yE+26ebzWkLYRUW5i7YDCtJdFi5pHCeA3+QD3RlYiinQlzcy3i3R - - 4Uv4riuKyDbgaw9sYOhmgluhPKDuznEWXomloEVu8jFrHg3TKY2v/GCcv99N5grQ - - Jg9rScFpZXkTj23KzqHf23uTEQKBgQDLENH7QzoNsBv6eS7kElBx3BQWNa0dhXab - - kRXo62/++tIDGMkzaq38hsjcAZi8uZDZY0QJTmBMdZN3LLBln5C2g8Y6Eym5ITvf - - pxkMUK0++MygbK/Vwmp+xu7XMiPNMG/E8NqQkca3F/6Ld08PAauZ8gpgoAsnjlNg - - pPUdWRCRCwKBgEiEB17bDXidjHRsGjFXVqTKZp2Ke+4oaiEgc8Zue2AOgb2GvV2l - - 67GSpSFtEa9zhvXNMSnxvuNyAwgMTFnuEaNPN1do4wjRdSNL+VIN1Vu5fz6mp2Kk - - c/NQ9YeDmQ6fG6Lzp2thum/0bCeK4IytEE5NaxwAMbRCG3/aQ4200fFRAoGAMwg5 - - HSIZ9tKpVVsbE6oemV6rlaFLrj2aPyJJFU4FyViTar/R4KAQtYPR+qhUECm6Y0d1 - - E7mkrdJmiu6qLf/ZyGR5bqLeO25Es8I0o0mrIEY6dp6Z2eiQBuhLob0yDiD8FcxJ - - wUdBX0YibD5Bmg3baEbRoNLXussj3QfXqdZ2OV0CgYEAyovcXc1ibwrwNO59yw99 - - 7zCoMFjXzZgtxn5JQDwMsdt9UKd/4nOPbbiRPL3ynr5zboDZzRxihXB5zzKjrYlE - - o4QZIWV0VgGS2eQSni3CGOsG4VhE4/9EFF7UqeA0hYkGAZMS+EKSdPpIujStD/ck - - sQ/BZiYxMSE8+synlzp3gss= - - -----END PRIVATE KEY----- - - ' - name: ca.pem - cert_source: - type: radio - value: user_uploaded - 
horizon: - type: checkbox - value: false - hostname: - type: text - value: public.fuel.local - services: - type: checkbox - value: false -service_user: - homedir: - type: hidden - value: /var/lib/fuel - name: - type: hidden - value: fuel - password: - type: hidden - value: WEwz5aKA0hYDrcERjX7irQzS - root_password: - type: hidden - value: r00tme - sudo: - type: hidden - value: 'ALL=(ALL) NOPASSWD: ALL' -storage: - admin_key: - type: hidden - value: AQDzghZXAAAAABAA7obspvgNjPa/HBWSOUzI1w== - bootstrap_osd_key: - type: hidden - value: AQDzghZXAAAAABAAWaiWslWwse+hsaKLzbtQFw== - ephemeral_ceph: - type: checkbox - value: false - fsid: - type: hidden - value: 4b0ab6f5-b82b-44e4-ac3a-15c76f960b82 - images_ceph: - type: checkbox - value: false - mon_key: - type: hidden - value: AQDzghZXAAAAABAAVi1udBHvkQbZbDgNnT7gXA== - objects_ceph: - type: checkbox - value: false - osd_pool_size: - type: text - value: '3' - radosgw_key: - type: hidden - value: AQDzghZXAAAAABAA8jY8KftsCK4l726rNdu/Zg== - volumes_block_device: - type: checkbox - value: true - volumes_ceph: - type: checkbox - value: false - volumes_lvm: - type: checkbox - value: false -syslog: - syslog_port: - type: text - value: '514' - syslog_server: - type: text - value: '' - syslog_transport: - type: radio - value: tcp -workloads_collector: - enabled: - type: hidden - value: false - password: - type: password - value: 8qtWdXhhY84wFoxwBbZcpq3P - tenant: - type: text - value: services - user: - type: text - value: fuel_stats_user diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/compute.yaml deleted file mode 100644 index b29497f24..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/compute.yaml +++ /dev/null @@ -1,89 +0,0 @@ -tasks: -- update_hosts: - resources: [] -- openstack-network-start: - type: skipped -- openstack-network-common-config: - resources: [] -- clear_nodes_info: - type: skipped -- openstack-network-agents-sriov: - resources: [] -- copy_keys_ceph: - type: copy_files -- globals: - resources: [] -- fuel_pkgs: - resources: [] -- openstack-network-agents-l3: - resources: [] -- openstack-network-agents-metadata: - resources: [] -- tools: - resources: [] -- rsync_core_puppet: - type: sync -- enable_nova_compute_service: - resources: [] -- cgroups: - resources: [] -- upload_nodes_info: - type: skipped -- copy_keys: - type: copy_files -- override_configuration: - resources: [] -- setup_repositories: - resources: [] -- dns-client: - resources: [] -- openstack-network-plugins-l2: - resources: [] -- allocate_hugepages: - resources: [] -- plugins_setup_repositories: - no_puppet_run: true -- ceph-compute: - no_puppet_run: true -- ssl-keys-saving: - no_puppet_run: true -- sriov_iommu_check: - resources: - - Exec[sriov_iommu_check] -- openstack-network-end: - type: skipped -- ceilometer-compute: - no_puppet_run: true -- upload_configuration: - type: upload_file -- firewall: - resources: [] -- logging: - resources: [] -- top-role-compute: - resources: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Nova_config[DEFAULT/resume_guests_state_on_host_boot] - - Nova_config[vnc/novncproxy_base_url] - - Service[nova-compute] -- sync_time: - type: shell -- openstack-network-compute-nova: - resources: [] -- plugins_rsync: - no_puppet_run: true -- connectivity_tests: - resources: [] -- configuration_symlink: - type: shell -- hosts: - resources: [] -- 
copy_haproxy_keys: - type: copy_files -- ntp-client: - resources: [] -- ssl-add-trust-chain: - no_puppet_run: true -- reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/controller.yaml deleted file mode 100644 index d96b7079e..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/controller.yaml +++ /dev/null @@ -1,332 +0,0 @@ -tasks: -- ironic_post_swift_key: - type: shell -- openstack-haproxy-mysqld: - resources: [] -- cinder-db: - resources: [] -- dump_rabbitmq_definitions: - resources: [] -- rsync_core_puppet: - type: sync -- ssl-dns-setup: - resources: - - Exec[rsync_core_puppet_shell] -- ceilometer-controller: - no_puppet_run: true -- override_configuration: - resources: [] -- ceilometer-keystone: - no_puppet_run: true -- nova-db: - resources: [] -- workloads_collector_add: - resources: [] -- primary-openstack-network-plugins-l2: - resources: [] -- radosgw-keystone: - resources: [] -- virtual_ips: - resources: [] -- primary-dns-server: - resources: [] -- openstack-haproxy-murano: - resources: [] -- openstack-network-end: - type: skipped -- openstack-haproxy-radosgw: - resources: [] -- openstack-haproxy-swift: - resources: [] -- heat-db: - resources: [] -- openstack-haproxy-neutron: - resources: [] -- updatedb: - no_puppet_run: true -- ironic-db: - no_puppet_run: true -- plugins_rsync: - no_puppet_run: true -- ceilometer-radosgw-user: - no_puppet_run: true -- openstack-haproxy-keystone: - resources: [] -- hosts: - resources: [] -- primary-rabbitmq: - resources: [] -- primary-cluster-haproxy: - resources: [] -- openstack-network-routers: - resources: [] -- reserved_ports: - resources: [] -- controller_remaining_tasks: - resources: [] -- glance-keystone: - resources: [] -- openstack-haproxy-aodh: - resources: [] -- murano-cfapi: - no_puppet_run: true -- ironic-compute: - no_puppet_run: true -- primary-openstack-network-agents-metadata: - resources: [] -- cinder-keystone: - resources: [] -- copy_keys: - type: copy_files -- enable_rados: - no_puppet_run: true -- ntp-check: - resources: [] -- aodh-db: - no_puppet_run: true -- disable_keystone_service_token: - resources: [] -- umm: - resources: [] -- memcached: - resources: [] -- allocate_hugepages: - resources: [] -- openrc-delete: - resources: - - File[/root/openrc] -- plugins_setup_repositories: - no_puppet_run: true -- sahara-keystone: - no_puppet_run: true -- openstack-haproxy-sahara: - resources: [] -- ssl-keys-saving: - no_puppet_run: true -- primary-cluster: - resources: [] -- upload_cirros: - type: shell -- primary-keystone: - resources: - - File[/root/openrc] -- primary-openstack-network-agents-l3: - resources: [] -- upload_configuration: - type: upload_file -- create-cinder-types: - resources: [] -- neutron-keystone: - resources: - - Keystone_endpoint[RegionOne/neutron::network] -- logging: - resources: [] -- nova-keystone: - resources: - - Keystone_endpoint[RegionOne/nova::compute] - - Keystone_endpoint[RegionOne/novav3::computev3] -- update_hosts: - resources: [] -- ironic-keystone: - no_puppet_run: true -- connectivity_tests: - resources: [] -- swift-storage: - resources: [] -- primary-heat: - resources: - - Heat_config[keystone_authtoken/auth_uri] -- conntrackd: - resources: [] -- sahara-db: - no_puppet_run: true -- horizon: - resources: - - 
File[/var/lib/puppet/concat/_etc_openstack-dashboard_local_settings.py/fragments/50_local_settings.py] - - File[/etc/openstack-dashboard/local_settings.py] - - Exec[concat_/etc/openstack-dashboard/local_settings.py] -- openstack-haproxy-ceilometer: - resources: - - Exec[concat_/etc/haproxy/conf.d/140-ceilometer.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_140-ceilometer.cfg/fragments/00_ceilometer_listen_block] - - File[/etc/haproxy/conf.d/140-ceilometer.cfg] -- openstack-network-common-config: - resources: - - Neutron_config[DEFAULT/service_plugins] -- firewall: - resources: [] -- apache: - resources: [] -- globals: - resources: - - File[/etc/hiera/globals.yaml] -- aodh-keystone: - no_puppet_run: true -- glance: - resources: - - Glance_glare_config[DEFAULT/default_log_levels] - - Glance_registry_config[DEFAULT/default_log_levels] - - Glance_api_config[DEFAULT/notification_driver] - - Glance_api_config[DEFAULT/default_log_levels] - - Glance_cache_config[DEFAULT/debug] - - Glance_api_config[DEFAULT/debug] - - Glance_glare_config[DEFAULT/debug] - - Glance_registry_config[DEFAULT/debug] -- tools: - resources: [] -- openstack-haproxy: - resources: [] -- cgroups: - resources: [] -- murano-cfapi-keystone: - no_puppet_run: true -- aodh: - no_puppet_run: true -- ceph_create_pools: - no_puppet_run: true -- openstack-haproxy-ironic: - no_puppet_run: true -- setup_repositories: - resources: [] -- openstack-network-routers-ha: - no_puppet_run: true -- glance-db: - resources: [] -- neutron-db: - resources: [] -- ironic_upload_images: - type: shell -- swift-rebalance-cron: - resources: [] -- primary-ceph-mon: - resources: [] -- openstack-haproxy-stats: - resources: [] -- ironic-api: - no_puppet_run: true -- primary-ceph-radosgw: - resources: [] -- dns-client: - resources: [] -- cluster-vrouter: - resources: [] -- murano-rabbitmq: - no_puppet_run: true -- api-proxy: - resources: [] -- cluster_health: - resources: [] -- heat-keystone: - resources: - - Keystone_endpoint[RegionOne/heat-cfn::cloudformation] - - Keystone_endpoint[RegionOne/heat::orchestration] -- openstack-haproxy-horizon: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/00_horizon_listen_block] - - File[/etc/haproxy/conf.d/015-horizon.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/01-horizon_horizon_balancermember_horizon] - - Exec[concat_/etc/haproxy/conf.d/015-horizon.cfg] -- openstack-network-start: - type: skipped -- clear_nodes_info: - type: skipped -- murano-db: - resources: - - Exec[clear_nodes_info_shell] -- copy_keys_ceph: - type: copy_files -- sahara: - no_puppet_run: true -- fuel_pkgs: - resources: [] -- swift-keystone: - resources: - - Keystone_endpoint[RegionOne/swift::object-store] - - Keystone_endpoint[RegionOne/swift_s3::s3] -- public_vip_ping: - resources: [] -- upload_nodes_info: - type: skipped -- openstack-haproxy-glance: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_080-glance-api.cfg/fragments/00_glance-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/080-glance-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_081-glance-glare.cfg/fragments/00_glance-glare_listen_block] - - File[/etc/haproxy/conf.d/080-glance-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/081-glance-glare.cfg] - - File[/etc/haproxy/conf.d/081-glance-glare.cfg] -- murano: - no_puppet_run: true -- ceph_ready_check: - type: shell -- enable_quorum: - type: shell -- openstack-haproxy-nova: - resources: - - 
File[/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_040-nova-api.cfg/fragments/00_nova-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_170-nova-novncproxy.cfg/fragments/00_nova-novncproxy_listen_block] - - Exec[concat_/etc/haproxy/conf.d/170-nova-novncproxy.cfg] -- openstack-network-server-config: - resources: - - Neutron_config[DEFAULT/router_distributed] - - Neutron_config[qos/notification_drivers] - - Neutron_plugin_ml2[ml2/mechanism_drivers] - - Neutron_plugin_ml2[ml2/extension_drivers] -- primary-database: - resources: - - File[/root/.my.cnf] -- openstack-haproxy-cinder: - resources: - - File[/etc/haproxy/conf.d/070-cinder-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/070-cinder-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_070-cinder-api.cfg/fragments/00_cinder-api_listen_block] -- ntp-server: - resources: [] -- murano-keystone: - no_puppet_run: true -- primary-openstack-network-agents-dhcp: - resources: - - Neutron_dhcp_agent_config[DEFAULT/debug] -- openstack-haproxy-heat: - resources: [] -- primary-openstack-controller: - resources: - - Nova_config[DEFAULT/quota_driver] - - Nova_config[DEFAULT/debug] - - Nova_config[DEFAULT/default_log_levels] -- openstack-cinder: - resources: - - Cinder_config[DEFAULT/scheduler_default_filters] - - Cinder_config[DEFAULT/default_log_levels] - - Cinder_config[DEFAULT/debug] -- keystone-db: - resources: - - File[/root/.my.cnf] -- sync_time: - type: shell -- configuration_symlink: - type: shell -- openstack-network-server-nova: - resources: [] -- copy_haproxy_keys: - type: copy_files -- primary-swift-proxy: - resources: - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_account_frag-account] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/00_swift_proxy] - - File[/etc/swift/proxy-server.conf] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_object_frag-object] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_server_frag-swift_server] - - Exec[concat_/etc/swift/proxy-server.conf] - - Exec[concat_/etc/rsyncd.conf] - - File[/etc/rsyncd.conf] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_container_frag-container] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_backups_frag-swift_backups] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/33_swift_ceilometer] -- openstack-network-networks: - resources: [] -- ssl-add-trust-chain: - no_puppet_run: true \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/mongo.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/mongo.yaml deleted file mode 100644 index 1a0bf27c8..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/mongo.yaml +++ /dev/null @@ -1,60 +0,0 @@ -tasks: -- update_hosts: - resources: [] -- clear_nodes_info: - type: skipped -- top-role-primary-mongo: - resources: [] -- copy_keys_ceph: - type: copy_files -- globals: - resources: [] -- fuel_pkgs: - resources: [] -- tools: - resources: [] -- rsync_core_puppet: - type: sync -- cgroups: - resources: [] -- upload_nodes_info: - type: skipped -- copy_keys: - type: copy_files -- override_configuration: - resources: [] -- setup_repositories: - resources: [] -- dns-client: - resources: [] -- allocate_hugepages: - resources: [] -- 
plugins_setup_repositories: - no_puppet_run: true -- ssl-keys-saving: - no_puppet_run: true -- upload_configuration: - type: upload_file -- firewall: - resources: [] -- logging: - resources: [] -- sync_time: - type: shell -- plugins_rsync: - resources: - - Exec[sync_time_shell] -- connectivity_tests: - resources: [] -- configuration_symlink: - type: shell -- hosts: - resources: [] -- copy_haproxy_keys: - type: copy_files -- ntp-client: - resources: [] -- ssl-add-trust-chain: - no_puppet_run: true -- reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/nodes_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/nodes_settings.yaml deleted file mode 100644 index faacea06a..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/ensurability/nodes_settings.yaml +++ /dev/null @@ -1,51 +0,0 @@ -controller: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -mongo: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -compute: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/compute.yaml deleted file mode 100644 index 64063a8c9..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/compute.yaml +++ /dev/null @@ -1,66 +0,0 @@ -roles: - compute -tasks: - - update_hosts: null - - openstack-network-start: - type: skipped - - openstack-network-common-config: null - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: null - - override_configuration: null - - globals: null - - fuel_pkgs: null - - openstack-network-agents-l3: null - - openstack-network-agents-metadata: null - - tools: null - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: null - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - openstack-network-plugins-l2: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ceph-compute: - no_puppet_run: true - - ssl-keys-saving: null - - sriov_iommu_check: - skip: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - logging: null - - top-role-compute: - skip: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Service[nova-compute] - - sync_time: - type: shell - - openstack-network-compute-nova: null - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - ceilometer-compute: null - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null diff --git 
a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/controller.yaml deleted file mode 100644 index 44dec00b3..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/controller.yaml +++ /dev/null @@ -1,188 +0,0 @@ -roles: - controller -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: null - - cinder-db: null - - dump_rabbitmq_definitions: - skip: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: null - - ceilometer-controller: null - - override_configuration: null - - ceilometer-keystone: null - - nova-db: null - - workloads_collector_add: null - - primary-openstack-network-plugins-l2: null - - radosgw-keystone: null - - virtual_ips: null - - primary-dns-server: null - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: null - - openstack-haproxy-swift: null - - heat-db: null - - openstack-haproxy-neutron: null - - updatedb: - no_puppet_run: true - - ironic-db: - no_puppet_run: true - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: null - - hosts: null - - primary-rabbitmq: null - - primary-cluster-haproxy: null - - openstack-network-routers: null - - reserved_ports: null - - controller_remaining_tasks: null - - glance-keystone: null - - openstack-haproxy-aodh: null - - murano-cfapi: - no_puppet_run: true - - ironic-compute: - no_puppet_run: true - - primary-openstack-network-agents-metadata: null - - cinder-keystone: null - - copy_keys: - type: copy_files - - enable_rados: - no_puppet_run: true - - ntp-check: null - - aodh-db: null - - disable_keystone_service_token: - no_puppet_run: true - - umm: null - - memcached: null - - allocate_hugepages: null - - openrc-delete: - skip: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: - no_puppet_run: true - - openstack-haproxy-sahara: - no_puppet_run: true - - ssl-keys-saving: null - - primary-cluster: - skip: - - Pcmk_property[no-quorum-policy] - - upload_cirros: - type: shell - - primary-keystone: - skip: - - File[/root/openrc] - - primary-openstack-network-agents-l3: null - - upload_configuration: - type: upload_file - - create-cinder-types: null - - neutron-keystone: null - - logging: null - - nova-keystone: null - - update_hosts: null - - ironic-keystone: - no_puppet_run: true - - connectivity_tests: null - - primary-heat: null - - conntrackd: null - - sahara-db: - no_puppet_run: true - - horizon: null - - openstack-haproxy-ceilometer: null - - openstack-network-common-config: null - - firewall: null - - apache: null - - globals: null - - aodh-keystone: null - - glance: null - - tools: null - - openstack-haproxy: null - - cgroups: null - - murano-cfapi-keystone: - no_puppet_run: true - - aodh: null - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: null - - openstack-network-routers-ha: - no_puppet_run: true - - upload_murano_package: - no_puppet_run: true - - glance-db: null - - neutron-db: null - - ironic_upload_images: - type: shell - - swift-rebalance-cron: null - - primary-ceph-mon: null - - openstack-haproxy-stats: null - - ironic-api: - no_puppet_run: true - - primary-ceph-radosgw: null - - dns-client: null - - cluster-vrouter: null - - 
murano-rabbitmq: - no_puppet_run: true - - api-proxy: null - - cluster_health: null - - heat-keystone: null - - openstack-haproxy-horizon: null - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - no_puppet_run: true - - copy_keys_ceph: - type: copy_files - - sahara: - no_puppet_run: true - - fuel_pkgs: null - - swift-keystone: null - - public_vip_ping: null - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: null - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: null - - openstack-network-server-config: null - - primary-database: - skip: - - File[/root/.my.cnf] - - openstack-haproxy-cinder: null - - ntp-server: null - - murano-keystone: - no_puppet_run: true - - swift-proxy_storage: null - - primary-openstack-network-agents-dhcp: null - - openstack-haproxy-heat: null - - primary-openstack-controller: null - - openstack-cinder: null - - ceph_create_pools: - no_puppet_run: true - - keystone-db: - skip: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: null - - copy_haproxy_keys: - type: copy_files - - openstack-network-networks: null - - ssl-add-trust-chain: null diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/mongo.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/mongo.yaml deleted file mode 100644 index eca85ed26..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_1_mongo/idempotency/mongo.yaml +++ /dev/null @@ -1,45 +0,0 @@ -roles: - mongo -tasks: - - update_hosts: null - - clear_nodes_info: - type: skipped - - top-role-primary-mongo: null - - override_configuration: null - - globals: null - - fuel_pkgs: null - - tools: null - - rsync_core_puppet: - type: sync - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ssl-keys-saving: null - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - logging: null - - sync_time: - type: shell - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/ceph-osd.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/ceph-osd.yaml deleted file mode 100644 index 8b9cb1659..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/ceph-osd.yaml +++ /dev/null @@ -1,62 +0,0 @@ -tasks: - - update_hosts: - resources: [] - - clear_nodes_info: - type: skipped - - copy_keys_ceph: - type: copy_files - - globals: - resources: [] - - fuel_pkgs: - resources: [] - - tools: - resources: [] - - rsync_core_puppet: - type: sync - - cgroups: - resources: [] - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - override_configuration: - resources: [] - - setup_repositories: - resources: [] - - dns-client: - resources: [] - - allocate_hugepages: - resources: [] - - plugins_setup_repositories: - no_puppet_run: true - - ssl-keys-saving: - no_puppet_run: true - - 
upload_configuration: - type: upload_file - - firewall: - resources: [] - - top-role-ceph-osd: - resources: [] - - logging: - resources: [] - - updatedb: - no_puppet_run: true - - sync_time: - type: shell - - plugins_rsync: - resources: - - Exec[sync_time_shell] - - connectivity_tests: - resources: [] - - configuration_symlink: - type: shell - - hosts: - resources: [] - - copy_haproxy_keys: - type: copy_files - - ntp-client: - resources: [] - - ssl-add-trust-chain: - no_puppet_run: true - - reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/cluster_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/cluster_settings.yaml deleted file mode 100644 index c4eb20f83..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/cluster_settings.yaml +++ /dev/null @@ -1,421 +0,0 @@ -access: - email: - type: text - value: admin_upd@localhost - password: - type: password - value: admin - tenant: - type: text - value: admin - user: - type: text - value: admin -additional_components: - ceilometer: - type: checkbox - value: false - heat: - type: hidden - value: false - ironic: - type: checkbox - value: false - mongo: - type: checkbox - value: false - murano: - type: checkbox - value: false - murano-cfapi: - type: checkbox - value: false - murano_glance_artifacts_plugin: - type: checkbox - value: false - sahara: - type: checkbox - value: false -common: - auth_key: - type: hidden - value: '' - auto_assign_floating_ip: - type: checkbox - value: true - debug: - type: checkbox - value: false - libvirt_type: - type: radio - value: qemu - nova_quota: - type: checkbox - value: true - propagate_task_deploy: - type: hidden - value: false - puppet_debug: - type: checkbox - value: false - resume_guests_state_on_host_boot: - type: checkbox - value: false - task_deploy: - type: hidden - value: true - use_cow_images: - type: checkbox - value: true -corosync: - group: - type: text - value: 226.94.1.1 - port: - type: text - value: '12000' - verified: - type: checkbox - value: false -external_mongo: - hosts_ip: - type: text - value: '' - mongo_db_name: - type: text - value: ceilometer - mongo_password: - type: password - value: ceilometer - mongo_replset: - type: text - value: '' - mongo_user: - type: text - value: ceilometer -kernel_params: - kernel: - type: text - value: console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset -murano_settings: - murano_repo_url: - type: text - value: http://storage.apps.openstack.org/ -neutron_advanced_configuration: - neutron_dvr: - type: checkbox - value: false - neutron_l2_pop: - type: checkbox - value: false - neutron_l3_ha: - type: checkbox - value: false - neutron_qos: - type: checkbox - value: false -operator_user: - authkeys: - type: textarea - value: '' - homedir: - type: text - value: /home/fueladmin - name: - type: text - value: fueladmin - password: - type: password - value: xalFdhQSGrB7xgdPrPiM3vZm - sudo: - type: textarea - value: 'ALL=(ALL) NOPASSWD: ALL' -provision: - method: - type: hidden - value: image - packages: - type: textarea - value: 'acl - - anacron - - bash-completion - - bridge-utils - - bsdmainutils - - build-essential - - cloud-init - - curl - - daemonize - - debconf-utils - - gdisk - - grub-pc - - hpsa-dkms - - hwloc - - i40e-dkms - - linux-firmware - - linux-firmware-nonfree - - linux-headers-generic-lts-trusty - - linux-image-generic-lts-trusty - - lvm2 - - mcollective - - mdadm - - 
multipath-tools - - multipath-tools-boot - - nailgun-agent - - nailgun-mcagents - - network-checker - - ntp - - openssh-client - - openssh-server - - puppet - - python-amqp - - ruby-augeas - - ruby-ipaddress - - ruby-json - - ruby-netaddr - - ruby-openstack - - ruby-shadow - - ruby-stomp - - telnet - - ubuntu-minimal - - ubuntu-standard - - uuid-runtime - - vim - - virt-what - - vlan - - ' -public_network_assignment: - assign_to_all_nodes: - type: checkbox - value: false -public_ssl: - cert_data: - type: file - value: - content: '-----BEGIN CERTIFICATE----- - - MIIC7TCCAdUCAgPoMA0GCSqGSIb3DQEBBQUAMDwxHjAcBgNVBAsMFU1pcmFudGlz - - IEZ1ZWwtUUEgVGVhbTEaMBgGA1UEAwwRcHVibGljLmZ1ZWwubG9jYWwwHhcNMTYw - - NDE5MTkxMTU1WhcNMjYwNDE3MTkxMTU1WjA8MR4wHAYDVQQLDBVNaXJhbnRpcyBG - - dWVsLVFBIFRlYW0xGjAYBgNVBAMMEXB1YmxpYy5mdWVsLmxvY2FsMIIBIjANBgkq - - hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoZBouZH+0S1jPYy+FxvNAkdGxsNVzsOI - - g7OybWx+DIskdRvONwrCFFtvP2InKJowPCebGcCqDqGF2zgFLmA9yQN/05A9f8bX - - hFrtjfNb/YYJxDE4itSYNgSzSfnitii7AJme9UBw94s0p3749irGTB++ZhcPzwdg - - Nx0Ymk2uFFNU18YxSx8PAk2w73a36t61E0P++MT6sYIM1GAx+9pm9Ddrj5r0b/M7 - - ikHGIUuB7M6t3mNHUveld+ZyXjaONMHZI5WQ16AMZwtHunUu/42k+o6RSS4h+zT8 - - ZiWW5cxZVLn6xqJkDkXMDdsS7PrveSuODq3LuaG4fwRpf1u2hqvyuwIDAQABMA0G - - CSqGSIb3DQEBBQUAA4IBAQBfAjtVxKItKMFAQl/EufHjk4rBpRiaHGLH2CIJHWJ1 - - i+z7gI5XazzwMCprOxsCUrJUpr8ChobenyebNPJSnDI0R0z8ZTX6kTNk7A2ZFVrp - - lL5TlpwhdtUjWxF3Coi+w694MbyLmJ4pA6QZTYVqSilZZ1cncLNA+Fc97STfLukK - - wqjwCYovRVjUn4jLRjy2kcw89060xxZopVpkY9cPfg0P+PICo/eS4EunQ5rd/EDV - - 7DBfCbzthArBjF8/72J8PYhqwEc+i5PDkn2CNIXoT0coxC9YAHJ+zFHgxHnKa0/q - - TPlvi+wJKrrSnXb5Oc34tVOxDF/WQjNuve8vHg7hvaIM - - -----END CERTIFICATE----- - - -----BEGIN PRIVATE KEY----- - - MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChkGi5kf7RLWM9 - - jL4XG80CR0bGw1XOw4iDs7JtbH4MiyR1G843CsIUW28/YicomjA8J5sZwKoOoYXb - - OAUuYD3JA3/TkD1/xteEWu2N81v9hgnEMTiK1Jg2BLNJ+eK2KLsAmZ71QHD3izSn - - fvj2KsZMH75mFw/PB2A3HRiaTa4UU1TXxjFLHw8CTbDvdrfq3rUTQ/74xPqxggzU - - YDH72mb0N2uPmvRv8zuKQcYhS4Hszq3eY0dS96V35nJeNo40wdkjlZDXoAxnC0e6 - - dS7/jaT6jpFJLiH7NPxmJZblzFlUufrGomQORcwN2xLs+u95K44Orcu5obh/BGl/ - - W7aGq/K7AgMBAAECggEAI6RyFg5JQuhabmewP/TYI1qKGKtbMMQeR7/K6yz2GdpQ - - bq11rtrmugr53efPb7ukTIEITIQegB/OIfCX5AVDXCczef7mMlwxi3dr1NcNQj7h - - xLB/ItGHytL6oqVICJuvtZAuaziOM244bYMrdVM2b/DI1kjcKfYcmcwHc3MTplPq - - Nh+L5u2ue6bYvT+XRF4KrwuKmKuyJghyMeoiLI9JupkKw79ZB/l0Mh8vmxKMPj8g - - MNxoJbwoTkjQxuJELmet+ysBg2KT+gJEirfRmZiouDxx8Zukd8O6YvnlsOiRFokX - - 05r33fna1z5IBpGnwe+rn6pQaeXflSd6dqotoBp4QQKBgQDLrhAdsZnDXDpYuOv+ - - ITMpc33J4NW7yE+26ebzWkLYRUW5i7YDCtJdFi5pHCeA3+QD3RlYiinQlzcy3i3R - - 4Uv4riuKyDbgaw9sYOhmgluhPKDuznEWXomloEVu8jFrHg3TKY2v/GCcv99N5grQ - - Jg9rScFpZXkTj23KzqHf23uTEQKBgQDLENH7QzoNsBv6eS7kElBx3BQWNa0dhXab - - kRXo62/++tIDGMkzaq38hsjcAZi8uZDZY0QJTmBMdZN3LLBln5C2g8Y6Eym5ITvf - - pxkMUK0++MygbK/Vwmp+xu7XMiPNMG/E8NqQkca3F/6Ld08PAauZ8gpgoAsnjlNg - - pPUdWRCRCwKBgEiEB17bDXidjHRsGjFXVqTKZp2Ke+4oaiEgc8Zue2AOgb2GvV2l - - 67GSpSFtEa9zhvXNMSnxvuNyAwgMTFnuEaNPN1do4wjRdSNL+VIN1Vu5fz6mp2Kk - - c/NQ9YeDmQ6fG6Lzp2thum/0bCeK4IytEE5NaxwAMbRCG3/aQ4200fFRAoGAMwg5 - - HSIZ9tKpVVsbE6oemV6rlaFLrj2aPyJJFU4FyViTar/R4KAQtYPR+qhUECm6Y0d1 - - E7mkrdJmiu6qLf/ZyGR5bqLeO25Es8I0o0mrIEY6dp6Z2eiQBuhLob0yDiD8FcxJ - - wUdBX0YibD5Bmg3baEbRoNLXussj3QfXqdZ2OV0CgYEAyovcXc1ibwrwNO59yw99 - - 7zCoMFjXzZgtxn5JQDwMsdt9UKd/4nOPbbiRPL3ynr5zboDZzRxihXB5zzKjrYlE - - o4QZIWV0VgGS2eQSni3CGOsG4VhE4/9EFF7UqeA0hYkGAZMS+EKSdPpIujStD/ck - - sQ/BZiYxMSE8+synlzp3gss= - - -----END PRIVATE KEY----- - - ' - name: ca.pem - cert_source: - type: radio - value: 
user_uploaded - horizon: - type: checkbox - value: false - hostname: - type: text - value: public.fuel.local - services: - type: checkbox - value: false -service_user: - homedir: - type: hidden - value: /var/lib/fuel - name: - type: hidden - value: fuel - password: - type: hidden - value: WEwz5aKA0hYDrcERjX7irQzS - root_password: - type: hidden - value: r00tme - sudo: - type: hidden - value: 'ALL=(ALL) NOPASSWD: ALL' -storage: - admin_key: - type: hidden - value: AQDzghZXAAAAABAA7obspvgNjPa/HBWSOUzI1w== - bootstrap_osd_key: - type: hidden - value: AQDzghZXAAAAABAAWaiWslWwse+hsaKLzbtQFw== - ephemeral_ceph: - type: checkbox - value: false - fsid: - type: hidden - value: 4b0ab6f5-b82b-44e4-ac3a-15c76f960b82 - images_ceph: - type: checkbox - value: false - mon_key: - type: hidden - value: AQDzghZXAAAAABAAVi1udBHvkQbZbDgNnT7gXA== - objects_ceph: - type: checkbox - value: false - osd_pool_size: - type: text - value: '3' - radosgw_key: - type: hidden - value: AQDzghZXAAAAABAA8jY8KftsCK4l726rNdu/Zg== - volumes_block_device: - type: checkbox - value: true - volumes_ceph: - type: checkbox - value: false - volumes_lvm: - type: checkbox - value: true -syslog: - syslog_port: - type: text - value: '514' - syslog_server: - type: text - value: '' - syslog_transport: - type: radio - value: tcp -workloads_collector: - enabled: - type: hidden - value: false - password: - type: password - value: 8qtWdXhhY84wFoxwBbZcpq3P - tenant: - type: text - value: services - user: - type: text - value: fuel_stats_user diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/compute.yaml deleted file mode 100644 index 1b088197f..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/compute.yaml +++ /dev/null @@ -1,89 +0,0 @@ -tasks: - - update_hosts: - resources: [] - - openstack-network-start: - type: skipped - - openstack-network-common-config: - resources: [] - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: - resources: [] - - copy_keys_ceph: - type: copy_files - - globals: - resources: [] - - fuel_pkgs: - resources: [] - - openstack-network-agents-l3: - resources: [] - - openstack-network-agents-metadata: - resources: [] - - tools: - resources: [] - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: - resources: [] - - cgroups: - resources: [] - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - override_configuration: - resources: [] - - setup_repositories: - resources: [] - - dns-client: - resources: [] - - openstack-network-plugins-l2: - resources: [] - - allocate_hugepages: - resources: [] - - plugins_setup_repositories: - no_puppet_run: true - - ceph-compute: - no_puppet_run: true - - ssl-keys-saving: - no_puppet_run: true - - sriov_iommu_check: - resources: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - ceilometer-compute: - no_puppet_run: true - - upload_configuration: - type: upload_file - - firewall: - resources: [] - - logging: - resources: [] - - top-role-compute: - resources: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Nova_config[DEFAULT/resume_guests_state_on_host_boot] - - Nova_config[vnc/novncproxy_base_url] - - Service[nova-compute] - - sync_time: - type: shell - - openstack-network-compute-nova: - resources: [] - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: - resources: [] - - configuration_symlink: 
- type: shell - - hosts: - resources: [] - - copy_haproxy_keys: - type: copy_files - - ntp-client: - resources: [] - - ssl-add-trust-chain: - no_puppet_run: true - - reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/controller.yaml deleted file mode 100644 index d44451898..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/controller.yaml +++ /dev/null @@ -1,406 +0,0 @@ -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: - resources: [] - - cinder-db: - resources: [] - - dump_rabbitmq_definitions: - resources: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: - resources: - - Exec[rsync_core_puppet_shell] - - ceilometer-controller: - no_puppet_run: true - - override_configuration: - resources: [] - - ceilometer-keystone: - no_puppet_run: true - - nova-db: - resources: [] - - workloads_collector_add: - resources: [] - - primary-openstack-network-plugins-l2: - resources: [] - - radosgw-keystone: - resources: [] - - virtual_ips: - resources: [] - - primary-dns-server: - resources: [] - - openstack-haproxy-murano: - resources: [] - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: - resources: [] - - openstack-haproxy-swift: - resources: [] - - heat-db: - resources: [] - - openstack-haproxy-neutron: - resources: [] - - updatedb: - no_puppet_run: true - - ironic-db: - no_puppet_run: true - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: - resources: [] - - hosts: - resources: [] - - primary-rabbitmq: - resources: [] - - primary-cluster-haproxy: - resources: [] - - openstack-network-routers: - resources: [] - - reserved_ports: - resources: [] - - controller_remaining_tasks: - resources: [] - - glance-keystone: - resources: [] - - openstack-haproxy-aodh: - resources: [] - - murano-cfapi: - no_puppet_run: true - - ironic-compute: - no_puppet_run: true - - primary-openstack-network-agents-metadata: - resources: [] - - cinder-keystone: - resources: [] - - copy_keys: - type: copy_files - - enable_rados: - no_puppet_run: true - - ntp-check: - resources: [] - - aodh-db: - no_puppet_run: true - - disable_keystone_service_token: - resources: [] - - umm: - resources: [] - - memcached: - resources: [] - - allocate_hugepages: - resources: [] - - openrc-delete: - resources: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: - no_puppet_run: true - - openstack-haproxy-sahara: - resources: [] - - ssl-keys-saving: - no_puppet_run: true - - primary-cluster: - resources: [] - - upload_cirros: - type: shell - - primary-keystone: - resources: - - File[/root/openrc] - - primary-openstack-network-agents-l3: - resources: [] - - upload_configuration: - type: upload_file - - create-cinder-types: - resources: [] - - neutron-keystone: - resources: - - Keystone_endpoint[RegionOne/neutron::network] - - logging: - resources: [] - - nova-keystone: - resources: - - Keystone_endpoint[RegionOne/nova::compute] - - Keystone_endpoint[RegionOne/novav3::computev3] - - update_hosts: - resources: [] - - ironic-keystone: - no_puppet_run: true - - connectivity_tests: - resources: [] - - swift-storage: - resources: [] - - primary-heat: - resources: - - 
Heat_config[keystone_authtoken/auth_uri] - - conntrackd: - resources: [] - - sahara-db: - no_puppet_run: true - - horizon: - resources: - - File[/var/lib/puppet/concat/_etc_openstack-dashboard_local_settings.py/fragments/50_local_settings.py] - - File[/etc/openstack-dashboard/local_settings.py] - - Exec[concat_/etc/openstack-dashboard/local_settings.py] - - openstack-haproxy-ceilometer: - resources: [] - - openstack-network-common-config: - resources: [] - - firewall: - resources: [] - - apache: - resources: [] - - globals: - resources: - - File[/etc/hiera/globals.yaml] - - aodh-keystone: - no_puppet_run: true - - glance: - resources: - - Glance_swift_config[ref1/user] - - Glance_glare_config[glance_store/swift_store_create_container_on_put] - - Glance_glare_config[glance_store/swift_store_config_file] - - Package[swift] - - Glance_api_config[glance_store/default_store] - - Glance_api_config[glance_store/swift_store_create_container_on_put] - - Glance_swift_config[ref1/auth_address] - - Glance_api_config[glance_store/swift_store_endpoint_type] - - Glance_glare_config[DEFAULT/default_log_levels] - - Glance_api_config[glance_store/stores] - - Glance_cache_config[DEFAULT/image_cache_max_size] - - Glance_glare_config[glance_store/default_store] - - Glance_registry_config[DEFAULT/default_log_levels] - - Glance_api_config[DEFAULT/default_log_levels] - - Glance_glare_config[glance_store/swift_store_region] - - Glance_cache_config[DEFAULT/debug] - - Glance_glare_config[glance_store/swift_store_endpoint_type] - - Glance_api_config[DEFAULT/debug] - - Glance_glare_config[DEFAULT/debug] - - Glance_api_config[glance_store/swift_store_config_file] - - Glance_api_config[DEFAULT/show_image_direct_url] - - Glance_glare_config[glance_store/stores] - - Glance_api_config[glance_store/default_swift_reference] - - Glance_glare_config[glance_store/swift_store_large_object_size] - - Glance_registry_config[DEFAULT/debug] - - Glance_api_config[glance_store/swift_store_large_object_size] - - Glance_glare_config[glance_store/default_swift_reference] - - Glance_swift_config[ref1/key] - - Glance_glare_config[glance_store/swift_store_container] - - Glance_api_config[glance_store/swift_store_container] - - Glance_api_config[glance_store/swift_store_region] - - tools: - resources: [] - - openstack-haproxy: - resources: [] - - cgroups: - resources: [] - - murano-cfapi-keystone: - no_puppet_run: true - - aodh: - no_puppet_run: true - - ceph_create_pools: - no_puppet_run: true - - openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: - resources: [] - - openstack-network-routers-ha: - no_puppet_run: true - - glance-db: - resources: [] - - neutron-db: - resources: [] - - ironic_upload_images: - type: shell - - swift-rebalance-cron: - resources: - - File[/usr/local/bin/swift-rings-rebalance.sh] - - Cron[swift-rings-rebalance] - - primary-ceph-mon: - resources: [] - - openstack-haproxy-stats: - resources: [] - - ironic-api: - no_puppet_run: true - - primary-ceph-radosgw: - resources: [] - - dns-client: - resources: [] - - cluster-vrouter: - resources: [] - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: - resources: [] - - cluster_health: - resources: [] - - heat-keystone: - resources: - - Keystone_endpoint[RegionOne/heat-cfn::cloudformation] - - Keystone_endpoint[RegionOne/heat::orchestration] - - openstack-haproxy-horizon: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/00_horizon_listen_block] - - File[/etc/haproxy/conf.d/015-horizon.cfg] - - 
File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/01-horizon_horizon_balancermember_horizon] - - Exec[concat_/etc/haproxy/conf.d/015-horizon.cfg] - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - resources: - - Exec[clear_nodes_info_shell] - - copy_keys_ceph: - type: copy_files - - sahara: - no_puppet_run: true - - fuel_pkgs: - resources: [] - - swift-keystone: - resources: - - Keystone_role[SwiftOperator] - - Keystone_user[swift] - - Keystone_endpoint[RegionOne/swift::object-store] - - Keystone_user_role[swift@services] - - Keystone_service[swift_s3::s3] - - Keystone_endpoint[RegionOne/swift_s3::s3] - - public_vip_ping: - resources: [] - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_080-glance-api.cfg/fragments/00_glance-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/080-glance-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_081-glance-glare.cfg/fragments/00_glance-glare_listen_block] - - File[/etc/haproxy/conf.d/080-glance-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/081-glance-glare.cfg] - - File[/etc/haproxy/conf.d/081-glance-glare.cfg] - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: - resources: - - File[/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_040-nova-api.cfg/fragments/00_nova-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_170-nova-novncproxy.cfg/fragments/00_nova-novncproxy_listen_block] - - Exec[concat_/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - openstack-network-server-config: - resources: [] - - primary-database: - resources: - - File[/root/.my.cnf] - - openstack-haproxy-cinder: - resources: - - File[/etc/haproxy/conf.d/070-cinder-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/070-cinder-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_070-cinder-api.cfg/fragments/00_cinder-api_listen_block] - - ntp-server: - resources: [] - - murano-keystone: - no_puppet_run: true - - primary-openstack-network-agents-dhcp: - resources: - - Neutron_dhcp_agent_config[DEFAULT/debug] - - openstack-haproxy-heat: - resources: [] - - primary-openstack-controller: - resources: - - Nova_config[DEFAULT/quota_driver] - - Nova_config[DEFAULT/debug] - - Nova_config[DEFAULT/default_log_levels] - - openstack-cinder: - resources: - - Cinder_config[DEFAULT/scheduler_default_filters] - - Cinder_config[DEFAULT/default_log_levels] - - Cinder_config[DEFAULT/debug] - - keystone-db: - resources: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: - resources: [] - - copy_haproxy_keys: - type: copy_files - - primary-swift-proxy: - resources: - - Swift_dispersion_config[dispersion/dispersion_coverage] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/81_swift_container_quotas] - - Ring_account_device[10.109.2.2:6002/1] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf] - - File[/etc/swift/dispersion.conf] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/31_swift-proxy-formpost] - - Package[swift-proxy] - - Exec[create_account] - - Swift_dispersion_config[dispersion/auth_url] - - Swift_dispersion_config[dispersion/dump_json] - - 
Ring_container_device[10.109.2.2:6001/2] - - File[/var/lib/swift] - - File[/etc/xinetd.d/rsync] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/00_swift_proxy] - - Swift_dispersion_config[dispersion/concurrency] - - File[/etc/swift/swift.conf] - - Swift_config[swift-hash/swift_hash_path_suffix] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/25_swift_healthcheck] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/80_swift_account_quotas] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/35_swift_slo] - - Swift_dispersion_config[dispersion/retries] - - File[/var/cache/swift] - - File[/etc/swift/proxy-server.conf] - - Package[swift-plugin-s3] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_server_frag-swift_server] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/24_swift_catch_errors] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/28_swift_s3token] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/35_swift_crossdomain] - - File[/var/run/swift] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments.concat] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/29_swift-proxy-tempurl] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/26_swift_ratelimit] - - Swift_dispersion_config[dispersion/auth_user] - - Swift_dispersion_config[dispersion/endpoint_type] - - Exec[create_object] - - Ring_object_device[10.109.2.2:6000/1] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf] - - Swift_dispersion_config[dispersion/auth_key] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments.concat.out] - - Exec[create_container] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/23_swift_cache] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/32_swift-proxy-staticweb] - - Swift_dispersion_config[dispersion/auth_version] - - Exec[concat_/etc/swift/proxy-server.conf] - - Ring_object_device[10.109.2.2:6000/2] - - Exec[concat_/etc/rsyncd.conf] - - Service[swift-proxy-server] - - Swift_dispersion_config[dispersion/swift_dir] - - File[/etc/rsyncd.conf] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/79_swift_keystone] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/00_header_rsyncd_conf_header] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/27_swift_swift3] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments.concat] - - Ring_account_device[10.109.2.2:6002/2] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/22_swift_authtoken] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments] - - Ring_container_device[10.109.2.2:6001/1] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_backups_frag-swift_backups] - - Swift_config[swift-constraints/max_header_size] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments.concat.out] - - File[/etc/swift] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/21_swift_bulk] - - openstack-network-networks: - resources: [] - - ssl-add-trust-chain: - no_puppet_run: true \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/nodes_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/nodes_settings.yaml deleted 
file mode 100644 index e1d8e93e9..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/ensurability/nodes_settings.yaml +++ /dev/null @@ -1,51 +0,0 @@ -controller: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -ceph-osd: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -compute: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/ceph-osd.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/ceph-osd.yaml deleted file mode 100644 index 18c4d7815..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/ceph-osd.yaml +++ /dev/null @@ -1,46 +0,0 @@ -roles: - ceph-osd -tasks: - - update_hosts: null - - clear_nodes_info: - type: skipped - - override_configuration: null - - globals: null - - fuel_pkgs: null - - tools: null - - rsync_core_puppet: - type: sync - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ssl-keys-saving: null - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - top-role-ceph-osd: null - - logging: null - - updatedb: null - - sync_time: - type: shell - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/compute.yaml deleted file mode 100644 index 1bc0b57aa..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/compute.yaml +++ /dev/null @@ -1,66 +0,0 @@ -roles: - compute -tasks: - - update_hosts: null - - openstack-network-start: - type: skipped - - openstack-network-common-config: null - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: null - - override_configuration: null - - globals: null - - fuel_pkgs: null - - openstack-network-agents-l3: null - - openstack-network-agents-metadata: null - - tools: null - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: null - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - openstack-network-plugins-l2: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - ceph-compute: null - - ssl-keys-saving: null - - sriov_iommu_check: - skip: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - logging: null - - 
top-role-compute: - skip: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Service[nova-compute] - - sync_time: - type: shell - - openstack-network-compute-nova: null - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - ceilometer-compute: - no_puppet_run: true - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null diff --git a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/controller.yaml b/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/controller.yaml deleted file mode 100644 index ef1947554..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/1_ctrl_1_cmp_3_ceph/idempotency/controller.yaml +++ /dev/null @@ -1,194 +0,0 @@ -roles: - controller -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: null - - cinder-db: null - - dump_rabbitmq_definitions: - skip: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: null - - ceilometer-controller: - no_puppet_run: true - - override_configuration: null - - ceilometer-keystone: - no_puppet_run: true - - nova-db: null - - workloads_collector_add: null - - primary-openstack-network-plugins-l2: null - - radosgw-keystone: null - - virtual_ips: null - - primary-dns-server: null - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: null - - openstack-haproxy-swift: null - - heat-db: null - - openstack-haproxy-neutron: null - - updatedb: null - - ironic-db: - no_puppet_run: true - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: null - - hosts: null - - primary-rabbitmq: null - - primary-cluster-haproxy: null - - openstack-network-routers: null - - reserved_ports: null - - controller_remaining_tasks: null - - glance-keystone: null - - openstack-haproxy-aodh: null - - murano-cfapi: - no_puppet_run: true - - ironic-compute: - no_puppet_run: true - - primary-openstack-network-agents-metadata: null - - cinder-keystone: null - - copy_keys: - type: copy_files - - enable_rados: null - - ntp-check: null - - aodh-db: - no_puppet_run: true - - disable_keystone_service_token: - no_puppet_run: true - - umm: null - - memcached: null - - allocate_hugepages: null - - openrc-delete: - skip: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: - no_puppet_run: true - - openstack-haproxy-sahara: - no_puppet_run: true - - ssl-keys-saving: null - - primary-cluster: - skip: - - Pcmk_property[no-quorum-policy] - - upload_cirros: - type: shell - - primary-keystone: - skip: - - File[/root/openrc] - - primary-openstack-network-agents-l3: null - - upload_configuration: - type: upload_file - - create-cinder-types: null - - neutron-keystone: null - - logging: null - - nova-keystone: null - - update_hosts: null - - ironic-keystone: - no_puppet_run: true - - connectivity_tests: null - - primary-heat: null - - conntrackd: null - - sahara-db: - no_puppet_run: true - - horizon: null - - openstack-haproxy-ceilometer: - no_puppet_run: true - - openstack-network-common-config: null - - firewall: null - - apache: null - - globals: null - - aodh-keystone: - no_puppet_run: true - - glance: null - - tools: null - - openstack-haproxy: null - - cgroups: null - - murano-cfapi-keystone: - 
no_puppet_run: true - - aodh: - no_puppet_run: true - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: null - - openstack-network-routers-ha: - no_puppet_run: true - - upload_murano_package: - no_puppet_run: true - - glance-db: null - - neutron-db: null - - ironic_upload_images: - type: shell - - swift-rebalance-cron: - no_puppet_run: true - - primary-ceph-mon: null - - openstack-haproxy-stats: null - - ironic-api: - no_puppet_run: true - - primary-ceph-radosgw: null - - dns-client: null - - cluster-vrouter: null - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: null - - cluster_health: null - - heat-keystone: null - - openstack-haproxy-horizon: null - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - no_puppet_run: true - - copy_keys_ceph: - type: copy_files - - sahara: - no_puppet_run: true - - fuel_pkgs: null - - swift-keystone: - no_puppet_run: true - - public_vip_ping: null - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: null - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: null - - openstack-network-server-config: null - - primary-database: - skip: - - File[/root/.my.cnf] - - openstack-haproxy-cinder: null - - ntp-server: null - - murano-keystone: - no_puppet_run: true - - swift-proxy_storage: - no_puppet_run: true - - primary-openstack-network-agents-dhcp: null - - openstack-haproxy-heat: null - - primary-openstack-controller: null - - openstack-cinder: null - - ceph_create_pools: null - - keystone-db: - skip: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: null - - copy_haproxy_keys: - type: copy_files - - openstack-network-networks: null - - ssl-add-trust-chain: null diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/ceph-osd_compute.yaml b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/ceph-osd_compute.yaml deleted file mode 100644 index 2b3e9a996..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/ceph-osd_compute.yaml +++ /dev/null @@ -1,106 +0,0 @@ -tasks: - - plugins_rsync: - resources: - - Nova_config[libvirt/rbd_user] - - Nova_config[libvirt/rbd_secret_uuid] - - Nova_config[libvirt/images_rbd_pool] - - File[/etc/ceph/ceph.client.compute.keyring] - - Exec[Create compute pool] - - Exec[Populate compute keyring] - - Exec[Create compute Cephx user and ACL] - - Nova_config[libvirt/images_type] - - Exec[Set Ceph RBD secret for Nova] - - File[/root/.secret_attrs.xml] - - openstack-network-start: - type: skipped - - openstack-network-common-config: - resources: [] - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: - resources: [] - - ceilometer-compute: - no_puppet_run: true - - globals: - resources: [] - - fuel_pkgs: - resources: [] - - openstack-network-agents-l3: - resources: [] - - configuration_symlink: - type: shell - - tools: - resources: [] - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: - resources: [] - - cgroups: - resources: [] - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - sync_time: - type: shell - - override_configuration: - resources: [] - - setup_repositories: - resources: [] - - openstack-network-plugins-l2: - resources: [] - - allocate_hugepages: - resources: [] - - 
plugins_setup_repositories: - no_puppet_run: true - - connectivity_tests: - resources: [] - - ceph-compute: - no_puppet_run: true - - ssl-keys-saving: - no_puppet_run: true - - sriov_iommu_check: - resources: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - copy_deleted_nodes: - type: copy_files - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: - resources: [] - - top-role-ceph-osd: - resources: [] - - logging: - resources: [] - - updatedb: - no_puppet_run: true - - top-role-compute: - resources: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Nova_config[DEFAULT/resume_guests_state_on_host_boot] - - Nova_config[vnc/novncproxy_base_url] - - Nova_config[DEFAULT/vcpu_pin_set] - - Service[nova-compute] - - openstack-network-compute-nova: - resources: [] - - update_hosts: - resources: [] - - dns-client: - resources: [] - - openstack-network-agents-metadata: - resources: [] - - hosts: - resources: [] - - copy_haproxy_keys: - type: copy_files - - ntp-client: - resources: [] - - ssl-add-trust-chain: - no_puppet_run: true - - reserved_ports: - resources: [] \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/cluster_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/cluster_settings.yaml deleted file mode 100644 index 7a1458646..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/cluster_settings.yaml +++ /dev/null @@ -1,421 +0,0 @@ -access: - email: - type: text - value: admin_upd@localhost - password: - type: password - value: admin - tenant: - type: text - value: admin - user: - type: text - value: admin -additional_components: - ceilometer: - type: checkbox - value: false - heat: - type: hidden - value: false - ironic: - type: checkbox - value: false - mongo: - type: checkbox - value: false - murano: - type: checkbox - value: false - murano-cfapi: - type: checkbox - value: false - murano_glance_artifacts_plugin: - type: checkbox - value: false - sahara: - type: checkbox - value: false -common: - auth_key: - type: hidden - value: '' - auto_assign_floating_ip: - type: checkbox - value: true - debug: - type: checkbox - value: false - libvirt_type: - type: radio - value: qemu - nova_quota: - type: checkbox - value: true - propagate_task_deploy: - type: hidden - value: false - puppet_debug: - type: checkbox - value: false - resume_guests_state_on_host_boot: - type: checkbox - value: false - task_deploy: - type: hidden - value: true - use_cow_images: - type: checkbox - value: true -corosync: - group: - type: text - value: 226.94.1.1 - port: - type: text - value: '12000' - verified: - type: checkbox - value: false -external_mongo: - hosts_ip: - type: text - value: '' - mongo_db_name: - type: text - value: ceilometer - mongo_password: - type: password - value: ceilometer - mongo_replset: - type: text - value: '' - mongo_user: - type: text - value: ceilometer -kernel_params: - kernel: - type: text - value: console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset -murano_settings: - murano_repo_url: - type: text - value: http://storage.apps.openstack.org/ -neutron_advanced_configuration: - neutron_dvr: - type: checkbox - value: false - neutron_l2_pop: - type: checkbox - value: false - neutron_l3_ha: - type: checkbox - value: false - neutron_qos: - type: checkbox - value: false -operator_user: - authkeys: - type: textarea - value: '' 
- homedir: - type: text - value: /home/fueladmin - name: - type: text - value: fueladmin - password: - type: password - value: DulGjE3FkHxyXoi9vrRyU8y2 - sudo: - type: textarea - value: 'ALL=(ALL) NOPASSWD: ALL' -provision: - method: - type: hidden - value: image - packages: - type: textarea - value: 'acl - - anacron - - bash-completion - - bridge-utils - - bsdmainutils - - build-essential - - cloud-init - - curl - - daemonize - - debconf-utils - - gdisk - - grub-pc - - hpsa-dkms - - hwloc - - i40e-dkms - - linux-firmware - - linux-firmware-nonfree - - linux-headers-generic-lts-trusty - - linux-image-generic-lts-trusty - - lvm2 - - mcollective - - mdadm - - multipath-tools - - multipath-tools-boot - - nailgun-agent - - nailgun-mcagents - - network-checker - - ntp - - openssh-client - - openssh-server - - puppet - - python-amqp - - ruby-augeas - - ruby-ipaddress - - ruby-json - - ruby-netaddr - - ruby-openstack - - ruby-shadow - - ruby-stomp - - telnet - - ubuntu-minimal - - ubuntu-standard - - uuid-runtime - - vim - - virt-what - - vlan - - ' -public_network_assignment: - assign_to_all_nodes: - type: checkbox - value: false -public_ssl: - cert_data: - type: file - value: - content: '-----BEGIN CERTIFICATE----- - - MIIC2zCCAcMCAgPoMA0GCSqGSIb3DQEBBQUAMDMxFTATBgNVBAsMDEZ1ZWwtUUEg - - VGVhbTEaMBgGA1UEAwwRcHVibGljLmZ1ZWwubG9jYWwwHhcNMTYwNTEwMTIxMDU5 - - WhcNMjYwNTA4MTIxMDU5WjAzMRUwEwYDVQQLDAxGdWVsLVFBIFRlYW0xGjAYBgNV - - BAMMEXB1YmxpYy5mdWVsLmxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB - - CgKCAQEAv1QRq6UmK/Tlj3Mk4lvZ5Iytm0Cq+tlVeCU0uXshLT12JZhz8DbQG9B+ - - p2mU/lZdw+8f5W7S3+Tm8jCnXbUM8AIhiGxQLREP67SCXsQHFKGivQrYeRl/6qI+ - - PS34ufQ9tBRitd5M3Gsm1JJYG1GYNr3cormukvldn5AnHl4rvFSlXnCy0p0kipaJ - - PEoA0Db0pmrJ9bL8k+aY7D/20yHNIz/HJRz68CX2G9dAo9ZuzGOKvCHAFl5eyUPb - - kk0YS1KggyXB6pxgw4eZh/IfqvOQ5Pwfqff1dDDV6I9WmSx1srbVYkrcOYmZkEIv - - OVhtsdJfHBV9vUnJG2asRxN/VyyiBQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQCl - - zVkc3W+xpsQOrBdu99RBS1Y2n71J3jQ4Y7vKgWObW2DLz2e5KIz+YabJiVDdku4w - - U37gfbtFlFqWtIrujALm8wpUOn0DNONF4UkrJprnZ68zWuxSZbsohv/Kzb++cLMY - - QIy1Bd1YTDLpe4VoPFVlFKyDddp47YRLGAMz7s3R9q5saW5eFEoU6t7R3/mqXG9V - - s7LQYedf0PU+MSVe1DBivHnWJHWDws+cEiBDoPeoextHw8BYVrNfDH23JZhgL2Df - - uHT7yTe6wzs8odrZ0hRGtaHyt1q53Nuzk+hZYOvyKFy4hFZwOrK6dmKWgKI2R76s - - s+i9Yapl+QW2HJ2yqf9d - - -----END CERTIFICATE----- - - -----BEGIN PRIVATE KEY----- - - MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC/VBGrpSYr9OWP - - cyTiW9nkjK2bQKr62VV4JTS5eyEtPXYlmHPwNtAb0H6naZT+Vl3D7x/lbtLf5Oby - - MKddtQzwAiGIbFAtEQ/rtIJexAcUoaK9Cth5GX/qoj49Lfi59D20FGK13kzcaybU - - klgbUZg2vdyiua6S+V2fkCceXiu8VKVecLLSnSSKlok8SgDQNvSmasn1svyT5pjs - - P/bTIc0jP8clHPrwJfYb10Cj1m7MY4q8IcAWXl7JQ9uSTRhLUqCDJcHqnGDDh5mH - - 8h+q85Dk/B+p9/V0MNXoj1aZLHWyttViStw5iZmQQi85WG2x0l8cFX29SckbZqxH - - E39XLKIFAgMBAAECggEAZbhxiJtOFxKltcWW942z47HzXtZb0PAZY74MMQzIj7/c - - f5J1GWJa9F69543Xu/z47wHEKHvqO0T8kAjUApsOXNAVOe20FrG6kCsxIOcBiLOC - - zRTmEERBYOKPSoGVem/jGkm4TU75+Dozs9J2a1fPIFOC6TTlHpjbcMj4M0oH4KEH - - KHIIG3ETW05wJ746K7Er3b6frpZGBV3vCD2NP7XNYWrJYmuamvDHqkTymDBQIi9z - - b+RkFxXbk6foE1SnRf8NM+SGJi1q6ckSjP0nJd99+vDt9vh7l5BPdpdE5GvDiVi4 - - XosHhiK8eYB4jgQALQL5rWSKExJ0ToMYFTzicKX1RQKBgQD5PiZ/+wxiJfTJkV0k - - mwSipOnEWfdVCrux8Y3kvrHcYCdG6qKTd8TkbTpC/ZFc9923WyCcgl1fALoKki8P - - W4Us3TYAOammuY09TDPjj5efgu0ybULgdcEiE6u4O47MpYDiWYYxUisrejLA9BDl - - SVVWfqOVoZiYEQmkeEHDoQVt/wKBgQDEg/fvT2AoqBS+jpHFCuN810aZ0LyCdPho - - uRSWEjXu7lchWDhJQR4jegvrpdKCUfU8Ft1bp/leK2vdGYEI2eQDgBIkMqjZefFV - - 3ORyYXn+hEhNUVmNoqIcmL8fDg/72M0MKO55EPPNZnRHUb5doeYOtAObPoHBRkM1 - - 
OeIvo8g3+wKBgQC67tsvR5n9DtyuRSEnsEBkv+04QAWZucgWaMX0VvXNBCzRKm+u - - zLhWXnnA/q8EnD6lI2qtF7Vx6HigLw2upgSXJn424wiTrDz7SWjtdqfQkgDWvk3X - - TVoB9eDn93tdj4+/tEdKPId2P1tLjoIRhdPEYIzOT/nDq5ugyDUCxYTt/wKBgQCr - - ojMugR4UBFEx6WUpOoxbYc4Y03WBX1YCaPuw4rDWQLY6uklWuZfwFk1JACguiBRu - - 6e1levzSIYxwrDDFoUCDSh0OHmzYnKeKfYXM3JPnFnPri4xypsN9xG7QWcDeDolw - - AGPfu02kbMJL8/oW3ysGVnT75wj+3DYgy7BgOWTKdwKBgD9hKDYDgOJSMTajS9Wv - - EAKn2LZ8W73RBeGHRf1ohPhlOpSn82Bw392NfRfaY+rq6gHSXAkj6fzxfPmzB80o - - o4XfeSLYeTWr2o81fIZY0eeeXFGlbzCGElPNqLBR585bzdtaCaXbzPVjr/ksTB5j - - pq7lIfIMDOaiUEf/PHc1byIw - - -----END PRIVATE KEY----- - - ' - name: ca.pem - cert_source: - type: radio - value: user_uploaded - horizon: - type: checkbox - value: false - hostname: - type: text - value: public.fuel.local - services: - type: checkbox - value: false -service_user: - homedir: - type: hidden - value: /var/lib/fuel - name: - type: hidden - value: fuel - password: - type: hidden - value: 58V3KqzTYKv3kln6D3bkpvA0 - root_password: - type: hidden - value: r00tme - sudo: - type: hidden - value: 'ALL=(ALL) NOPASSWD: ALL' -storage: - admin_key: - type: hidden - value: AQDLzzFXAAAAABAA4+pFrUekVhZ/UZiH8hzB3w== - bootstrap_osd_key: - type: hidden - value: AQDLzzFXAAAAABAAlttJW3NCcPGkvnTLP9Lz4g== - ephemeral_ceph: - type: checkbox - value: false - fsid: - type: hidden - value: 1c80fd26-fe2f-44a9-b9f4-74e6235eaa46 - images_ceph: - type: checkbox - value: false - mon_key: - type: hidden - value: AQDLzzFXAAAAABAAD8tvscw1K5KOBsEHbfxTGg== - objects_ceph: - type: checkbox - value: false - osd_pool_size: - type: text - value: '3' - radosgw_key: - type: hidden - value: AQDLzzFXAAAAABAA5IKvoBQEHTMdItiodpG3dw== - volumes_block_device: - type: checkbox - value: true - volumes_ceph: - type: checkbox - value: false - volumes_lvm: - type: checkbox - value: true -syslog: - syslog_port: - type: text - value: '514' - syslog_server: - type: text - value: '' - syslog_transport: - type: radio - value: tcp -workloads_collector: - enabled: - type: hidden - value: false - password: - type: password - value: MPe3muVqcTsKREgBkdJt1U8u - tenant: - type: text - value: services - user: - type: text - value: fuel_stats_user \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/controller_mongo.yaml b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/controller_mongo.yaml deleted file mode 100644 index ca94b3b5d..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/controller_mongo.yaml +++ /dev/null @@ -1,268 +0,0 @@ -tasks: - - openstack-haproxy-mysqld: - resources: [] - - openstack-network-agents-l3: - resources: [] - - dump_rabbitmq_definitions: - resources: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: - resources: - - Exec[rsync_core_puppet_shell] - - ceilometer-controller: - no_puppet_run: true - - override_configuration: - resources: [] - - ceph_create_pools: - no_puppet_run: true - - virtual_ips: - resources: [] - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: - resources: [] - - openstack-haproxy-swift: - resources: [] - - openstack-haproxy-neutron: - resources: [] - - updatedb: - no_puppet_run: true - - plugins_rsync: - no_puppet_run: true - - openstack-controller: - resources: [] - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: - resources: [] - - hosts: - 
resources: [] - - ntp-client: - resources: [] - - reserved_ports: - resources: [] - - controller_remaining_tasks: - resources: [] - - openstack-haproxy-aodh: - resources: [] - - murano-cfapi: - no_puppet_run: true - - rabbitmq: - resources: [] - - ironic-compute: - no_puppet_run: true - - dns-server: - resources: [] - - swift-proxy_storage: - resources: [] - - copy_keys: - type: copy_files - - enable_rados: - no_puppet_run: true - - ntp-check: - resources: [] - - keystone: - resources: [] - - disable_keystone_service_token: - no_puppet_run: true - - umm: - resources: [] - - ceph-mon: - resources: [] - - memcached: - resources: [] - - allocate_hugepages: - resources: [] - - openrc-delete: - resources: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - ceph-radosgw: - resources: [] - - openstack-haproxy-sahara: - no_puppet_run: true - - ssl-keys-saving: - no_puppet_run: true - - apache: - resources: [] - - upload_configuration: - type: upload_file - - logging: - resources: [] - - update_hosts: - resources: [] - - connectivity_tests: - resources: [] - - openstack-network-agents-metadata: - resources: [] - - conntrackd: - resources: [] - - horizon: - resources: - - File[/var/lib/puppet/concat/_etc_openstack-dashboard_local_settings.py/fragments/50_local_settings.py] - - File[/etc/openstack-dashboard/local_settings.py] - - Exec[concat_/etc/openstack-dashboard/local_settings.py] - - openstack-haproxy-ceilometer: - no_puppet_run: true - - openstack-network-common-config: - resources: [] - - firewall: - resources: [] - - cluster-haproxy: - resources: [] - - globals: - resources: - - File[/etc/hiera/globals.yaml] - - glance: - resources: - - Glance_swift_config[ref1/user] - - Glance_glare_config[glance_store/swift_store_create_container_on_put] - - Glance_glare_config[glance_store/swift_store_config_file] - - Package[swift] - - Glance_api_config[glance_store/default_store] - - Glance_api_config[glance_store/swift_store_create_container_on_put] - - Glance_swift_config[ref1/auth_address] - - Glance_api_config[glance_store/swift_store_endpoint_type] - - Glance_glare_config[DEFAULT/default_log_levels] - - Glance_api_config[glance_store/stores] - - Glance_cache_config[DEFAULT/image_cache_max_size] - - Glance_glare_config[glance_store/default_store] - - Glance_registry_config[DEFAULT/default_log_levels] - - Glance_api_config[DEFAULT/notification_driver] - - Glance_api_config[DEFAULT/default_log_levels] - - Glance_glare_config[glance_store/swift_store_region] - - Glance_cache_config[DEFAULT/debug] - - Glance_glare_config[glance_store/swift_store_endpoint_type] - - Glance_api_config[DEFAULT/debug] - - Glance_glare_config[DEFAULT/debug] - - Glance_api_config[glance_store/swift_store_config_file] - - Glance_api_config[DEFAULT/show_image_direct_url] - - Glance_glare_config[glance_store/stores] - - Glance_api_config[glance_store/default_swift_reference] - - Glance_glare_config[glance_store/swift_store_large_object_size] - - Glance_registry_config[DEFAULT/debug] - - Glance_api_config[glance_store/swift_store_large_object_size] - - Glance_glare_config[glance_store/default_swift_reference] - - Glance_swift_config[ref1/key] - - Glance_glare_config[glance_store/swift_store_container] - - Glance_api_config[glance_store/swift_store_container] - - Glance_api_config[glance_store/swift_store_region] - - tools: - resources: [] - - openstack-haproxy: - resources: [] - - cgroups: - resources: [] - - aodh: - no_puppet_run: true - - copy_deleted_nodes: - type: copy_files - - 
openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: - resources: [] - - swift-rebalance-cron: - resources: - - Cron[swift-rings-sync] - - File[/usr/local/bin/swift-rings-sync.sh] - - heat: - resources: - - Heat_config[DEFAULT/reauthentication_auth_method] - - Heat_config[keystone_authtoken/auth_uri] - - Heat_config[DEFAULT/default_log_levels] - - Heat_config[DEFAULT/debug] - - openstack-haproxy-stats: - resources: [] - - ironic-api: - no_puppet_run: true - - top-role-mongo: - no_puppet_run: true - - dns-client: - resources: [] - - cluster-vrouter: - resources: [] - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: - resources: [] - - cluster_health: - resources: [] - - openstack-haproxy-horizon: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/00_horizon_listen_block] - - File[/etc/haproxy/conf.d/015-horizon.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/01-horizon_horizon_balancermember_horizon] - - Exec[concat_/etc/haproxy/conf.d/015-horizon.cfg] - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - copy_keys_ceph: - type: copy_files - - cluster: - resources: [] - - sahara: - no_puppet_run: true - - fuel_pkgs: - resources: [] - - public_vip_ping: - resources: [] - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_080-glance-api.cfg/fragments/00_glance-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/080-glance-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_081-glance-glare.cfg/fragments/00_glance-glare_listen_block] - - File[/etc/haproxy/conf.d/080-glance-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/081-glance-glare.cfg] - - File[/etc/haproxy/conf.d/081-glance-glare.cfg] - - murano: - no_puppet_run: true - - openstack-network-plugins-l2: - resources: [] - - openstack-network-agents-dhcp: - resources: - - Neutron_dhcp_agent_config[DEFAULT/debug] - - openstack-haproxy-nova: - resources: - - File[/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_040-nova-api.cfg/fragments/00_nova-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_170-nova-novncproxy.cfg/fragments/00_nova-novncproxy_listen_block] - - Exec[concat_/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - openstack-network-server-config: - resources: [] - - openstack-haproxy-cinder: - resources: - - File[/etc/haproxy/conf.d/070-cinder-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/070-cinder-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_070-cinder-api.cfg/fragments/00_cinder-api_listen_block] - - ntp-server: - resources: [] - - openstack-haproxy-heat: - no_puppet_run: true - - openstack-cinder: - resources: - - Cinder_config[DEFAULT/scheduler_default_filters] - - Cinder_config[DEFAULT/default_log_levels] - - Cinder_config[DEFAULT/debug] - - sync_time: - type: shell - - database: - resources: - - File[mysql-config-file] - - configuration_symlink: - type: shell - - openstack-network-server-nova: - resources: [] - - copy_haproxy_keys: - type: copy_files - - ssl-add-trust-chain: - no_puppet_run: true \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/nodes_settings.yaml b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/nodes_settings.yaml 
deleted file mode 100644 index 08febc75d..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/nodes_settings.yaml +++ /dev/null @@ -1,34 +0,0 @@ -ceph-osd_compute: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 -controller_mongo: - cpu_pinning: - dpdk: - type: number - value: 1 - nova: - type: number - value: 1 - hugepages: - dpdk: - type: number - value: 128 - nova: - type: custom_hugepages - value: - '1048576': 1 - '2048': 550 \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/primary-controller_mongo.yaml b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/primary-controller_mongo.yaml deleted file mode 100644 index 1a823551a..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/ensurability/primary-controller_mongo.yaml +++ /dev/null @@ -1,468 +0,0 @@ -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: - resources: [] - - top-role-primary-mongo: - no_puppet_run: true - - cinder-db: - resources: [] - - dump_rabbitmq_definitions: - resources: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - create-cinder-types: - resources: [] - - ceilometer-controller: - no_puppet_run: true - - override_configuration: - resources: [] - - ceilometer-keystone: - no_puppet_run: true - - ceph_create_pools: - no_puppet_run: true - - workloads_collector_add: - resources: [] - - primary-openstack-network-plugins-l2: - resources: [] - - radosgw-keystone: - resources: [] - - virtual_ips: - resources: [] - - primary-dns-server: - resources: [] - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: - resources: [] - - openstack-haproxy-swift: - resources: [] - - heat-db: - resources: [] - - openstack-haproxy-neutron: - resources: [] - - updatedb: - no_puppet_run: true - - ironic-db: - no_puppet_run: true - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: - no_puppet_run: true - - openstack-haproxy-keystone: - resources: [] - - hosts: - resources: [] - - primary-rabbitmq: - resources: [] - - primary-cluster-haproxy: - resources: [] - - openstack-network-routers: - resources: [] - - reserved_ports: - resources: [] - - controller_remaining_tasks: - resources: [] - - glance-keystone: - resources: [] - - openstack-haproxy-aodh: - resources: [] - - murano-cfapi: - no_puppet_run: true - - ironic-compute: - no_puppet_run: true - - primary-openstack-network-agents-metadata: - resources: [] - - cinder-keystone: - resources: [] - - copy_keys: - type: copy_files - - enable_rados: - no_puppet_run: true - - ntp-check: - resources: [] - - aodh-db: - no_puppet_run: true - - disable_keystone_service_token: - no_puppet_run: true - - umm: - resources: [] - - memcached: - resources: [] - - allocate_hugepages: - resources: [] - - openrc-delete: - resources: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: - no_puppet_run: true - - openstack-haproxy-sahara: - no_puppet_run: true - - ssl-keys-saving: - no_puppet_run: true - - primary-cluster: - resources: [] - - upload_cirros: - type: shell - - primary-keystone: - resources: - - File[/root/openrc] - - primary-openstack-network-agents-l3: - resources: [] - - 
upload_configuration: - type: upload_file - - ssl-dns-setup: - no_puppet_run: true - - neutron-keystone: - resources: - - Keystone_endpoint[RegionOne/neutron::network] - - logging: - resources: [] - - ntp-client: - resources: [] - - nova-keystone: - resources: - - Keystone_endpoint[RegionOne/nova::compute] - - Keystone_endpoint[RegionOne/novav3::computev3] - - update_hosts: - resources: [] - - ironic-keystone: - no_puppet_run: true - - connectivity_tests: - resources: [] - - primary-heat: - resources: - - Heat_config[keystone_authtoken/auth_uri] - - conntrackd: - resources: [] - - sahara-db: - no_puppet_run: true - - horizon: - resources: - - File[/var/lib/puppet/concat/_etc_openstack-dashboard_local_settings.py/fragments/50_local_settings.py] - - File[/etc/openstack-dashboard/local_settings.py] - - Exec[concat_/etc/openstack-dashboard/local_settings.py] - - openstack-haproxy-ceilometer: - no_puppet_run: true - - openstack-network-common-config: - resources: [] - - firewall: - resources: [] - - apache: - resources: [] - - globals: - resources: - - File[/etc/hiera/globals.yaml] - - aodh-keystone: - no_puppet_run: true - - glance: - resources: - - Glance_swift_config[ref1/user] - - Glance_glare_config[glance_store/swift_store_create_container_on_put] - - Glance_glare_config[glance_store/swift_store_config_file] - - Package[swift] - - Glance_api_config[glance_store/default_store] - - Glance_api_config[glance_store/swift_store_create_container_on_put] - - Glance_swift_config[ref1/auth_address] - - Glance_api_config[glance_store/swift_store_endpoint_type] - - Glance_glare_config[DEFAULT/default_log_levels] - - Glance_api_config[glance_store/stores] - - Glance_cache_config[DEFAULT/image_cache_max_size] - - Glance_glare_config[glance_store/default_store] - - Glance_registry_config[DEFAULT/default_log_levels] - - Glance_api_config[DEFAULT/notification_driver] - - Glance_api_config[DEFAULT/default_log_levels] - - Glance_glare_config[glance_store/swift_store_region] - - Glance_cache_config[DEFAULT/debug] - - Glance_glare_config[glance_store/swift_store_endpoint_type] - - Glance_api_config[DEFAULT/debug] - - Glance_glare_config[DEFAULT/debug] - - Glance_api_config[glance_store/swift_store_config_file] - - Glance_api_config[DEFAULT/show_image_direct_url] - - Glance_glare_config[glance_store/stores] - - Glance_api_config[glance_store/default_swift_reference] - - Glance_glare_config[glance_store/swift_store_large_object_size] - - Glance_registry_config[DEFAULT/debug] - - Glance_api_config[glance_store/swift_store_large_object_size] - - Glance_glare_config[glance_store/default_swift_reference] - - Glance_swift_config[ref1/key] - - Glance_glare_config[glance_store/swift_store_container] - - Glance_api_config[glance_store/swift_store_container] - - Glance_api_config[glance_store/swift_store_region] - - tools: - resources: [] - - openstack-haproxy: - resources: [] - - cgroups: - resources: [] - - murano-cfapi-keystone: - no_puppet_run: true - - aodh: - no_puppet_run: true - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: - resources: [] - - openstack-network-routers-ha: - no_puppet_run: true - - glance-db: - resources: [] - - neutron-db: - resources: [] - - ironic_upload_images: - type: shell - - nova-db: - resources: [] - - swift-rebalance-cron: - resources: - - File[/usr/local/bin/swift-rings-rebalance.sh] - - Cron[swift-rings-rebalance] - - primary-ceph-mon: - resources: [] - - openstack-haproxy-stats: - resources: [] - - ironic-api: - 
no_puppet_run: true - - primary-ceph-radosgw: - resources: [] - - dns-client: - resources: [] - - cluster-vrouter: - resources: [] - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: - resources: [] - - cluster_health: - resources: [] - - heat-keystone: - resources: - - Keystone_endpoint[RegionOne/heat-cfn::cloudformation] - - Keystone_endpoint[RegionOne/heat::orchestration] - - openstack-haproxy-horizon: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/00_horizon_listen_block] - - File[/etc/haproxy/conf.d/015-horizon.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_015-horizon.cfg/fragments/01-horizon_horizon_balancermember_horizon] - - Exec[concat_/etc/haproxy/conf.d/015-horizon.cfg] - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - no_puppet_run: true - - copy_keys_ceph: - type: copy_files - - sahara: - no_puppet_run: true - - fuel_pkgs: - resources: [] - - swift-keystone: - resources: - - Keystone_role[SwiftOperator] - - Keystone_user[swift] - - Keystone_endpoint[RegionOne/swift::object-store] - - Keystone_user_role[swift@services] - - Keystone_service[swift_s3::s3] - - Keystone_endpoint[RegionOne/swift_s3::s3] - - public_vip_ping: - resources: [] - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: - resources: - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_080-glance-api.cfg/fragments/00_glance-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/080-glance-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_081-glance-glare.cfg/fragments/00_glance-glare_listen_block] - - File[/etc/haproxy/conf.d/080-glance-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/081-glance-glare.cfg] - - File[/etc/haproxy/conf.d/081-glance-glare.cfg] - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: - resources: - - File[/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_040-nova-api.cfg/fragments/00_nova-api_listen_block] - - Exec[concat_/etc/haproxy/conf.d/040-nova-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_170-nova-novncproxy.cfg/fragments/00_nova-novncproxy_listen_block] - - Exec[concat_/etc/haproxy/conf.d/170-nova-novncproxy.cfg] - - openstack-network-server-config: - resources: [] - - primary-database: - resources: - - File[mysql-config-file] - - File[/root/.my.cnf] - - openstack-haproxy-cinder: - resources: - - File[/etc/haproxy/conf.d/070-cinder-api.cfg] - - Exec[concat_/etc/haproxy/conf.d/070-cinder-api.cfg] - - File[/var/lib/puppet/concat/_etc_haproxy_conf.d_070-cinder-api.cfg/fragments/00_cinder-api_listen_block] - - ntp-server: - resources: [] - - murano-keystone: - no_puppet_run: true - - swift-proxy_storage: - resources: - - Swift_dispersion_config[dispersion/dispersion_coverage] - - Ring_account_device[10.109.2.2:6002/1] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf] - - File[/etc/swift/dispersion.conf] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/31_swift-proxy-formpost] - - Exec[create_account] - - Swift_dispersion_config[dispersion/auth_url] - - Swift_dispersion_config[dispersion/dump_json] - - Ring_account_device[10.109.2.5:6002/2] - - Package[swift-object] - - File[/var/lib/puppet/concat/_etc_swift_account-server.conf] - - Ring_object_device[10.109.2.5:6000/1] - - File[/etc/swift/swift.conf] - - File[/var/lib/glance] - - 
Service[swift-account-server] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_server_frag-swift_server] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/35_swift_slo] - - Ring_object_device[10.109.2.2:6000/2] - - Package[swift-plugin-s3] - - File[/etc/swift/account-server.conf] - - Service[swift-account-replicator] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments.concat.out] - - File[/var/lib/puppet/concat/_etc_swift_account-server.conf/fragments.concat.out] - - File[/var/lib/puppet/concat/_etc_swift_account-server.conf/fragments] - - Package[swift-container] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/35_swift_crossdomain] - - File[/var/run/swift] - - File[/var/lib/puppet/concat/_etc_swift_container-server.conf] - - File[/var/lib/puppet/concat/_etc_swift_object-server.conf/fragments.concat.out] - - Ring_object_device[10.109.2.5:6000/2] - - Service[swift-container-replicator] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments.concat] - - Swift_dispersion_config[dispersion/auth_user] - - Swift_dispersion_config[dispersion/endpoint_type] - - Ring_account_device[10.109.2.3:6002/1] - - Service[swift-container-updater] - - File[/var/lib/puppet/concat/_etc_swift_object-server.conf] - - Service[swift-object-updater] - - Exec[create_container] - - File[/etc/swift/object-server.conf] - - Swift_dispersion_config[dispersion/auth_version] - - Ring_container_device[10.109.2.5:6001/2] - - Exec[concat_/etc/rsyncd.conf] - - Service[swift-proxy-server] - - Swift_dispersion_config[dispersion/swift_dir] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/00_header_rsyncd_conf_header] - - File[/etc/swift/container-server.conf] - - Service[swift-account-auditor] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments.concat] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments] - - Ring_container_device[10.109.2.2:6001/1] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_swift_backups_frag-swift_backups] - - Swift_config[swift-constraints/max_header_size] - - File[/etc/swift] - - Ring_container_device[10.109.2.5:6001/1] - - Ring_container_device[10.109.2.3:6001/1] - - Service[swift-container-server] - - File[/var/lib/puppet/concat/_etc_swift_object-server.conf/fragments.concat] - - File[/var/lib/puppet/concat/_etc_swift_container-server.conf/fragments] - - File[/var/lib/puppet/concat/_etc_swift_object-server.conf/fragments/00_swift-object-6000] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/81_swift_container_quotas] - - File[/etc/swift/container-server/] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/26_swift_ratelimit] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/00_swift_proxy] - - File[/var/cache/swift] - - File[/var/lib/puppet/concat/_etc_swift_account-server.conf/fragments.concat] - - Ring_container_device[10.109.2.2:6001/2] - - File[/var/lib/swift] - - File[/etc/xinetd.d/rsync] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_account_frag-account] - - Swift_dispersion_config[dispersion/concurrency] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/22_swift_authtoken] - - Swift_config[swift-hash/swift_hash_path_suffix] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/80_swift_account_quotas] - - Swift_dispersion_config[dispersion/retries] - - Package[swift-account] - - File[/etc/swift/proxy-server.conf] - - Service[swift-account-reaper] - - 
Exec[concat_/etc/swift/container-server.conf] - - File[/var/lib/puppet/concat/_etc_swift_container-server.conf/fragments.concat] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_object_frag-object] - - File[/var/lib/puppet/concat/_etc_swift_container-server.conf/fragments/00_swift-container-6001] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/25_swift_healthcheck] - - Ring_account_device[10.109.2.5:6002/1] - - Ring_object_device[10.109.2.3:6000/1] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/23_swift_cache] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/29_swift-proxy-tempurl] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/27_swift_swift3] - - Exec[create_object] - - Ring_object_device[10.109.2.2:6000/1] - - Swift_dispersion_config[dispersion/auth_key] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments.concat.out] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/28_swift_s3token] - - File[/var/lib/puppet/concat/_etc_swift_object-server.conf/fragments] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/32_swift-proxy-staticweb] - - Exec[concat_/etc/swift/account-server.conf] - - Service[swift-container-auditor] - - File[/var/lib/glance/node] - - Service[swift-container-sync] - - Ring_object_device[10.109.2.3:6000/2] - - Exec[concat_/etc/swift/proxy-server.conf] - - Service[swift-object-replicator] - - Ring_container_device[10.109.2.3:6001/2] - - File[/etc/rsyncd.conf] - - Package[swift-proxy] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/79_swift_keystone] - - Service[swift-object-auditor] - - File[/etc/swift/account-server/] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf] - - Ring_account_device[10.109.2.3:6002/2] - - File[/var/lib/puppet/concat/_etc_swift_account-server.conf/fragments/00_swift-account-6002] - - Ring_account_device[10.109.2.2:6002/2] - - File[/var/lib/puppet/concat/_etc_rsyncd.conf/fragments/10_container_frag-container] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments] - - File[/var/lib/puppet/concat/_etc_swift_container-server.conf/fragments.concat.out] - - Service[swift-object-server] - - Exec[concat_/etc/swift/object-server.conf] - - File[/etc/swift/object-server/] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/24_swift_catch_errors] - - File[/var/lib/puppet/concat/_etc_swift_proxy-server.conf/fragments/21_swift_bulk] - - primary-openstack-network-agents-dhcp: - resources: - - Neutron_dhcp_agent_config[DEFAULT/debug] - - openstack-haproxy-heat: - no_puppet_run: true - - primary-openstack-controller: - resources: - - Nova_config[DEFAULT/quota_driver] - - Nova_config[DEFAULT/debug] - - Nova_config[DEFAULT/scheduler_default_filters] - - Nova_config[DEFAULT/default_log_levels] - - openstack-cinder: - resources: - - Cinder_config[DEFAULT/scheduler_default_filters] - - Cinder_config[DEFAULT/default_log_levels] - - Cinder_config[DEFAULT/debug] - - keystone-db: - resources: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: - resources: [] - - copy_haproxy_keys: - type: copy_files - - openstack-network-networks: - resources: [] - - ssl-add-trust-chain: - no_puppet_run: true \ No newline at end of file diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/ceph-osd_compute.yaml 
b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/ceph-osd_compute.yaml deleted file mode 100644 index 52299ee7a..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/ceph-osd_compute.yaml +++ /dev/null @@ -1,67 +0,0 @@ -roles: - compute -tasks: - - update_hosts: null - - openstack-network-start: - type: skipped - - openstack-network-common-config: null - - clear_nodes_info: - type: skipped - - openstack-network-agents-sriov: null - - override_configuration: null - - globals: null - - fuel_pkgs: null - - openstack-network-agents-l3: null - - openstack-network-agents-metadata: null - - tools: null - - rsync_core_puppet: - type: sync - - enable_nova_compute_service: null - - cgroups: null - - upload_nodes_info: - type: skipped - - copy_keys: - type: copy_files - - copy_deleted_nodes: - type: copy_files - - setup_repositories: null - - dns-client: null - - openstack-network-plugins-l2: null - - allocate_hugepages: null - - plugins_setup_repositories: - no_puppet_run: true - - updatedb: null - - ceph-compute: null - - ssl-keys-saving: null - - sriov_iommu_check: - skip: - - Exec[sriov_iommu_check] - - openstack-network-end: - type: skipped - - copy_keys_ceph: - type: copy_files - - upload_configuration: - type: upload_file - - firewall: null - - top-role-ceph-osd: null - - logging: null - - top-role-compute: - skip: - - Notify[Module openstack_tasks cannot notify service nova-compute on packages - update] - - Service[nova-compute] - - sync_time: - type: shell - - openstack-network-compute-nova: null - - plugins_rsync: - no_puppet_run: true - - connectivity_tests: null - - configuration_symlink: - type: shell - - ceilometer-compute: null - - hosts: null - - copy_haproxy_keys: - type: copy_files - - ntp-client: null - - ssl-add-trust-chain: null - - reserved_ports: null diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/controller_mongo.yaml b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/controller_mongo.yaml deleted file mode 100644 index 6c2b739e0..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/controller_mongo.yaml +++ /dev/null @@ -1,132 +0,0 @@ -roles: - controller -tasks: - - openstack-haproxy-mysqld: null - - openstack-network-agents-l3: null - - dump_rabbitmq_definitions: - skip: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: null - - ceilometer-controller: null - - override_configuration: null - - ceph_create_pools: null - - virtual_ips: null - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: null - - openstack-haproxy-swift: null - - openstack-haproxy-neutron: null - - updatedb: null - - plugins_rsync: - no_puppet_run: true - - openstack-controller: null - - rabbitmq: null - - openstack-haproxy-keystone: null - - hosts: null - - ntp-client: null - - reserved_ports: null - - controller_remaining_tasks: null - - openstack-haproxy-aodh: null - - murano-cfapi: - no_puppet_run: true - - ceilometer-radosgw-user: null - - ironic-compute: - no_puppet_run: true - - dns-server: null - - swift-proxy_storage: - no_puppet_run: true - - copy_keys: - type: copy_files - - enable_rados: null - - ntp-check: null - - keystone: null - - disable_keystone_service_token: - no_puppet_run: true - - umm: null - - ceph-mon: null - - memcached: null - - allocate_hugepages: null - - openrc-delete: - 
skip: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - ceph-radosgw: null - - openstack-haproxy-sahara: null - - ssl-keys-saving: null - - apache: null - - upload_configuration: - type: upload_file - - logging: null - - update_hosts: null - - connectivity_tests: null - - openstack-network-agents-metadata: null - - conntrackd: null - - horizon: null - - openstack-haproxy-ceilometer: null - - openstack-network-common-config: null - - firewall: null - - cluster-haproxy: null - - globals: null - - glance: null - - tools: null - - openstack-haproxy: null - - cgroups: null - - aodh: null - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: null - - swift-rebalance-cron: - no_puppet_run: true - - heat: null - - openstack-haproxy-stats: null - - ironic-api: - no_puppet_run: true - - top-role-mongo: null - - dns-client: null - - cluster-vrouter: null - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: null - - cluster_health: null - - openstack-haproxy-horizon: null - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - copy_keys_ceph: - type: copy_files - - cluster: - skip: - - Pcmk_property[no-quorum-policy] - - sahara: null - - fuel_pkgs: null - - public_vip_ping: null - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: null - - murano: - no_puppet_run: true - - openstack-network-plugins-l2: null - - openstack-network-agents-dhcp: null - - openstack-haproxy-nova: null - - openstack-network-server-config: null - - openstack-haproxy-cinder: null - - ntp-server: null - - openstack-haproxy-heat: null - - openstack-cinder: null - - sync_time: - type: shell - - database: null - - configuration_symlink: - type: shell - - openstack-network-server-nova: null - - copy_haproxy_keys: - type: copy_files - - ssl-add-trust-chain: null diff --git a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/primary-controller_mongo.yaml b/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/primary-controller_mongo.yaml deleted file mode 100644 index 3f864c0f2..000000000 --- a/fuelweb_test/tests/tests_lcm/fixtures/3_ctrl_3_cmp_ceph_sahara/idempotency/primary-controller_mongo.yaml +++ /dev/null @@ -1,185 +0,0 @@ -roles: - controller -tasks: - - ironic_post_swift_key: - type: shell - - openstack-haproxy-mysqld: null - - top-role-primary-mongo: null - - cinder-db: null - - dump_rabbitmq_definitions: - skip: - - Dump_rabbitmq_definitions[/etc/rabbitmq/definitions.full] - - rsync_core_puppet: - type: sync - - ssl-dns-setup: null - - ceilometer-controller: null - - override_configuration: null - - ceilometer-keystone: null - - nova-db: null - - workloads_collector_add: null - - primary-openstack-network-plugins-l2: null - - radosgw-keystone: null - - virtual_ips: null - - primary-dns-server: null - - openstack-haproxy-murano: - no_puppet_run: true - - openstack-network-end: - type: skipped - - openstack-haproxy-radosgw: null - - openstack-haproxy-swift: null - - heat-db: null - - openstack-haproxy-neutron: null - - updatedb: null - - ironic-db: - no_puppet_run: true - - plugins_rsync: - no_puppet_run: true - - ceilometer-radosgw-user: null - - openstack-haproxy-keystone: null - - hosts: null - - primary-rabbitmq: null - - primary-cluster-haproxy: null - - openstack-network-routers: null - - reserved_ports: null - - controller_remaining_tasks: null - - glance-keystone: null - - openstack-haproxy-aodh: null - - murano-cfapi: - 
no_puppet_run: true - - ironic-compute: - no_puppet_run: true - - primary-openstack-network-agents-metadata: null - - cinder-keystone: null - - copy_keys: - type: copy_files - - enable_rados: null - - ntp-check: null - - aodh-db: null - - disable_keystone_service_token: - no_puppet_run: true - - umm: null - - memcached: null - - allocate_hugepages: null - - openrc-delete: - skip: - - File[/root/openrc] - - plugins_setup_repositories: - no_puppet_run: true - - sahara-keystone: null - - openstack-haproxy-sahara: null - - ssl-keys-saving: null - - primary-cluster: - skip: - - Pcmk_property[no-quorum-policy] - - upload_cirros: - type: shell - - primary-keystone: - skip: - - File[/root/openrc] - - apache: null - - upload_configuration: - type: upload_file - - create-cinder-types: null - - neutron-keystone: null - - logging: null - - ntp-client: null - - nova-keystone: null - - update_hosts: null - - ironic-keystone: - no_puppet_run: true - - connectivity_tests: null - - primary-heat: null - - conntrackd: null - - sahara-db: null - - horizon: null - - openstack-haproxy-ceilometer: null - - openstack-network-common-config: null - - firewall: null - - primary-openstack-network-agents-l3: null - - globals: null - - aodh-keystone: null - - glance: null - - tools: null - - openstack-haproxy: null - - cgroups: null - - murano-cfapi-keystone: - no_puppet_run: true - - aodh: null - - copy_deleted_nodes: - type: copy_files - - openstack-haproxy-ironic: - no_puppet_run: true - - setup_repositories: null - - openstack-network-routers-ha: - no_puppet_run: true - - upload_murano_package: - no_puppet_run: true - - glance-db: null - - neutron-db: null - - ironic_upload_images: - type: shell - - swift-rebalance-cron: - no_puppet_run: true - - primary-ceph-mon: null - - openstack-haproxy-stats: null - - ironic-api: - no_puppet_run: true - - primary-ceph-radosgw: null - - dns-client: null - - cluster-vrouter: null - - murano-rabbitmq: - no_puppet_run: true - - api-proxy: null - - cluster_health: null - - heat-keystone: null - - openstack-haproxy-horizon: null - - openstack-network-start: - type: skipped - - clear_nodes_info: - type: skipped - - murano-db: - no_puppet_run: true - - copy_keys_ceph: - type: copy_files - - sahara: null - - fuel_pkgs: null - - swift-keystone: - no_puppet_run: true - - public_vip_ping: null - - upload_nodes_info: - type: skipped - - openstack-haproxy-glance: null - - murano: - no_puppet_run: true - - ceph_ready_check: - type: shell - - enable_quorum: - type: shell - - openstack-haproxy-nova: null - - openstack-network-server-config: null - - primary-database: - skip: - - File[/root/.my.cnf] - - openstack-haproxy-cinder: null - - ntp-server: null - - murano-keystone: - no_puppet_run: true - - swift-proxy_storage: - no_puppet_run: true - - primary-openstack-network-agents-dhcp: null - - openstack-haproxy-heat: null - - primary-openstack-controller: null - - openstack-cinder: null - - ceph_create_pools: null - - keystone-db: - skip: - - File[/root/.my.cnf] - - sync_time: - type: shell - - configuration_symlink: - type: shell - - openstack-network-server-nova: null - - copy_haproxy_keys: - type: copy_files - - openstack-network-networks: null - - ssl-add-trust-chain: null diff --git a/fuelweb_test/tests/tests_lcm/test_ensurability.py b/fuelweb_test/tests/tests_lcm/test_ensurability.py deleted file mode 100644 index 0a88b058f..000000000 --- a/fuelweb_test/tests/tests_lcm/test_ensurability.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright 2016 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re -import yaml - -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test import logger -from fuelweb_test.tests.tests_lcm.base_lcm_test import DeprecatedFixture -from fuelweb_test.tests.tests_lcm.base_lcm_test import LCMTestBasic -from fuelweb_test.tests.tests_lcm.base_lcm_test import SetupLCMEnvironment - - -@test -class TaskEnsurability(LCMTestBasic): - """Test suite for verification of deployment tasks ensurability.""" - - @staticmethod - def delete_astute_log(): - """Delete astute.log file(s) on master node. - - This is to ensure that no unwanted tasks are used by tests (e.g. from - previous deployments). - - :return: None - """ - ssh = SSHManager() - ssh.execute_on_remote(ssh.admin_ip, "rm /var/log/astute/astute*") - ssh.execute_on_remote(ssh.admin_ip, "systemctl restart astute.service") - - def deploy_fixtures(self, deployment, cluster_id, slave_nodes): - """Apply stored settings and deploy the changes - - :param deployment: str, name of cluster configuration under test - :param cluster_id: int, cluster ID - :param slave_nodes: list, cluster nodes data - :return: None - """ - self.delete_astute_log() - cluster_f, nodes_f = self.load_settings_fixtures(deployment) - - self.fuel_web.client.update_cluster_attributes( - cluster_id, {'editable': cluster_f}) - for node in slave_nodes: - self.fuel_web.client.upload_node_attributes( - nodes_f[self.node_roles(node)], node["id"]) - - self.fuel_web.deploy_cluster_changes_wait(cluster_id) - - def generate_tasks_fixture(self, deployment, cluster_id, - slave_nodes, ha=False): - """Collect per-node fixtures for tasks executed on deploying changes - - :param deployment: str, name of env configuration under test - :param cluster_id: int, cluster ID - :param slave_nodes: list, cluster nodes data - :param ha: bool, indicates whether HA env is used - :return: None - """ - # For each node get list of tasks executed during end-to-end redeploy - tasks = {} - primary_ctrl_id = self.define_pr_ctrl()['id'] - for node in slave_nodes: - node_ref = self.node_roles(node) - if ha and primary_ctrl_id == node['id']: - node_ref = 'primary-' + node_ref - tasks[node_ref] = self.get_nodes_tasks(node["id"]) - - # Revert snapshot and collect fixtures for the executed tasks - # by running each one separately - self.env.revert_snapshot('lcm_deploy_{}'.format(deployment)) - - cluster_f, nodes_f = self.load_settings_fixtures(deployment) - for node in slave_nodes: - self.fuel_web.client.upload_node_attributes( - nodes_f[self.node_roles(node)], node["id"]) - self.fuel_web.client.update_cluster_attributes( - cluster_id, {'editable': cluster_f}) - - result = {} - tasks_description = self.env.admin_actions.get_tasks_description() - for node in slave_nodes: - task_fixture = [] - node_ref = self.node_roles(node) - - if ha and primary_ctrl_id == node['id']: - node_ref = 
'primary-' + node_ref - - for task in tasks[node_ref]: - self.fuel_web.execute_task_on_node( - task, node['id'], cluster_id) - - task_type = self.get_task_type(tasks_description, task) - if task_type != "puppet": - logger.info( - "Executed non-puppet {0} task on node {1}; skip " - "collecting fixture for it".format(task, node['id'])) - task_fixture.append({task: {"type": task_type}}) - continue - - try: - report = self.get_puppet_report(node) - except AssertionError: - task_fixture.append({task: {"no_puppet_run": True}}) - logger.info("Unexpected no_puppet_run for task: " - "{}".format(task)) - continue - - # Remember resources that were changed by the task - task_resources = [] - for res_name, res_stats in report['resource_statuses'].items(): - if res_stats['changed']: - logger.info("Task {} changed resource(s): " - "{}".format(task, res_name)) - task_resources.append(res_name) - task_fixture.append({task: {"resources": task_resources}}) - logger.info("Task {} on node {} was executed " - "successfully".format(task, node['id'])) - - result.update({ - node_ref: { - "tasks": task_fixture - } - }) - - logger.info("Generated tasks fixture:\n{}".format( - yaml.safe_dump(result, default_flow_style=False))) - - def check_ensurability(self, deployment, cluster_id, - slave_nodes, ha=False): - """Check ensurability of tasks for the given env configuration. - - :param deployment: str, name of env configuration under test - :param cluster_id: int, cluster ID - :param slave_nodes: list, cluster nodes data - :param ha: bool, indicates whether HA env is used - :return: None - """ - # Revert snapshot to run each task separately - self.env.revert_snapshot('lcm_deploy_{}'.format(deployment)) - - # Apply the stored settings - cluster_f, nodes_f = self.load_settings_fixtures(deployment) - for node in slave_nodes: - self.fuel_web.client.upload_node_attributes( - nodes_f[self.node_roles(node)], node["id"]) - self.fuel_web.client.update_cluster_attributes( - cluster_id, {'editable': cluster_f}) - - ip_pattern = re.compile(r"(?:\d{1,3}\.){3}\d{1,3}:\d{1,4}/\d") - - result = {} - ensurable = True - primary_ctrl_id = self.define_pr_ctrl()['id'] - for node in slave_nodes: - node_ref = self.node_roles(node) - if ha and primary_ctrl_id == node['id']: - node_ref = 'primary-' + node_ref - fixture = self.load_fixture(deployment, node_ref, idmp=False) - nonensurable_tasks = {} - - for task in fixture["tasks"]: - task_name, task_data = task.items()[0] - self.fuel_web.execute_task_on_node( - task_name, node['id'], cluster_id) - - if task_data["type"] != "puppet": - logger.info( - "Executed non-puppet {0} task on node {1}; skip " - "checks for it".format(task_name, node['id'])) - continue - - try: - report = self.get_puppet_report(node) - except AssertionError: - if not task_data.get("no_puppet_run"): - logger.info("Unexpected no_puppet_run for task: " - "{}".format(task_name)) - continue - - task_resources = [] - skip = task_data.get('skip') - for res_name, res_stats in report['resource_statuses'].items(): - if res_stats['changed'] and res_name not in skip: - logger.info("Task {} changed resource: " - "{}".format(task_name, res_name)) - task_resources.append(res_name) - - expected_resources = [ip_pattern.sub("addr", r) - for r in task_data["resources"]] - actual_resources = [ip_pattern.sub("addr", r) - for r in task_resources] - if sorted(actual_resources) != sorted(expected_resources): - ensurable = False - logger.info("Task {} was executed on node {} and is not " - "ensurable".format(task_name, node['id'])) - 
nonensurable_tasks.update({
-                        task_name: {
-                            "actual": task_resources,
-                            "expected": task_data["resources"]
-                        }
-                    })
-                else:
-                    logger.info("Task {} on node {} was executed "
-                                "successfully".format(task_name, node['id']))
-            result[node_ref] = nonensurable_tasks
-
-        logger.info('Non-ensurable tasks:\n{}'.format(
-            yaml.safe_dump(result, default_flow_style=False)))
-        return ensurable
-
-    @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_1_cinder],
-          groups=['lcm_non_ha',
-                  'test_ensurability',
-                  'ensurability_1_ctrl_1_cmp_1_cinder'])
-    @log_snapshot_after_test
-    def ensurability_1_ctrl_1_cmp_1_cinder(self):
-        """Test ensurability for cluster with cinder
-
-        Scenario:
-            1. Revert the snapshot 'lcm_deploy_1_ctrl_1_cmp_1_cinder'
-            2. Check that stored setting fixtures are up to date
-            3. Check that stored task fixtures are up to date
-            4. Check ensurability of the tasks
-
-        Snapshot: "ensurability_1_ctrl_1_cmp_1_cinder"
-        """
-        self.show_step(1)
-        deployment = "1_ctrl_1_cmp_1_cinder"
-        self.env.revert_snapshot('lcm_deploy_{}'.format(deployment))
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-
-        self.show_step(2)
-        self.check_settings_consistency(deployment, cluster_id)
-
-        self.show_step(3)
-        self.deploy_fixtures(deployment, cluster_id, slave_nodes)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment, idmp=False)
-        if node_refs:
-            self.generate_tasks_fixture(deployment, cluster_id, slave_nodes)
-            msg = ('Please update ensurability fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-
-        self.show_step(4)
-        assert_true(
-            self.check_ensurability(deployment, cluster_id, slave_nodes),
-            "There are non-ensurable tasks. "
-            "Please take a look at the output above!")
-
-        self.env.make_snapshot('ensurability_{}'.format(deployment))
-
-    @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_1_mongo],
-          groups=['lcm_non_ha',
-                  'test_ensurability',
-                  'ensurability_1_ctrl_1_cmp_1_mongo'])
-    @log_snapshot_after_test
-    def ensurability_1_ctrl_1_cmp_1_mongo(self):
-        """Test ensurability for cluster with mongo
-
-        Scenario:
-            1. Revert the snapshot 'lcm_deploy_1_ctrl_1_cmp_1_mongo'
-            2. Check that stored setting fixtures are up to date
-            3. Check that stored task fixtures are up to date
-            4. Check ensurability of the tasks
-
-        Snapshot: "ensurability_1_ctrl_1_cmp_1_mongo"
-        """
-        self.show_step(1)
-        deployment = "1_ctrl_1_cmp_1_mongo"
-        self.env.revert_snapshot('lcm_deploy_{}'.format(deployment))
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-
-        self.show_step(2)
-        self.check_settings_consistency(deployment, cluster_id)
-
-        self.show_step(3)
-        self.deploy_fixtures(deployment, cluster_id, slave_nodes)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment, idmp=False)
-        if node_refs:
-            self.generate_tasks_fixture(deployment, cluster_id, slave_nodes)
-            msg = ('Please update ensurability fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-
-        self.show_step(4)
-        assert_true(
-            self.check_ensurability(deployment, cluster_id, slave_nodes),
-            "There are non-ensurable tasks. "
-            "Please take a look at the output above!")
-
-        self.env.make_snapshot('ensurability_{}'.format(deployment))
-
-    @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_3_ceph],
-          groups=['lcm_non_ha',
-                  'test_ensurability',
-                  'ensurability_1_ctrl_1_cmp_3_ceph'])
-    @log_snapshot_after_test
-    def ensurability_1_ctrl_1_cmp_3_ceph(self):
-        """Test ensurability for cluster with ceph
-
-        Scenario:
-            1. Revert the snapshot 'lcm_deploy_1_ctrl_1_cmp_3_ceph'
-            2. Check that stored setting fixtures are up to date
-            3. Check that stored task fixtures are up to date
-            4. Check ensurability of the tasks
-
-        Snapshot: "ensurability_1_ctrl_1_cmp_3_ceph"
-        """
-        self.show_step(1)
-        deployment = "1_ctrl_1_cmp_3_ceph"
-        self.env.revert_snapshot('lcm_deploy_{}'.format(deployment))
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-
-        self.show_step(2)
-        self.check_settings_consistency(deployment, cluster_id)
-
-        self.show_step(3)
-        self.deploy_fixtures(deployment, cluster_id, slave_nodes)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment, idmp=False)
-        if node_refs:
-            self.generate_tasks_fixture(deployment, cluster_id, slave_nodes)
-            msg = ('Please update ensurability fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-
-        self.show_step(4)
-        assert_true(
-            self.check_ensurability(deployment, cluster_id, slave_nodes),
-            "There are non-ensurable tasks. "
-            "Please take a look at the output above!")
-
-        self.env.make_snapshot('ensurability_{}'.format(deployment))
-
-    @test(depends_on=[SetupLCMEnvironment.lcm_deploy_3_ctrl_3_cmp_ceph_sahara],
-          groups=['lcm_ha',
-                  'test_ensurability',
-                  'ensurability_3_ctrl_3_cmp_ceph_sahara'])
-    @log_snapshot_after_test
-    def ensurability_3_ctrl_3_cmp_ceph_sahara(self):
-        """Test ensurability for cluster with Sahara, Ceilometer and Ceph
-        in HA mode.
-
-        Scenario:
-            1. Revert the snapshot 'lcm_deploy_3_ctrl_3_cmp_ceph_sahara'
-            2. Check that stored setting fixtures are up to date
-            3. Check that stored task fixtures are up to date
-            4. Check ensurability of the tasks
-
-        Snapshot: "ensurability_3_ctrl_3_cmp_ceph_sahara"
-        """
-        self.show_step(1)
-        deployment = "3_ctrl_3_cmp_ceph_sahara"
-        self.env.revert_snapshot('lcm_deploy_{}'.format(deployment))
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-
-        self.show_step(2)
-        self.check_settings_consistency(deployment, cluster_id)
-
-        self.show_step(3)
-        self.deploy_fixtures(deployment, cluster_id, slave_nodes)
-        node_refs = self.check_extra_tasks(
-            slave_nodes, deployment, idmp=False, ha=True)
-        if node_refs:
-            self.generate_tasks_fixture(
-                deployment, cluster_id, slave_nodes, ha=True)
-            msg = ('Please update ensurability fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-
-        self.show_step(4)
-        assert_true(
-            self.check_ensurability(
-                deployment, cluster_id, slave_nodes, ha=True),
-            "There are non-ensurable tasks. "
-            "Please take a look at the output above!")
-
-        self.env.make_snapshot('ensurability_{}'.format(deployment))
-
-    @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_1_ironic],
-          groups=['lcm_ironic',
-                  'ensurability_1_ctrl_1_cmp_1_ironic'])
-    @log_snapshot_after_test
-    def ensurability_1_ctrl_1_cmp_1_ironic(self):
-        """Test ensurability for cluster with Ironic
-
-        Scenario:
-            1. Revert the snapshot 'lcm_deploy_1_ctrl_1_cmp_1_ironic'
-            2. Check that stored setting fixtures are up to date
-            3. Check that stored task fixtures are up to date
-            4. Check ensurability of the tasks
-
-        Duration: 185m
-        Snapshot: "ensurability_1_ctrl_1_cmp_1_ironic"
-        """
-        self.show_step(1)
-        deployment = "1_ctrl_1_cmp_1_ironic"
-        self.env.revert_snapshot('lcm_deploy_{}'.format(deployment))
-
-        cluster_id = self.fuel_web.get_last_created_cluster()
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-
-        self.show_step(2)
-        self.check_settings_consistency(deployment, cluster_id)
-
-        self.show_step(3)
-        self.deploy_fixtures(deployment, cluster_id, slave_nodes)
-        node_refs = self.check_extra_tasks(slave_nodes, deployment, idmp=False)
-        if node_refs:
-            self.generate_tasks_fixture(deployment, cluster_id, slave_nodes)
-            msg = ('Please update ensurability fixtures in the repo '
-                   'according to generated fixtures')
-            raise DeprecatedFixture(msg)
-
-        self.show_step(4)
-        assert_true(
-            self.check_ensurability(deployment, cluster_id, slave_nodes),
-            "There are non-ensurable tasks. "
-            "Please take a look at the output above!")
-
-        self.env.make_snapshot('ensurability_{}'.format(deployment))
diff --git a/fuelweb_test/tests/tests_lcm/test_idempotency.py b/fuelweb_test/tests/tests_lcm/test_idempotency.py
deleted file mode 100644
index bf4f3b624..000000000
--- a/fuelweb_test/tests/tests_lcm/test_idempotency.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
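For reference, the idempotency rule that the module below implements boils down to: a re-run task passes only if the repeated puppet run changes no resource outside an allowed skip list. A minimal, self-contained sketch of that rule (the 'resource_statuses' shape mirrors the puppet reports used below; the helper name is illustrative, not this module's API):

    def is_idempotent(report, skip=()):
        # A task passes if no resource outside the skip list reports a change.
        return not [name for name, stats in report['resource_statuses'].items()
                    if stats['changed'] and name not in skip]

    # Example: only the whitelisted resource changed, so the task passes.
    sample_report = {'resource_statuses': {
        'File[/root/openrc]': {'changed': True},
        'Service[ntp]': {'changed': False},
    }}
    assert is_idempotent(sample_report, skip=('File[/root/openrc]',))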
-
-from proboscis import asserts
-from proboscis import test
-import yaml
-
-from fuelweb_test import logger
-from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.tests.tests_lcm.base_lcm_test import SetupLCMEnvironment
-from fuelweb_test.tests.tests_lcm.base_lcm_test import LCMTestBasic
-
-
-@test
-class TaskIdempotency(LCMTestBasic):
-    """TaskIdempotency."""  # TODO documentation
-
-    def check_idempotency(self, deployment):
-        """Check task idempotency for the corresponding deployment
-
-        :param deployment: a string, name of the deployment kind
-        :return: a boolean, all tasks are idempotent - True,
-                 some task is not idempotent - False
-        """
-        idempotent = True
-        cluster_id = self.fuel_web.get_last_created_cluster()
-        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-
-        result = {'tasks_idempotency': {},
-                  'timeouterror_tasks': {}}
-        pr_ctrl = (self.define_pr_ctrl()
-                   if deployment == '3_ctrl_3_cmp_ceph_sahara'
-                   else {})
-        for node in slave_nodes:
-            node_roles = "_".join(sorted(node["roles"]))
-            if node.get('name') == pr_ctrl.get('name', None):
-                node_roles = 'primary-' + node_roles
-            node_ref = "{}_{}".format(node["id"], node_roles)
-            fixture = self.load_fixture(deployment, node_roles)
-
-            failed_tasks = {}
-            timeouterror_tasks = []
-
-            for task in fixture['tasks']:
-                task_name, fixture_task = task.items()[0]
-
-                if fixture_task['type'] != 'puppet':
-                    logger.info('Skip checking of {!r} task, it is not puppet'
-                                .format(task_name))
-                    continue
-
-                self.fuel_web.execute_task_on_node(task_name, node["id"],
-                                                   cluster_id)
-
-                try:
-                    report = self.get_puppet_report(node)
-                except AssertionError:
-                    if not fixture_task.get('no_puppet_run'):
-                        msg = ('Unexpected no_puppet_run for task: {!r}'
-                               .format(task_name))
-                        logger.info(msg)
-                        timeouterror_tasks.append(task_name)
-                    continue
-
-                skip = fixture_task.get('skip')
-                failed = False
-                task_resources = []
-
-                for res_name, res_stats in report['resource_statuses'].items():
-                    if res_stats['changed'] and res_name not in skip:
-                        failed = True
-                        msg = ('Non-idempotent task {!r}, resource: {}'
                               .format(task_name, res_name))
-                        logger.error(msg)
-                        task_resources.append(res_name)
-
-                if failed:
-                    idempotent = False
-                    failed_tasks.update({
-                        task_name: task_resources
-                    })
-                else:
-                    logger.info(
-                        'Task {!r} on node {!r} was executed successfully'
-                        .format(task_name, node['id']))
-
-            result['tasks_idempotency'][node_ref] = failed_tasks
-            result['timeouterror_tasks'][node_ref] = timeouterror_tasks
-
-        logger.warning('Non-idempotent tasks:\n{}'
-                       .format(yaml.dump(result, default_flow_style=False)))
-        return idempotent
-
-    @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_1_cinder],
-          groups=['lcm_non_ha',
-                  'idempotency',
-                  'idempotency_1_ctrl_1_cmp_1_cinder'])
-    @log_snapshot_after_test
-    def idempotency_1_ctrl_1_cmp_1_cinder(self):
-        """Test idempotency for cluster with cinder
-
-        Scenario:
-            1. Revert snapshot "lcm_deploy_1_ctrl_1_cmp_1_cinder"
-            2. Check task idempotency
-
-        Duration 60m
-        Snapshot: "idempotency_1_ctrl_1_cmp_1_cinder"
-        """
-        self.show_step(1)
-        deployment = "1_ctrl_1_cmp_1_cinder"
-        self.env.revert_snapshot('lcm_deploy_{}'.format(deployment))
-        self.show_step(2)
-        asserts.assert_true(self.check_idempotency(deployment),
-                            'There are non-idempotent tasks. 
' - 'Please take a look at the output above!') - self.env.make_snapshot('idempotency_{}'.format(deployment)) - - @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_1_mongo], - groups=['lcm_non_ha', - 'idempotency', - 'idempotency_1_ctrl_1_cmp_1_mongo']) - @log_snapshot_after_test - def idempotency_1_ctrl_1_cmp_1_mongo(self): - """Test idempotency for cluster with Ceilometer - - Scenario: - 1. Revert snapshot "lcm_deploy_1_ctrl_1_cmp_1_mongo" - 2. Check task idempotency - - Duration 60m - Snapshot: "idempotency_1_ctrl_1_cmp_1_mongo" - """ - self.show_step(1) - deployment = "1_ctrl_1_cmp_1_mongo" - self.env.revert_snapshot('lcm_deploy_{}'.format(deployment)) - self.show_step(2) - asserts.assert_true(self.check_idempotency(deployment), - 'There are non-idempotent tasks. ' - 'Please take a look at the output above!') - self.env.make_snapshot('idempotency_{}'.format(deployment)) - - @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_3_ceph], - groups=['lcm_non_ha', - 'idempotency', - 'idempotency_1_ctrl_1_cmp_3_ceph']) - @log_snapshot_after_test - def idempotency_1_ctrl_1_cmp_3_ceph(self): - """Test idempotency for cluster with Ceph - - Scenario: - 1. Revert snapshot "lcm_deploy_1_ctrl_1_cmp_3_ceph" - 2. Check task idempotency - - Duration 90m - Snapshot: "idempotency_1_ctrl_1_cmp_3_ceph" - """ - self.show_step(1) - deployment = "1_ctrl_1_cmp_3_ceph" - self.env.revert_snapshot('lcm_deploy_{}'.format(deployment)) - self.show_step(2) - asserts.assert_true(self.check_idempotency(deployment), - 'There are non-idempotent tasks. ' - 'Please take a look at the output above!') - self.env.make_snapshot('idempotency_{}'.format(deployment)) - - @test(depends_on=[SetupLCMEnvironment.lcm_deploy_3_ctrl_3_cmp_ceph_sahara], - groups=['lcm_ha', - 'idempotency', - 'idempotency_3_ctrl_3_cmp_ceph_sahara']) - @log_snapshot_after_test - def idempotency_3_ctrl_3_cmp_ceph_sahara(self): - """Test idempotency for cluster with Sahara, Ceilometer, - Ceph in HA mode - - Scenario: - 1. Revert snapshot "lcm_deploy_3_ctrl_3_cmp_ceph_sahara" - 2. Check task idempotency - - Duration 180m - Snapshot: "idempotency_3_ctrl_3_cmp_ceph_sahara" - """ - self.show_step(1) - deployment = "3_ctrl_3_cmp_ceph_sahara" - self.env.revert_snapshot('lcm_deploy_{}'.format(deployment)) - self.show_step(2) - asserts.assert_true(self.check_idempotency(deployment), - 'There are non-idempotent tasks. ' - 'Please take a look at the output above!') - self.env.make_snapshot('idempotency_{}'.format(deployment)) - - @test(depends_on=[SetupLCMEnvironment.lcm_deploy_1_ctrl_1_cmp_1_ironic], - groups=['lcm_ironic', - 'idempotency_1_ctrl_1_cmp_1_ironic']) - @log_snapshot_after_test - def idempotency_1_ctrl_1_cmp_1_ironic(self): - """Test idempotency for cluster with Ironic - - Scenario: - 1. Revert snapshot "lcm_deploy_1_ctrl_1_cmp_1_ironic" - 2. Check task idempotency - - Duration 60m - Snapshot: "idempotency_1_ctrl_1_cmp_1_ironic" - """ - self.show_step(1) - deployment = "1_ctrl_1_cmp_1_ironic" - self.env.revert_snapshot('lcm_deploy_{}'.format(deployment)) - self.show_step(2) - asserts.assert_true(self.check_idempotency(deployment), - 'There are non-idempotent tasks. 
'
-                            'Please take a look at the output above!')
-        self.env.make_snapshot('idempotency_{}'.format(deployment))
diff --git a/fuelweb_test/tests/tests_lcm/test_task_coverage.py b/fuelweb_test/tests/tests_lcm/test_task_coverage.py
deleted file mode 100644
index bfe877fe4..000000000
--- a/fuelweb_test/tests/tests_lcm/test_task_coverage.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from proboscis import asserts
-from proboscis import test
-import yaml
-
-from fuelweb_test import logger
-from fuelweb_test.helpers.decorators import log_snapshot_after_test
-from fuelweb_test.tests.base_test_case import SetupEnvironment
-from fuelweb_test.tests.base_test_case import TestBasic
-from fuelweb_test.tests.tests_lcm.base_lcm_test import TASKS_BLACKLIST
-
-
-EXCLUDED_TASKS_FROM_COVERAGE = [
-    "generate_vms",
-    "plugins_rsync",
-    "plugins_setup_repositories",
-    "upload_murano_package",
-    "murano-cfapi-keystone",
-    "murano-keystone",
-    "murano-cfapi",
-    "murano-rabbitmq",
-    "openstack-haproxy-murano",
-    "murano",
-    "murano-db",
-    "disable_keystone_service_token",
-    "openstack-network-routers-ha"
-]
-
-
-@test
-class TaskLCMCoverage(TestBasic):
-    """Test suite for verification of task coverage by LCM tests"""
-    @staticmethod
-    def _load_from_file(path, tasks):
-        """Load fixture from the corresponding yaml file
-
-        :param path: a string, a full path to the fixture file
-        :return: a set of tasks
-        """
-        with open(path) as f:
-            fixture = yaml.load(f)
-        for task in fixture['tasks']:
-            task_name, task_attr = task.items()[0]
-            if task_attr is None:
-                tasks.add(task_name)
-                continue
-            if 'type' in task_attr or 'no_puppet_run' in task_attr:
-                continue
-            tasks.add(task_name)
-        return tasks
-
-    def load_tasks_fixture_file(self, path, subdir, tasks=None):
-        """Load task fixtures
-
-        :param path: a string, relative path to fixture directory
-        :param subdir: a string, indicates whether idempotency or ensurability
-                       fixture is uploaded
-        :param tasks: a set of tasks taken into consideration
-        :return: a set of tasks
-        """
-        if not tasks:
-            tasks = set([])
-        if os.path.isdir(path) and os.path.basename(path) == subdir:
-            for fl in os.listdir(path):
-                filepath = os.path.join(path, fl)
-                tasks.update(self._load_from_file(filepath, tasks) or [])
-        elif os.path.isdir(path):
-            for fl in os.listdir(path):
-                filepath = os.path.join(path, fl)
-                tasks.update(
-                    self.load_tasks_fixture_file(
-                        filepath, subdir, tasks) or [])
-        return tasks
-
-    @test(depends_on=[SetupEnvironment.prepare_release],
-          groups=['task_lcm_coverage',
-                  'task_idempotency_coverage'])
-    @log_snapshot_after_test
-    def task_idempotency_coverage(self):
-        """Check coverage of Fuel deployment tasks by idempotency tests
-
-        Scenario:
-            1. Revert snapshot "ready"
-            2. Download task graph
-            3. Download tasks from existing fixture files
-            4.
Define coverage of Fuel tasks by idempotency tests
-
-        Duration 60m
-        """
-        self.show_step(1)
-        self.env.revert_snapshot('ready')
-
-        self.show_step(2)
-        release_id = self.fuel_web.client.get_release_id()
-        deployment_tasks = self.fuel_web.client.get_release_deployment_tasks(
-            release_id
-        )
-        puppet_tasks = [task['id']
-                        for task in deployment_tasks
-                        if task['type'] == 'puppet']
-        puppet_tasks = set(puppet_tasks)
-
-        self.show_step(3)
-        path = os.path.join(os.path.dirname(__file__), "fixtures")
-        fixture_tasks = self.load_tasks_fixture_file(path, 'idempotency')
-
-        self.show_step(4)
-        task_blacklist = (set(TASKS_BLACKLIST) |
-                          set(EXCLUDED_TASKS_FROM_COVERAGE))
-        general_tasks = puppet_tasks & fixture_tasks
-        extra_deployment_tasks = puppet_tasks - general_tasks - task_blacklist
-        extra_fixtures_tasks = fixture_tasks - general_tasks
-        if extra_fixtures_tasks:
-            logger.warning('There are extra fixture tasks which are not '
-                           'included in the current deployment graph: '
-                           'list of tasks: {}'.format(extra_fixtures_tasks))
-        asserts.assert_equal(extra_deployment_tasks, set(),
-                             'There are new deployment tasks which '
-                             'appeared in the current deployment graph and '
-                             'are not included in the LCM test fixtures: list '
-                             'of tasks: {}'.format(extra_deployment_tasks))
diff --git a/fuelweb_test/tests/tests_mirrors/__init__.py b/fuelweb_test/tests/tests_mirrors/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/fuelweb_test/tests/tests_mirrors/test_create_mirror.py b/fuelweb_test/tests/tests_mirrors/test_create_mirror.py
deleted file mode 100644
index 23c8c8d8f..000000000
--- a/fuelweb_test/tests/tests_mirrors/test_create_mirror.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import test
-from proboscis import SkipTest
-
-from fuelweb_test import logger
-from fuelweb_test.tests.base_test_case import SetupEnvironment
-from fuelweb_test.tests.base_test_case import TestBasic
-
-
-@test(groups=['fuel-mirror'])
-class TestCreateMirror(TestBasic):
-    """Tests to check on CI that the create-mirror functionality works.
-
-    That is a part of functional testing. Integration testing is done in
-    group 'use-mirror'.
-    Tests are run on a subset of the mirror to speed up tests.
-    Tests should be run on a prepared OS snapshot, as they only check the
-    packaging subsystem of the distribution.
-
-    The tests check that a user can install a package with no dependencies,
-    to ensure that the most trivial case works.
-    Then the user needs to install a package whose single dependency has
-    no dependencies of its own.
-    Finally, we need to install a package with multiple dependencies.
-
-    It seems the best way is not to hard-code packages, but to fetch them
-    with a Python debian/rpm package and prepare indexes with it.
-
-    We also need to check the script's download behaviour under
-    connectivity issues.
-
-    The code should support rpms and debs in a DRY manner.
-    The code should be maintainable for future versions (no hardcoded
-    mirror paths).
-    """
-
-    @test(groups=['fuel-mirror'],
-          depends_on=[SetupEnvironment.setup_master])
-    def prepare_mirrors_environment(self):
-        # TODO(akostrikov) Create the same Dockerfile for centos 6.5?
-        # TODO(akostrikov) Test yum.
-        snapshot_name = 'prepare_mirrors_environment'
-        self.check_run(snapshot_name)
-        self.env.revert_snapshot('empty')
-        logger.info('Prepare environment for mirror checks.')
-        with self.env.d_env.get_admin_remote() as remote:
-            remote.check_call('docker pull ubuntu')
-            remote.check_call('docker pull nginx')
-        # TODO(akostrikov) add check that images are present.
-        self.env.make_snapshot(snapshot_name, is_make=True)
-
-    # pylint: disable=no-self-use
-    @test(groups=['fuel-mirror', 'create-mirror'],
-          depends_on=[prepare_mirrors_environment])
-    def no_dependencies_package_install(self):
-        # TODO(akostrikov) Run in ubuntu docker image 'create mirror'
-        # and try to apt-get update
-        raise SkipTest('Not implemented yet')
-
-    @test(groups=['fuel-mirror', 'create-mirror'])
-    def check_download_with_network_issues(self):
-        # TODO(akostrikov) Wait for https://review.openstack.org/#/c/242533/
-        raise SkipTest('Not implemented yet')
-
-    @test(groups=['fuel-mirror', 'create-mirror'])
-    def check_download_with_proxy(self):
-        # TODO(akostrikov) Wait for https://review.openstack.org/#/c/242533/
-        raise SkipTest('Not implemented yet')
-    # pylint: enable=no-self-use
diff --git a/fuelweb_test/tests/tests_mirrors/test_use_mirror.py b/fuelweb_test/tests/tests_mirrors/test_use_mirror.py
deleted file mode 100644
index 60dbf1243..000000000
--- a/fuelweb_test/tests/tests_mirrors/test_use_mirror.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis.asserts import assert_false
-from proboscis import test
-from proboscis import SkipTest
-
-from fuelweb_test.helpers.utils import pretty_log
-from fuelweb_test.settings import DEPLOYMENT_MODE
-from fuelweb_test.settings import MIRROR_UBUNTU
-from fuelweb_test.settings import NEUTRON_SEGMENT
-from fuelweb_test.tests.base_test_case import SetupEnvironment
-from fuelweb_test.tests.base_test_case import TestBasic
-from fuelweb_test import logger
-
-
-@test(groups=['fuel-mirror'])
-class TestUseMirror(TestBasic):
-    """Tests custom mirrors used to deploy an environment.
-
-    Full documentation is in fuel-qa-docs in the /doc folder of fuel-qa.
-    It is autogenerated and can be found by the keyword 'mirror'.
-
-    This test checks not only the create-mirror utility but also the state
-    of our mirrors. The most probable problem is a missing package. It is
-    possible that the OS now requires a new package for bootstrap, or that
-    puppet has a new dependency that is not reflected in our mirror.
- """ - - def _get_cluster_repos(self, cluster_id): - all_repos = self.fuel_web.get_cluster_repos(cluster_id)['value'] - return { - 'ubuntu': [repo for repo in all_repos if 'ubuntu' in repo['name']], - 'mos': [repo for repo in all_repos if 'mos' in repo['name']], - 'all': all_repos} - - def _fix_fuel_mirror_config(self, admin_ip): - cfg_path = '/usr/share/fuel-mirror/ubuntu.yaml' - if MIRROR_UBUNTU != '': - ubuntu_url = MIRROR_UBUNTU.split()[1] - replace_cmd = \ - "sed -i 's|http://archive.ubuntu.com/ubuntu|{0}|g' {1}"\ - .format(ubuntu_url, cfg_path) - logger.info( - 'Replacing ubuntu mirrors in the fuel-mirror config with cmd:' - ' {0}'.format(replace_cmd)) - self.ssh_manager.execute_on_remote(ip=admin_ip, - cmd=replace_cmd) - - @test(groups=['fuel-mirror', 'deploy_with_custom_mirror'], - depends_on=[SetupEnvironment.prepare_slaves_5]) - def deploy_with_custom_mirror(self): - """Create local mirrors for Ubuntu repos using fuel-mirror tool - - Scenario: - 1. Create cluster with neutron networking - 2. Add 3 nodes with controller, ceph-osd roles - 3. Add 1 node with cinder role - 4. Add 1 node with compute role - 5. Fix fuel-mirror config according to cluster repos - 6. Run create command for Ubuntu mirrors - 7. Run apply command for Ubuntu mirrors - 8. Check that only Ubuntu mirrors were changed - 9. Run create, apply commands for mos mirrors - 10. Run apply command for mos-mirrors - 11. Check than mos mirrors were also changed - 12. Run network verification - 13. Deploy the cluster - 14. Run OSTF - 15. Create snapshot - - Duration 90m - Snapshot deploy_with_custom_mirror - """ - self.env.revert_snapshot('ready_with_5_slaves') - admin_ip = self.ssh_manager.admin_ip - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'], - 'sahara': True, - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': True - } - ) - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['cinder'], - 'slave-05': ['compute'] - } - ) - self.show_step(5) - self._fix_fuel_mirror_config(admin_ip) - cluster_repos = self._get_cluster_repos(cluster_id) - message = pretty_log({'Cluster repos': cluster_repos['all']}) - logger.info(message) - self.show_step(6) - create_cmd = 'fuel-mirror create -P ubuntu -G ubuntu ' \ - '--log-file /var/log/ubuntu_mirrors_create.log' - self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_cmd) - self.show_step(7) - apply_cmd = 'fuel-mirror apply --replace -P ubuntu -G ubuntu' - self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_cmd) - - self.show_step(8) - - cluster_repos = self._get_cluster_repos(cluster_id) - remote_ubuntu_repos = [ - repo for repo in cluster_repos['ubuntu'] - if admin_ip not in repo['uri'] and - '{settings.MASTER_IP}' not in repo['uri']] - local_mos_repos = [ - repo for repo in cluster_repos['mos'] - if admin_ip in repo['uri'] or - '{settings.MASTER_IP}' in repo['uri']] - repos_log = pretty_log( - {'All': cluster_repos['all'], - 'Remote Ubuntu': remote_ubuntu_repos, - 'Local MOS:': local_mos_repos}) - logger.info(repos_log) - - assert_false(remote_ubuntu_repos, - message="There are some remote Ubuntu repositories: " - "{repos}".format(repos=remote_ubuntu_repos)) - # NOTE Main MOS repository is 
always local in our tests - assert_false( - len(local_mos_repos) > 1, - message="More than one MOS repo became local:{repos}".format( - repos=local_mos_repos - ) - ) - - self.show_step(9) - create_cmd = 'fuel-mirror create -P ubuntu -G mos ' \ - '--log-file /var/log/mos_mirrors_create.log' - self.env.admin_actions.ensure_cmd(create_cmd) - - self.show_step(10) - apply_cmd = 'fuel-mirror apply -P ubuntu -G mos' - self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_cmd) - - self.show_step(11) - cluster_repos = self._get_cluster_repos(cluster_id)['all'] - remote_repos = [ - repo for repo in cluster_repos - if admin_ip not in repo['uri'] and - '{settings.MASTER_IP}' not in repo['uri']] - message = pretty_log(cluster_repos) - logger.info(message) - assert_false(remote_repos, - message="There are some remote repositories: " - "{repos}".format(repos=remote_repos)) - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - self.show_step(13) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(14) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.show_step(15) - self.env.make_snapshot('deploy_with_custom_mirror') - - # pylint: disable=no-self-use - @test(groups=['fuel-mirror', 'use-mirror']) - def deploy_no_official_access(self): - # TODO(akostrikov) add firewall rules to verify that there is no - # connection to official mirrors during mirror creation and deployment. - raise SkipTest('Not implemented yet') - - @test(groups=['fuel-mirror', 'use-mirror']) - def deploy_with_proxy(self): - # TODO(akostrikov) add tests to verify that fuel-mirror works with - # proxies too. - raise SkipTest('Not implemented yet') - # pylint: enable=no-self-use diff --git a/fuelweb_test/tests/tests_multirole/__init__.py b/fuelweb_test/tests/tests_multirole/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_multirole/test_mongo_multirole.py b/fuelweb_test/tests/tests_multirole/test_mongo_multirole.py deleted file mode 100644 index fe1e5affb..000000000 --- a/fuelweb_test/tests/tests_multirole/test_mongo_multirole.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.settings import iface_alias -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['mongo_multirole']) -class MongoMultirole(TestBasic): - """MongoMultirole""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=['ha_ceilometer_untag_network'], - enabled=False) - @log_snapshot_after_test - def ha_ceilometer_untag_network(self): - """Deployment with 3 controllers, NeutronVLAN and untag network, - with Ceilometer - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceilometer - 4. 
Add 3 controller
-            5. Add 1 compute
-            6. Add 3 mongo+cinder
-            7. Move Storage network to eth1 and specify vlan start
-            8. Move Management network to eth2 and untag it
-            9. Verify networks
-            10. Deploy the environment
-            11. Verify networks
-            12. Run OSTF tests
-
-        Duration 180m
-        Snapshot ha_ceilometer_untag_network
-        """
-        self.env.revert_snapshot('ready_with_9_slaves')
-        data = {
-            'ceilometer': True,
-            'tenant': 'mongomultirole',
-            'user': 'mongomultirole',
-            'password': 'mongomultirole',
-            "net_provider": 'neutron',
-            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
-        }
-
-        self.show_step(1, initialize=True)
-        self.show_step(2)
-        self.show_step(3)
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            settings=data
-        )
-
-        self.show_step(4)
-        self.show_step(5)
-        self.show_step(6)
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['controller'],
-                'slave-03': ['controller'],
-                'slave-04': ['compute'],
-                'slave-05': ['mongo', 'cinder'],
-                'slave-06': ['mongo', 'cinder'],
-                'slave-07': ['mongo', 'cinder']
-            }
-        )
-        self.show_step(7)
-        self.show_step(8)
-        vlan_turn_on = {'vlan_start': 102}
-        interfaces = {
-            iface_alias('eth0'): ['private'],
-            iface_alias('eth1'): ['storage', 'public'],
-            iface_alias('eth2'): ['management'],
-            iface_alias('eth3'): [],
-            iface_alias('eth4'): []
-        }
-
-        nets = self.fuel_web.client.get_networks(cluster_id)['networks']
-        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
-        for node in nailgun_nodes:
-            self.fuel_web.update_node_networks(node['id'], interfaces)
-
-        for net in nets:
-            if net['name'] == 'storage':
-                net.update(vlan_turn_on)
-
-        self.fuel_web.client.update_network(cluster_id, networks=nets)
-
-        self.show_step(9)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(10)
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.show_step(11)
-        self.fuel_web.verify_network(cluster_id)
-
-        self.show_step(12)
-        self.fuel_web.run_ostf(cluster_id)
-        self.env.make_snapshot('ha_ceilometer_untag_network')
-
-    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
-          groups=['mongo_ceph_with_ceilometer'],
-          enabled=False)
-    @log_snapshot_after_test
-    def mongo_ceph_with_ceilometer(self):
-        """Deployment with 3 controllers, NeutronVLAN, with Ceph,
-        with Ceilometer
-
-        Scenario:
-            1. Create new environment
-            2. Choose Neutron, VLAN
-            3. Choose Ceph for volumes and Ceph for images, ceph ephemeral,
-               ceph for objects
-            4. Choose Ceilometer
-            5. Add 3 controller+mongo
-            6. Add 3 ceph
-            7. Add 1 compute node
-            8. Verify networks
-            9. Deploy the environment
-            10. Verify networks
-            11.
Run OSTF tests - - Duration 180m - Snapshot mongo_ceph_with_ceilometer - """ - self.env.revert_snapshot('ready_with_9_slaves') - data = { - 'volumes_lvm': False, - 'ceilometer': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'tenant': 'mongomultirole', - 'user': 'mongomultirole', - 'password': 'mongomultirole', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'mongo'], - 'slave-02': ['controller', 'mongo'], - 'slave-03': ['controller', 'mongo'], - 'slave-04': ['ceph-osd'], - 'slave-05': ['ceph-osd'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['compute'] - } - ) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('mongo_ceph_with_ceilometer') diff --git a/fuelweb_test/tests/tests_multirole/test_multirole_group_1.py b/fuelweb_test/tests/tests_multirole/test_multirole_group_1.py deleted file mode 100644 index 7110cb1ef..000000000 --- a/fuelweb_test/tests/tests_multirole/test_multirole_group_1.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["multirole_group_1"]) -class MultiroleGroup1(TestBasic): - """MultiroleGroup1.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["controller_ceph_and_compute_cinder"]) - @log_snapshot_after_test - def controller_ceph_and_compute_cinder(self): - """Deployment with 3 Controllers plus Ceph, Neutron Vxlan - and non-default disks partition - - Scenario: - 1. Create new environment - 2. Choose Neutron Vxlan - 3. Choose Cinder for volumes and Ceph for images - 4. Add 3 controller+ceph - 5. Add 1 compute+cinder - 6. Verify networks - 7. Change disk configuration for all Ceph nodes. - Change 'Ceph' volume for vdc - 8. Deploy the environment - 9. Verify networks - 10. 
Run OSTF tests - - Duration 180m - Snapshot controller_ceph_and_compute_cinder - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - data = { - 'volumes_lvm': True, - 'images_ceph': True, - 'tenant': 'controllercephcomputecinder', - 'user': 'controllercephcomputecinder', - 'password': 'controllercephcomputecinder', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'], - } - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(4) - self.show_step(5) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['compute', 'cinder'] - } - ) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], ceph_image_size) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("controller_ceph_and_compute_cinder") - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["controller_ceph_cinder_compute_ceph_cinder"]) - @log_snapshot_after_test - def controller_ceph_cinder_compute_ceph_cinder(self): - """Deployment with 3 Controllers plus Ceph plus Cinder, Neutron Vlan, - cinder for volumes, ceph for images/ephemeral/objects - - Scenario: - 1. Create new environment - 2. Choose Neutron, Vlan - 3. Choose cinder for volumes and ceph for images/ephemeral/objects - 4. Add 3 controllers+ceph+cinder - 5. Add 1 compute+ceph+cinder - 6. Verify networks - 7. Deploy the environment - 8. Verify networks - 9. 
Run OSTF tests - - Duration 180m - Snapshot controller_ceph_cinder_compute_ceph_cinder - """ - - self.env.revert_snapshot("ready_with_5_slaves") - - data = { - 'volumes_lvm': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'tenant': 'controllercephcinder', - 'user': 'controllercephcinder', - 'password': 'controllercephcinder' - } - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.show_step(4) - self.show_step(5) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd', 'cinder'], - 'slave-02': ['controller', 'ceph-osd', 'cinder'], - 'slave-03': ['controller', 'ceph-osd', 'cinder'], - 'slave-04': ['compute', 'ceph-osd', 'cinder'] - } - ) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.check_ceph_status(cluster_id) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - self.show_step(9) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("controller_ceph_cinder_compute_ceph_cinder") diff --git a/fuelweb_test/tests/tests_os_components/__init__.py b/fuelweb_test/tests/tests_os_components/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_os_components/test_mixed_os_components.py b/fuelweb_test/tests/tests_os_components/test_mixed_os_components.py deleted file mode 100644 index fb2e3c164..000000000 --- a/fuelweb_test/tests/tests_os_components/test_mixed_os_components.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["mixed_os_components", "additional_components"]) -class MixedComponents(TestBasic): - """MixedComponents""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["mixed_components_murano_sahara_ceilometer"], - enabled=False) - @log_snapshot_after_test - def mixed_components_murano_sahara_ceilometer(self): - """Deployment with 3 controllers, NeutronTUN, with Murano, - Sahara and Ceilometer - - Scenario: - 1. Create new environment - 2. Choose Neutron, TUN - 3. Choose Murano - 4. Choose Sahara - 5. Choose Ceilometer - 6. Add 3 controller - 7. Add 1 compute - 8. Add 1 cinder - 9. Add 3 mongo - 10. Verify networks - 11. Deploy the environment - 12. Verify networks - 13. 
Run OSTF tests - - Duration: 300 min - Snapshot: mixed_components_murano_sahara_ceilometer - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - data = { - 'murano': True, - 'sahara': True, - 'ceilometer': True, - 'tenant': 'mixedcomponents', - 'user': 'mixedcomponents', - 'password': 'mixedcomponents', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'], - } - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(6) - self.show_step(7) - self.show_step(8) - self.show_step(9) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['cinder'], - 'slave-06': ['mongo'], - 'slave-07': ['mongo'], - 'slave-08': ['mongo'] - } - ) - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - - self.show_step(11) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - - self.show_step(13) - self.fuel_web.run_ostf( - cluster_id, - test_sets=['smoke', 'sanity', 'ha', 'tests_platform'], - timeout=3600 - ) - - self.env.make_snapshot('mixed_components_murano_sahara_ceilometer') diff --git a/fuelweb_test/tests/tests_os_components/test_murano_os_component.py b/fuelweb_test/tests/tests_os_components/test_murano_os_component.py deleted file mode 100644 index 49d27d7e5..000000000 --- a/fuelweb_test/tests/tests_os_components/test_murano_os_component.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['murano_os_component', 'additional_components'], - enabled=False) -class MuranoOSComponent(TestBasic): - """MuranoOSComponent""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=['murano_neutron_vlan']) - @log_snapshot_after_test - def murano_neutron_vlan(self): - """Deployment with 3 controllers, NeutronVLAN, with Murano - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Murano - 4. Add 3 controller - 5. Add 2 compute - 6. Add 1 cinder - 7. Verify networks - 8. Deploy the environment - 9. Verify networks - 10. 
Run OSTF tests - - Duration: 180 min - Snapshot: murano_neutron_vlan - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - data = { - 'murano': True, - 'tenant': 'muranooscomponent', - 'user': 'muranooscomponent', - 'password': 'muranooscomponent', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'] - } - ) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id, test_sets=['smoke', 'sanity', 'ha', - 'tests_platform']) - - self.env.make_snapshot('murano_neutron_vlan') diff --git a/fuelweb_test/tests/tests_os_components/test_sahara_os_component.py b/fuelweb_test/tests/tests_os_components/test_sahara_os_component.py deleted file mode 100644 index a60d624a8..000000000 --- a/fuelweb_test/tests/tests_os_components/test_sahara_os_component.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['sahara_os_component', 'additional_components']) -class SaharaOSComponent(TestBasic): - """SaharaOSComponent""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=['sahara_ceph_ephemeral']) - @log_snapshot_after_test - def sahara_ceph_ephemeral(self): - """Deployment with 3 controllers, NeutronVLAN, with Ceph for volumes, - images, ephemeral, object with Sahara - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Choose Ceph for volumes, images, ephemeral, object - 4. Choose Sahara - 5. Add 3 controller - 6. Add 2 compute - 7. Add 3 ceph nodes - 8. Verify networks - 9. Deploy the environment - 10. Verify networks - 11. 
Run OSTF tests - - Duration: 180 min - Snapshot: sahara_ceph_ephemeral - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'sahara': True, - 'tenant': 'sharaoscomponent', - 'user': 'sharaoscomponent', - 'password': 'sharaoscomponent', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(5) - self.show_step(6) - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'] - } - ) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id, test_sets=['smoke', 'sanity', 'ha', - 'tests_platform']) - - self.env.make_snapshot('sahara_ceph_ephemeral') diff --git a/fuelweb_test/tests/tests_patching/__init__.py b/fuelweb_test/tests/tests_patching/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_patching/test_patching.py b/fuelweb_test/tests/tests_patching/test_patching.py deleted file mode 100644 index a64e4cd4f..000000000 --- a/fuelweb_test/tests/tests_patching/test_patching.py +++ /dev/null @@ -1,457 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import time - -from proboscis import test -from proboscis.asserts import assert_is_not_none -from proboscis.asserts import assert_true -# pylint: disable=import-error -# noinspection PyUnresolvedReferences -from six.moves.urllib.request import urlopen -# pylint: enable=import-error - -from devops.helpers.helpers import wait -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import patching -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.rally import RallyBenchmarkTest -from fuelweb_test.helpers.rally import RallyResult -from fuelweb_test.helpers.utils import install_pkg -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["patching"]) -class PatchingTests(TestBasic): - """PatchingTests.""" # TODO documentation - - def __init__(self): - self.snapshot_name = settings.PATCHING_SNAPSHOT - self.pkgs = settings.PATCHING_PKGS - super(PatchingTests, self).__init__() - - @test(groups=['prepare_patching_environment']) - def prepare_patching_environment(self): - """Prepare environment for patching (OpenStack) - - Scenario: - 1. 
Take existing environment created by previous deployment test - and snapshot it - 2. Revert snapshot and check that environment is alive - 3. Check that deployed environment is affected by the bug and - verification scenario fails without applied patches - - Duration: 10m - """ - - logger.debug('Creating snapshot of environment deployed for patching.') - self.env.make_snapshot(snapshot_name=self.snapshot_name, - is_make=True) - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.fuel_web.get_last_created_cluster() - assert_is_not_none(cluster_id, 'Environment for patching not found.') - slaves = self.fuel_web.client.list_cluster_nodes(cluster_id) - logger.info('Checking that environment is affected ' - 'by bug #{0}...'.format(settings.PATCHING_BUG_ID)) - is_environment_affected = False - try: - patching.verify_fix(self.env, target='environment', slaves=slaves) - except AssertionError: - is_environment_affected = True - assert_true(is_environment_affected, - 'Deployed environment for testing patches is not affected ' - 'by bug #{0} or provided verification scenario is not ' - 'correct! Fix verification passed without applying ' - 'patches!'.format(settings.PATCHING_BUG_ID)) - - @test(groups=["patching_environment"], - depends_on_groups=['prepare_patching_environment']) - @log_snapshot_after_test - def patching_environment(self): - """Apply patches on deployed environment - - Scenario: - 1. Revert snapshot of deployed environment - 2. Run Rally benchmark tests and store results - 3. Download patched packages on master node and make local repositories - 4. Add new local repositories on slave nodes - 5. Download late artifacts and clean generated images if needed - 6. Perform actions required to apply patches - 7. Verify that fix works - 8. Run OSTF - 9. 
Run Rally benchmark tests and compare results - - Duration 15m - """ - - # Step #1 - if not self.env.revert_snapshot(self.snapshot_name): - raise PatchingTestException('Environment revert from snapshot "{0}' - '" failed.'.format(self.snapshot_name)) - # Check that environment exists and it's ready for patching - cluster_id = self.fuel_web.get_last_created_cluster() - assert_is_not_none(cluster_id, 'Environment for patching not found.') - - # Step #2 - if settings.PATCHING_RUN_RALLY: - rally_benchmarks = {} - benchmark_results1 = {} - for tag in set(settings.RALLY_TAGS): - rally_benchmarks[tag] = RallyBenchmarkTest( - container_repo=settings.RALLY_DOCKER_REPO, - environment=self.env, - cluster_id=cluster_id, - test_type=tag - ) - benchmark_results1[tag] = rally_benchmarks[tag].run() - logger.debug(benchmark_results1[tag].show()) - - # Step #3 - patching_repos = patching.add_remote_repositories( - self.env, settings.PATCHING_MIRRORS) - if settings.PATCHING_MASTER_MIRRORS: - patching_master_repos = patching.add_remote_repositories( - self.env, settings.PATCHING_MASTER_MIRRORS, - prefix_name='custom_master_repo') - - # Step #4 - slaves = self.fuel_web.client.list_cluster_nodes(cluster_id) - for repo in patching_repos: - patching.connect_slaves_to_repo(self.env, slaves, repo) - if settings.PATCHING_MASTER_MIRRORS: - with self.env.d_env.get_admin_remote() as remote: - for repo in patching_master_repos: - install_pkg(remote, 'yum-utils') - patching.connect_admin_to_repo(self.env, repo) - - # Step #5 - if settings.LATE_ARTIFACTS_JOB_URL: - data = urlopen(settings.LATE_ARTIFACTS_JOB_URL + - "/artifact/artifacts/artifacts.txt") - for package in data: - os.system("wget --directory-prefix" - " {0} {1}".format(settings.UPDATE_FUEL_PATH, - package)) - self.env.admin_actions.upload_packages( - local_packages_dir=settings.UPDATE_FUEL_PATH, - centos_repo_path='/var/www/nailgun/centos/auxiliary', - ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU) - if settings.REGENERATE_ENV_IMAGE: - self.env.admin_actions.clean_generated_image( - settings.OPENSTACK_RELEASE) - - # Step #6 - logger.info('Applying fix...') - patching.apply_patches(self.env, target='environment', slaves=slaves) - - # Step #7 - logger.info('Verifying fix...') - patching.verify_fix(self.env, target='environment', slaves=slaves) - - # Step #8 - # If OSTF fails (sometimes services aren't ready after - # slaves nodes reboot) sleep 5 minutes and try again - try: - self.fuel_web.run_ostf(cluster_id=cluster_id) - except AssertionError: - time.sleep(300) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - # Step #9 - if settings.PATCHING_RUN_RALLY: - benchmark_results2 = {} - for tag in set(settings.RALLY_TAGS): - benchmark_results2[tag] = rally_benchmarks[tag].run() - logger.debug(benchmark_results2[tag].show()) - - rally_benchmarks_passed = True - - for tag in set(settings.RALLY_TAGS): - if not RallyResult.compare(benchmark_results1[tag], - benchmark_results2[tag], - deviation=0.2): - rally_benchmarks_passed = False - - assert_true(rally_benchmarks_passed, - "Rally benchmarks show performance degradation " - "after packages patching.") - - number_of_nodes = len(self.fuel_web.client.list_cluster_nodes( - cluster_id)) - - cluster_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - roles_list = [node['roles'] for node in cluster_nodes] - unique_roles = [] - - for role in roles_list: - if not [unique_role for unique_role in unique_roles - if set(role) == set(unique_role)]: - unique_roles.append(role) - - 
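# Illustrative aside (not from the original file): the loop above keeps one
# representative roles list per distinct *set* of roles, so that
# ['compute', 'ceph-osd'] and ['ceph-osd', 'compute'] count as the same
# combination. A minimal self-contained sketch of the same idiom, with
# hypothetical sample data; any(...) is equivalent to the truthiness test
# on the list comprehension used in the original code:
roles_list = [['controller'], ['compute', 'ceph-osd'],
              ['ceph-osd', 'compute'], ['controller']]
unique_roles = []
for role in roles_list:
    if not any(set(role) == set(known) for known in unique_roles):
        unique_roles.append(role)
assert unique_roles == [['controller'], ['compute', 'ceph-osd']]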
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[ - number_of_nodes:number_of_nodes + 1]) - - for roles in unique_roles: - if "mongo" in roles: - continue - - node = {'slave-0{}'.format(number_of_nodes + 1): - [role for role in roles]} - logger.debug("Adding new node to the cluster: {0}".format(node)) - self.fuel_web.update_nodes( - cluster_id, node) - self.fuel_web.deploy_cluster_wait(cluster_id, - check_services=False) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['sanity', 'smoke', 'ha']) - - if "ceph-osd" in roles: - with self.fuel_web.get_ssh_for_node( - 'slave-0{}'.format(number_of_nodes + 1) - ) as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - - nailgun_node = self.fuel_web.update_nodes( - cluster_id, node, False, True) - nodes = [_node for _node in nailgun_node - if _node["pending_deletion"] is True] - self.fuel_web.deploy_cluster(cluster_id) - self.fuel_web.wait_node_is_discovered(nodes[0]) - - # sanity set isn't running due to LP1457515 - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'ha']) - - -@test(groups=["patching_master_tests"]) -class PatchingMasterTests(TestBasic): - - def __init__(self): - self.snapshot_name = settings.PATCHING_SNAPSHOT - self.pkgs = settings.PATCHING_PKGS - super(PatchingMasterTests, self).__init__() - - @test(groups=['prepare_patching_master_environment']) - def prepare_patching_master_environment(self): - """Prepare environment for patching (master node) - - Scenario: - 1. Take existing environment created by previous deployment test - and snapshot it - 2. Revert snapshot and check that environment is alive - 3. Check that deployed environment is affected by the bug and - verification scenario fails without applied patches - - Duration: 10m - """ - - logger.debug('Creating snapshot of environment deployed for patching.') - self.env.make_snapshot(snapshot_name=self.snapshot_name, - is_make=True) - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.fuel_web.get_last_created_cluster() - assert_is_not_none(cluster_id, 'Environment for patching not found.') - slaves = self.fuel_web.client.list_cluster_nodes(cluster_id) - logger.info('Checking that environment is affected ' - 'by bug #{0}...'.format(settings.PATCHING_BUG_ID)) - is_environment_affected = False - try: - patching.verify_fix(self.env, target='environment', slaves=slaves) - except AssertionError: - is_environment_affected = True - assert_true(is_environment_affected, - 'Deployed environment for testing patches is not affected ' - 'by bug #{0} or provided verification scenario is not ' - 'correct! Fix verification passed without applying ' - 'patches!'.format(settings.PATCHING_BUG_ID)) - - @test(groups=["patching_test"], - depends_on_groups=['prepare_patching_master_environment']) - @log_snapshot_after_test - def patching_test(self): - """Apply patches on deployed master - - Scenario: - 1. Download patched packages on master node and make local repositories - 2. Download late artifacts and clean generated images if needed - 3. Perform actions required to apply patches - 4. Verify that fix works - 5. Run OSTF - 6. Run network verification - 7. Reset and delete cluster - 8. 
Bootstrap 3 slaves - - Duration 30m - """ - - if not self.env.revert_snapshot(self.snapshot_name): - raise PatchingTestException('Environment revert from snapshot "{0}' - '" failed.'.format(self.snapshot_name)) - - # Step #1 - with self.env.d_env.get_admin_remote() as remote: - install_pkg(remote, 'yum-utils') - patching_repos = patching.add_remote_repositories( - self.env, settings.PATCHING_MASTER_MIRRORS) - - for repo in patching_repos: - patching.connect_admin_to_repo(self.env, repo) - - # Step #2 - if settings.LATE_ARTIFACTS_JOB_URL: - data = urlopen( - settings.LATE_ARTIFACTS_JOB_URL + - "/artifact/artifacts/artifacts.txt") - for package in data: - os.system("wget --directory-prefix" - " {0} {1}".format(settings.UPDATE_FUEL_PATH, - package)) - self.env.admin_actions.upload_packages( - local_packages_dir=settings.UPDATE_FUEL_PATH, - centos_repo_path='/var/www/nailgun/centos/auxiliary', - ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU) - if settings.REGENERATE_ENV_IMAGE: - self.env.admin_actions.clean_generated_image( - settings.OPENSTACK_RELEASE) - - # Step #3 - logger.info('Applying fix...') - patching.apply_patches(self.env, target='master') - - # Step #4 - logger.info('Verifying fix...') - patching.verify_fix(self.env, target='master') - - # Step #5 - active_nodes = [] - for node in self.env.d_env.nodes().slaves: - if node.driver.node_active(node): - active_nodes.append(node) - logger.debug('active nodes are {}'.format(active_nodes)) - cluster_id = self.fuel_web.get_last_created_cluster() - if self.fuel_web.get_last_created_cluster(): - number_of_nodes = len(self.fuel_web.client.list_cluster_nodes( - cluster_id)) - self.fuel_web.run_ostf(cluster_id=cluster_id) - if number_of_nodes > 1: - self.fuel_web.verify_network(cluster_id) - - cluster_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - roles_list = [node['roles'] for node in cluster_nodes] - unique_roles = [] - - for role in roles_list: - if not [unique_role for unique_role in unique_roles - if set(role) == set(unique_role)]: - unique_roles.append(role) - - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[ - number_of_nodes:number_of_nodes + 1]) - - for roles in unique_roles: - if "mongo" in roles: - continue - node = {'slave-0{}'.format(number_of_nodes + 1): - [role for role in roles]} - logger.debug("Adding new node to" - " the cluster: {0}".format(node)) - self.fuel_web.update_nodes( - cluster_id, node) - self.fuel_web.deploy_cluster_wait(cluster_id, - check_services=False) - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['sanity', 'smoke', 'ha']) - - if "ceph-osd" in roles: - with self.fuel_web.get_ssh_for_node( - 'slave-0{}'.format(number_of_nodes + 1) - ) as remote: - self.fuel_web.prepare_ceph_to_delete(remote) - nailgun_node = self.fuel_web.update_nodes( - cluster_id, node, False, True) - nodes = [_node for _node in nailgun_node - if _node["pending_deletion"] is True] - self.fuel_web.deploy_cluster(cluster_id) - self.fuel_web.wait_node_is_discovered(nodes[0]) - - # sanity set isn't running due to LP1457515 - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'ha']) - - active_nodes = [] - for node in self.env.d_env.nodes().slaves: - if node.driver.node_active(node): - active_nodes.append(node) - logger.debug('active nodes are {}'.format(active_nodes)) - - self.fuel_web.stop_reset_env_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - active_nodes, timeout=10 * 60) - self.fuel_web.client.delete_cluster(cluster_id) - 
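# Illustrative aside: the wait(...) call just below (devops.helpers.helpers
# wait) polls a predicate until it returns a truthy value or a timeout
# expires. A rough standard-library-only sketch of that behaviour; an
# approximation, not the real helper's signature:
import time

def wait_until(predicate, timeout, interval=5, timeout_msg='Timeout'):
    # Re-evaluate the zero-argument predicate every `interval` seconds;
    # fail loudly once `timeout` seconds have passed.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError(timeout_msg)

# Usage in the spirit of the call below:
#     wait_until(lambda: len(client.list_nodes()) == expected, timeout=5 * 60)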
wait((lambda: len( - self.fuel_web.client.list_nodes()) == number_of_nodes), - timeout=5 * 60, - timeout_msg='Timeout: Nodes are not discovered') - - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3]) - - @test(groups=["patching_master"], - depends_on_groups=['patching_test']) - @log_snapshot_after_test - def patching_master(self): - """ - Deploy cluster after master node patching - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 50m - """ - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'tenant': 'patchingMaster', - 'user': 'patchingMaster', - 'password': 'patchingMaster' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - -class PatchingTestException(Exception): - pass diff --git a/fuelweb_test/tests/tests_scale/__init__.py b/fuelweb_test/tests/tests_scale/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_scale/test_scale_group_1.py b/fuelweb_test/tests/tests_scale/test_scale_group_1.py deleted file mode 100644 index 8967759bf..000000000 --- a/fuelweb_test/tests/tests_scale/test_scale_group_1.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import checkers -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_scale_group_1"]) -class HaScaleGroup1(TestBasic): - """HaScaleGroup1.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_controllers_stop"]) - @log_snapshot_after_test - def add_controllers_stop(self): - """Add 2 controllers, deploy, stop deploy, remove added controllers, - add 2 controllers once again - - Scenario: - 1. Create cluster - 2. Add 1 controller node - 3. Deploy the cluster - 4. Add 2 controller nodes - 5. Start deployment - 6. Stop deployment while the new controllers re-deploy - 7. Delete 2 added controllers - 8. Add 2 new controllers - 9. Deploy the cluster - 10. Verify networks - 11. 
Run OSTF - - Duration 120m - Snapshot add_controllers_stop - - """ - self.env.revert_snapshot("ready_with_9_slaves") - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE) - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'] - } - ) - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - nodes = {'slave-02': ['controller'], - 'slave-03': ['controller']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(5) - self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id, - progress=60) - - self.show_step(6) - self.fuel_web.stop_deployment_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:3], timeout=10 * 60) - - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - - self.show_step(8) - nodes = {'slave-04': ['controller'], - 'slave-05': ['controller']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("add_controllers_stop") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_ceph_stop"]) - @log_snapshot_after_test - def add_ceph_stop(self): - """Add 2 ceph-osd, deploy, stop deploy, re-deploy again - - Scenario: - 1. Create cluster - 2. Add 3 controller, 2 compute, 2 ceph nodes - 3. Deploy the cluster - 4. Add 2 ceph nodes - 5. Start deployment - 6. Stop deployment while the ceph nodes deploy - 7. Deploy changes - 8. Verify networks - 9. 
Run OSTF - - Duration 120m - Snapshot add_ceph_stop - - """ - self.env.revert_snapshot("ready_with_9_slaves") - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'osd_pool_size': "2", - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'] - } - ) - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'] - } - ) - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - nodes = {'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(5) - self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id, - progress=5) - self.show_step(6) - self.fuel_web.stop_deployment_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:9], timeout=10 * 60) - - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("add_ceph_stop") - - -@test(groups=["five_controllers_ceph_restart"]) -class FiveControllerCephRestart(TestBasic): - """HAFiveControllerCephNeutronRestart.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_all], - groups=["deploy_reset_five_ceph_controllers"]) - @log_snapshot_after_test - def deploy_reset_five_ceph_controllers(self): - """Deployment with 5 controllers, NeutronVLAN, with Ceph for volumes, - stop on deployment - - Scenario: - 1. Start deploy environment, 5 controller, 2 compute, 2 ceph nodes, - Neutron VLAN - 2. Change default partitioning scheme for both ceph nodes for 'vdc' - 3. Stop the deployment process during controller deployment - 4. Change openstack username, password, tenant - 5. Deploy cluster - 6. Wait for HA services to be ready - 7. Wait for OS services to be ready - 8. Verify networks - 9. 
Run OSTF tests - - Duration 120m - Snapshot deploy_reset_five_ceph_controllers - - """ - - self.env.revert_snapshot("ready_with_all_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'osd_pool_size': "2", - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'], - 'tenant': 'simpleVlan', - 'user': 'simpleVlan', - 'password': 'simpleVlan' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['compute'], - 'slave-07': ['compute'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - - self.show_step(2) - ceph_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'], - role_status='pending_roles') - for ceph_node in ceph_nodes: - ceph_image_size = self.fuel_web.\ - update_node_partitioning(ceph_node, node_role='ceph') - - self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id, - progress=5) - self.show_step(3) - self.fuel_web.stop_deployment_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:9], timeout=10 * 60) - self.show_step(4) - attributes = self.fuel_web.client.get_cluster_attributes(cluster_id) - access_attr = attributes['editable']['access'] - access_attr['user']['value'] = 'myNewUser' - access_attr['password']['value'] = 'myNewPassword' - access_attr['tenant']['value'] = 'myNewTenant' - self.fuel_web.client.update_cluster_attributes(cluster_id, attributes) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(6) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.show_step(7) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=10 * 60) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - self.show_step(9) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - for ceph in ceph_nodes: - checkers.check_ceph_image_size(ceph['ip'], - expected_size=ceph_image_size) - - self.env.make_snapshot("deploy_reset_five_ceph_controllers") diff --git a/fuelweb_test/tests/tests_scale/test_scale_group_2.py b/fuelweb_test/tests/tests_scale/test_scale_group_2.py deleted file mode 100644 index cd8deb81d..000000000 --- a/fuelweb_test/tests/tests_scale/test_scale_group_2.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
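# Illustrative aside: step 4 of the test above changes the OpenStack
# credentials through a read-modify-write of the cluster attributes
# document (get_cluster_attributes / update_cluster_attributes). A minimal
# sketch of that pattern against a plain dict shaped like the attributes
# blob; the values are hypothetical:
attributes = {'editable': {'access': {'user': {'value': 'admin'},
                                      'password': {'value': 'admin'},
                                      'tenant': {'value': 'admin'}}}}
access_attr = attributes['editable']['access']
access_attr['user']['value'] = 'myNewUser'
access_attr['password']['value'] = 'myNewPassword'
access_attr['tenant']['value'] = 'myNewTenant'
assert attributes['editable']['access']['user']['value'] == 'myNewUser'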
- -from proboscis import test -from proboscis.asserts import assert_equal - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_scale_group_2"]) -class HaScaleGroup2(TestBasic): - """HaScaleGroup2.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["ha_scale_group_2_cluster"]) - @log_snapshot_after_test - def ha_scale_group_2_cluster(self): - """Deploy cluster with 3 controllers and 1 compute - - Scenario: - 1. Create cluster - 2. Add 3 controller nodes and 1 compute - 3. Deploy the cluster - - Duration 80m - Snapshot ha_scale_group_2_cluster - - """ - self.env.revert_snapshot("ready_with_5_slaves") - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE) - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'] - } - ) - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.env.make_snapshot("ha_scale_group_2_cluster", - is_make=True) - - @test(depends_on=[ha_scale_group_2_cluster], - groups=["replace_primary_controller"]) - @log_snapshot_after_test - def replace_primary_controller(self): - """Replace the primary controller in the cluster - - Scenario: - 1. Create cluster - 2. Add 3 controller nodes and 1 compute - 3. Deploy the cluster - 4. Destroy primary controller - 5. Wait controller offline - 6. Remove offline controller from cluster - 7. Add 1 new controller - 8. Deploy changes - 9. Run OSTF - 10. Verify networks - - Duration 120m - Snapshot replace_primary_controller - - """ - self.env.revert_snapshot("ha_scale_group_2_cluster") - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(4) - primary_controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - primary_controller_id = self.fuel_web.get_nailgun_node_by_devops_node( - primary_controller)['id'] - primary_controller.destroy() - self.show_step(5) - self.fuel_web.wait_node_is_offline(primary_controller) - self.show_step(6) - self.fuel_web.delete_node(primary_controller_id) - self.fuel_web.wait_task_success('deployment') - - self.show_step(7) - nodes = {'slave-05': ['controller']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.env.make_snapshot("replace_primary_controller") - - @test(depends_on=[ha_scale_group_2_cluster], - groups=["remove_controllers"]) - @log_snapshot_after_test - def remove_controllers(self): - """Deploy cluster with 3 controllers, remove 2 controllers - and re-deploy, check hosts and corosync - - Scenario: - 1. Create cluster - 2. Add 3 controller, 1 compute - 3. Deploy the cluster - 4. Remove 2 controllers - 5. Deploy changes - 6. Run OSTF - 7. Verify networks - 8. Check /etc/hosts that removed nodes aren't present - 9. 
Check corosync.conf that removed nodes aren't present - - Duration 120m - Snapshot remove_controllers - - """ - self.env.revert_snapshot("ha_scale_group_2_cluster") - self.show_step(1, initialize=True) - self.show_step(2) - self.show_step(3) - cluster_id = self.fuel_web.get_last_created_cluster() - - hosts = [] - - for node_name in ('slave-02', 'slave-03'): - node = self.fuel_web.get_nailgun_node_by_devops_node( - self.env.d_env.get_node(name=node_name)) - hostname = ''.join(self.ssh_manager.check_call( - ip=node['ip'], command="hostname")['stdout']).strip() - hosts.append(hostname) - logger.debug('hostname are {}'.format(hosts)) - nodes = {'slave-02': ['controller'], - 'slave-03': ['controller']} - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - node = self.fuel_web.get_nailgun_node_by_devops_node( - self.env.d_env.get_node(name='slave-01')) - - for host in hosts: - self.show_step(8, initialize=True) - cmd = "grep '{}' /etc/hosts".format(host) - logger.info('Checking hosts on {}'.format(host)) - result = self.ssh_manager.check_call( - ip=node['ip'], command=cmd, expected=[1]) - assert_equal(result['exit_code'], 1, - "host {} is present in /etc/hosts".format(host)) - self.show_step(9) - cmd = "grep '{}' /etc/corosync/corosync.conf".format(host) - logger.info('Checking corosync.conf on {}'.format(host)) - result = self.ssh_manager.check_call( - ip=node['ip'], command=cmd, expected=[1]) - assert_equal(result['exit_code'], 1, - "host {} is present in" - " /etc/corosync/corosync.conf".format(host)) - self.env.make_snapshot("remove_controllers") diff --git a/fuelweb_test/tests/tests_scale/test_scale_group_3.py b/fuelweb_test/tests/tests_scale/test_scale_group_3.py deleted file mode 100644 index f122172a0..000000000 --- a/fuelweb_test/tests/tests_scale/test_scale_group_3.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_scale_group_3"]) -class HaScaleGroup3(TestBasic): - """HaScaleGroup3.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_delete_compute"]) - @log_snapshot_after_test - def add_delete_compute(self): - """Deployment with 3 controllers, NeutronVLAN, add, add/delete/, - delete compute node - - Scenario: - 1. Create new environment - 2. Choose Neutron, VLAN - 3. Add 3 controller - 4. Deploy the environment - 5. Add 2 compute - 6. Verify networks - 7. Deploy the environment - 8. Verify networks - 9. Run OSTF tests - 10. 
Add 1 new compute node and delete one deployed compute - 11. Re-deploy - 12. Verify networks - 13. Run OSTF tests - 14. Delete one compute node - 15. Re-deploy cluster - 16. Verify networks - 17. Run OSTF - - Duration 120m - Snapshot add_delete_compute - - """ - self.env.revert_snapshot("ready_with_9_slaves") - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE) - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'] - } - ) - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - nodes = {'slave-04': ['compute'], - 'slave-05': ['compute']} - self.show_step(5) - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(10) - nodes = {'slave-06': ['compute']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - nodes = {'slave-05': ['compute']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(11) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - self.show_step(13) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(14) - nodes = {'slave-04': ['compute']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(15) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(16) - self.fuel_web.verify_network(cluster_id) - self.show_step(17) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("add_delete_compute") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_delete_cinder"]) - @log_snapshot_after_test - def add_delete_cinder(self): - """Deployment with 3 controllers, NeutronVlan, with add, delete, - add/delete cinder node - - Scenario: - 1. Create cluster: Neutron VLAN, default storages - 2. Add 3 controller and 2 compute node - 3. Deploy the cluster - 4. Add 1 cinder nodes - 5. Deploy changes - 6. Verify network - 7. Run OSTF - 8. Add 2 cinder nodes and delete one deployed cinder node - 9. Deploy changes - 10. Run OSTF - 11. Verify networks - 12. Delete one cinder node - 13. Deploy changes - 14. Verify networks - 15. 
Run OSTF - - Duration 120m - Snapshot add_delete_cinder - - """ - self.env.revert_snapshot("ready_with_9_slaves") - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE) - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - nodes = {'slave-06': ['cinder']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - nodes = {'slave-07': ['cinder'], - 'slave-08': ['cinder']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - nodes = {'slave-06': ['cinder']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(12) - nodes = {'slave-07': ['cinder']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(13) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(14) - self.fuel_web.verify_network(cluster_id) - self.show_step(15) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("add_delete_cinder") diff --git a/fuelweb_test/tests/tests_scale/test_scale_group_4.py b/fuelweb_test/tests/tests_scale/test_scale_group_4.py deleted file mode 100644 index 2cae98b30..000000000 --- a/fuelweb_test/tests/tests_scale/test_scale_group_4.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["ha_scale_group_4"]) -class HaScaleGroup4(TestBasic): - """HaScaleGroup4.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_delete_ceph"]) - @log_snapshot_after_test - def add_delete_ceph(self): - """Deployment with 3 controllers, NeutronVlan, with add, delete, - add/delete ceph node - - Scenario: - 1. Create cluster: Neutron VLAN, ceph for volumes and images, - ceph for ephemeral and Rados GW - 2. Add 3 controller, 1 compute, 3 ceph nodes - 3. Deploy the cluster - 4. Add 1 ceph node - 5. Deploy changes - 6. Verify network - 7. Run OSTF - 8. Add 1 ceph node and delete one deployed ceph node - 9. Deploy changes - 10. Run OSTF - 11. Verify networks - 12. 
Delete one ceph node - 13. Deploy changes - 14. Verify networks - 15. Run OSTF - - Duration 120m - Snapshot add_delete_ceph - - """ - self.env.revert_snapshot("ready_with_9_slaves") - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True - } - ) - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['ceph-osd'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'] - } - ) - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - nodes = {'slave-08': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - nodes = {'slave-09': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - with self.fuel_web.get_ssh_for_node('slave-05') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - nodes = {'slave-05': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(12) - with self.fuel_web.get_ssh_for_node('slave-07') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - nodes = {'slave-07': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(13) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(14) - self.fuel_web.verify_network(cluster_id) - self.show_step(15) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("add_delete_ceph") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_delete_cinder_ceph"]) - @log_snapshot_after_test - def add_delete_cinder_ceph(self): - """Deployment with 3 controllers, NeutronVLan, with add, delete, - add/delete cinder and ceph node - - Scenario: - 1. Create cluster: Neutron VLAN, cinder for volumes - and ceph for images - 2. Add 3 controller+ceph, 1 compute and 1 cinder nodes - 3. Deploy the cluster - 4. Add 1 ceph node and 1 cinder node - 5. Deploy changes - 6. Verify network - 7. Run OSTF - 8. Add 1 cinder node and delete 1 deployed cinder node - 9. Deploy changes - 10. Verify network - 11. Run OSTF - 12. Add 1 ceph node and delete 1 deployed ceph node - 13. Deploy changes - 14. Verify network - 15. Run OSTF - 16. Delete 1 cinder and 1 ceph node - 17. Deploy changes - 18. Verify network - 19. 
Run OSTF - - Duration 120m - Snapshot add_delete_cinder_ceph - - """ - self.env.revert_snapshot("ready_with_9_slaves") - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - 'volumes_lvm': True, - 'images_ceph': True - } - ) - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['compute'], - 'slave-05': ['cinder'] - } - ) - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - nodes = {'slave-06': ['cinder'], - 'slave-07': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(8) - nodes = {'slave-08': ['cinder']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - nodes = {'slave-06': ['cinder']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(12) - nodes = {'slave-09': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - True, False - ) - with self.fuel_web.get_ssh_for_node('slave-07') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - nodes = {'slave-07': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(13) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(14) - self.fuel_web.verify_network(cluster_id) - self.show_step(15) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(16) - nodes = {'slave-08': ['cinder']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - with self.fuel_web.get_ssh_for_node('slave-09') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - nodes = {'slave-09': ['ceph-osd']} - self.fuel_web.update_nodes( - cluster_id, nodes, - False, True - ) - self.show_step(17) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(18) - self.fuel_web.verify_network(cluster_id) - self.show_step(19) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("add_delete_cinder_ceph") diff --git a/fuelweb_test/tests/tests_scale/test_scale_group_5.py b/fuelweb_test/tests/tests_scale/test_scale_group_5.py deleted file mode 100644 index be4bd1fa8..000000000 --- a/fuelweb_test/tests/tests_scale/test_scale_group_5.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['ha_scale_group_5']) -class HaScaleGroup5(TestBasic): - """HaScaleGroup5.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_delete_compute_cinder_ceph"]) - @log_snapshot_after_test - def add_delete_compute_cinder_ceph(self): - """Deployment with 3 controllers, NeutronVxlan, with add, delete, - add/delete compute+cinder+ceph node - - Scenario: - 1. Deploy cluster 3 controllers, 2 computes + ceph + cinder, - Neutron VXLAN, cinder for volumes, ceph for images. - 2. Verify networks - 3. Run OSTF - 4. Add 1 ceph+cinder+compute and redeploy - 5. Verify networks - 6. Run OSTF - 7. Add 1 new ceph+cinder+compute and delete one already deployed - ceph+cinder+compute - 8. Re-deploy cluster - 9. Verify networks - 10. Run OSTF - 11. Delete one ceph+cinder+compute - 12. Redeploy cluster - 13. Verify network - 14. Run OSTF - - Duration: 300 min - Snapshot: add_delete_compute_cinder_ceph - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - self.show_step(1, initialize=True) - data = { - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': True, - 'osd_pool_size': '2', - 'tenant': 'scalegroup5', - 'user': 'scalegroup5', - 'password': 'scalegroup5', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'] - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd', 'cinder'], - 'slave-05': ['compute', 'ceph-osd', 'cinder'] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(2) - self.fuel_web.verify_network(cluster_id) - - self.show_step(3) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-06': ['compute', 'ceph-osd', 'cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-07': ['compute', 'ceph-osd', 'cinder'] - } - ) - - with self.fuel_web.get_ssh_for_node('slave-04') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-04': ['compute', 'ceph-osd', 'cinder'] - }, - pending_addition=False, - pending_deletion=True - ) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(11) - with self.fuel_web.get_ssh_for_node('slave-07') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-07': ['compute', 'ceph-osd', 'cinder'] - }, - pending_addition=False, - pending_deletion=True - ) - self.show_step(12) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(13) - self.fuel_web.verify_network(cluster_id) - - self.show_step(14) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('add_delete_compute_cinder_ceph') - - 
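# Illustrative aside: every scale-up and scale-down in these tests goes
# through fuel_web.update_nodes; the two booleans passed positionally in
# other files correspond to the pending_addition/pending_deletion keywords
# spelled out in this one. A toy stand-in (not the real Nailgun-backed
# method) that mirrors only the flag semantics; the defaults are inferred
# from how the tests call it:
def update_nodes_demo(cluster_id, nodes,
                      pending_addition=True, pending_deletion=False):
    action = 'add' if pending_addition else 'delete'
    for name, roles in nodes.items():
        print('{0}: {1} with roles {2} (cluster {3})'.format(
            action, name, roles, cluster_id))

update_nodes_demo(1, {'slave-06': ['controller', 'cinder', 'ceph-osd']})
update_nodes_demo(1, {'slave-02': ['controller', 'cinder', 'ceph-osd']},
                  pending_addition=False, pending_deletion=True)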
@test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_delete_controller_cinder_ceph"]) - @log_snapshot_after_test - def add_delete_controller_cinder_ceph(self): - """Deployment with 3 controllers, NeutronVxlan, with add, delete, - add/delete controller+cinder+ceph node - - Scenario: - 1. Deploy cluster 3 controller+cinder+ceph, 2 computes, - Neutron VXLAN, cinder for volumes, ceph for images + Rados GW - 2. Verify networks - 3. Run OSTF - 4. Add 1 ceph+cinder+controller - 5. Re-deploy cluster - 6. Verify networks - 7. Run OSTF - 8. Add 1 new ceph+cinder+controller and delete one already deployed - ceph+cinder+controller - 9. Re-deploy cluster - 10. Verify networks - 11. Run OSTF - 12. Delete one ceph+cinder+controller - 13. Redeploy cluster - 14. Verify network - 15. Run OSTF - - Snapshot: add_delete_controller_cinder_ceph - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - data = { - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': True, - 'objects_ceph': True, - 'tenant': 'scalegroup5', - 'user': 'scalegroup5', - 'password': 'scalegroup5', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'] - } - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'cinder', 'ceph-osd'], - 'slave-02': ['controller', 'cinder', 'ceph-osd'], - 'slave-03': ['controller', 'cinder', 'ceph-osd'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(2) - self.fuel_web.verify_network(cluster_id) - - self.show_step(3) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-06': ['controller', 'cinder', 'ceph-osd'] - } - ) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(8) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-07': ['controller', 'cinder', 'ceph-osd'] - } - ) - - with self.fuel_web.get_ssh_for_node('slave-02') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-02': ['controller', 'cinder', 'ceph-osd'] - }, - pending_addition=False, - pending_deletion=True - ) - - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(12) - with self.fuel_web.get_ssh_for_node('slave-03') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-03': ['controller', 'cinder', 'ceph-osd'] - }, - pending_addition=False, - pending_deletion=True - ) - - self.show_step(13) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(14) - self.fuel_web.verify_network(cluster_id) - - self.show_step(15) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('add_delete_controller_cinder_ceph') diff --git a/fuelweb_test/tests/tests_scale/test_scale_group_6.py b/fuelweb_test/tests/tests_scale/test_scale_group_6.py deleted file mode 100644 index 18e9fda7a..000000000 --- a/fuelweb_test/tests/tests_scale/test_scale_group_6.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2015 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['ha_scale_group_6']) -class HaScaleGroup6(TestBasic): - """HaScaleGroup6.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["add_delete_compute_cinder_ceph_ephemeral"]) - @log_snapshot_after_test - def add_delete_compute_cinder_ceph_ephemeral(self): - """Deployment with 3 controllers, NeutronVlan, with add, delete, - add/delete cinder+ceph node - - Scenario: - 1. Deploy cluster 3 controllers, 1 compute, 2 ceph + cinder, - Neutron VLAN, cinder for volumes, ceph for images and ephemeral - 2. Verify networks - 3. Run OSTF - 4. Add 1 ceph+cinder and redeploy - 5. Verify networks - 6. Run OSTF - 7. Add 1 new ceph+cinder and delete one already deployed - ceph+cinder - 8. Re-deploy cluster - 9. Verify networks - 10. Run OSTF - 11. Delete one ceph+cinder - 12. Redeploy cluster - 13. Verify network - 14. Run OSTF - - Duration: 300 min - Snapshot: add_delete_compute_cinder_ceph_ephemeral - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - self.show_step(1, initialize=True) - data = { - 'volumes_lvm': True, - 'volumes_ceph': False, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'osd_pool_size': '2', - 'tenant': 'scalegroup6', - 'user': 'scalegroup6', - 'password': 'scalegroup6' - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['ceph-osd', 'cinder'], - 'slave-06': ['ceph-osd', 'cinder'] - } - ) - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(2) - self.fuel_web.verify_network(cluster_id) - - self.show_step(3) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-07': ['ceph-osd', 'cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-08': ['ceph-osd', 'cinder'] - } - ) - with self.fuel_web.get_ssh_for_node('slave-05') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-05': ['ceph-osd', 'cinder'] - }, - pending_addition=False, - pending_deletion=True - ) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(11) - with self.fuel_web.get_ssh_for_node('slave-08') as remote_ceph: - 
self.fuel_web.prepare_ceph_to_delete(remote_ceph) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-08': ['ceph-osd', 'cinder'] - }, - pending_addition=False, - pending_deletion=True - ) - self.show_step(12) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(13) - self.fuel_web.verify_network(cluster_id) - - self.show_step(14) - self.fuel_web.run_ostf(cluster_id) - self.env.make_snapshot("add_delete_compute_cinder_ceph_ephemeral") diff --git a/fuelweb_test/tests/tests_security/__init__.py b/fuelweb_test/tests/tests_security/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_security/test_lynis_audit.py b/fuelweb_test/tests/tests_security/test_lynis_audit.py deleted file mode 100644 index 1c7d25a3b..000000000 --- a/fuelweb_test/tests/tests_security/test_lynis_audit.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test -from proboscis.asserts import assert_equal - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import install_lynis_master -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["tests_security_compliance"]) -class TestsSecurityCompliance(TestBasic): - @test(depends_on=[SetupEnvironment.setup_master], - groups=["master_node_compliance"]) - @log_snapshot_after_test - def master_node_compliance(self): - """ Install and run lynis on master node - - Scenario: - 1. Revert snapshot empty - 2. Install Lynis package - 3. Run lynis custom test - 4. Analyze lynis results - - Duration: 5 min - Snapshot: master_node_compliance - """ - - self.show_step(1) - self.env.revert_snapshot('empty') - self.show_step(2) - ip_master = self.ssh_manager.admin_ip - install_lynis_master(master_node_ip=ip_master) - self.show_step(3) - cmd = 'lynis -c -Q --tests-category "custom"' - self.ssh_manager.execute_on_remote(ip_master, cmd) - cmd =\ - 'awk -F\']\' \'/Mirantis\S+\sResult/ {print $2}\' ' \ - '/var/log/lynis.log' - lynis_failed_tests = [ - test for test in self.ssh_manager.execute_on_remote( - ip_master, cmd)['stdout']] - logger.debug(lynis_failed_tests) - self.show_step(4) - # Check that lynis reported no failed tests - assert_equal(len(lynis_failed_tests), 0, - message="Some lynis tests failed." - " Please check lynis logs for that") - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["slave_nodes_compliance"]) - @log_snapshot_after_test - def slave_nodes_compliance(self): - """ Install and run lynis on slave nodes - - Scenario: - 1. Revert snapshot ready_with_3_slaves - 2. Create cluster with 3 nodes: controller, compute, cinder - 3. Install lynis package on slaves - 4. Run lynis custom test - 5.
Analyze lynis results - - Duration: 30 min - """ - - self.show_step(1) - self.env.revert_snapshot('ready_with_3_slaves') - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - ) - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(3) - cmd =\ - 'echo 172.18.162.63 perestroika-repo-tst.infra.mirantis.net' \ - ' >> /etc/hosts' - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - self.ssh_manager.execute_on_remote(node['ip'], cmd) - cmd =\ - 'sudo add-apt-repository "http://perestroika-repo-tst.' \ - 'infra.mirantis.net/mos-packaging/ubuntu/"' - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - self.ssh_manager.execute_on_remote(node['ip'], cmd) - cmd = 'apt-get install lynis' - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - self.ssh_manager.execute_on_remote(node['ip'], cmd) - # check that lynis version is correct and installed from perestroika - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id) - self.show_step(4) - cmd = 'lynis -c -Q --tests-category "custom"' - for node in nodes: - self.ssh_manager.execute_on_remote(node['ip'], cmd) - self.show_step(5) - cmd = \ - 'awk -F\']\' \'/Mirantis\S+\sResult/ {print $2}\' ' \ - '/var/log/lynis.log' - - for node in nodes: - lynis_failed_tests = [ - test for test in self.ssh_manager.execute_on_remote( - node['ip'], cmd)['stdout']] - logger.debug(lynis_failed_tests) - # Check that lynis reported no failed tests - assert_equal(len(lynis_failed_tests), - 0, - message="Some lynis tests failed for node {}. " - "Please check lynis logs".format(node['ip'])) diff --git a/fuelweb_test/tests/tests_security/test_run_nessus.py b/fuelweb_test/tests/tests_security/test_run_nessus.py deleted file mode 100644 index 220a9a8ab..000000000 --- a/fuelweb_test/tests/tests_security/test_run_nessus.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import time - -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -import netaddr -from proboscis import test -from proboscis.asserts import fail - -from fuelweb_test.helpers import decorators -from fuelweb_test.helpers import nessus -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests import base_test_case -from fuelweb_test.tests.test_neutron_tun_base import NeutronTunHaBase - - -@test(groups=["nessus"]) -class TestNessus(NeutronTunHaBase): - """Security tests by Nessus - - Environment variables: - - SECURITY_TEST - True if you have pre-built Nessus qcow image. - Default: False - - NESSUS_IMAGE_PATH - path to pre-built Nessus qcow image. - Default: /var/lib/libvirt/images/nessus.qcow2 - - NESSUS_ADDRESS - Nessus API IP address of pre-installed Nessus.
- Note: Nessus should have access to all virtual networks, all nodes - and all ports. - Default: None, address will be detected automatically by scanning - admin network. - - NESSUS_PORT - Nessus API port. - Default: 8834 - - NESSUS_USERNAME - Username to login to Nessus. - - NESSUS_PASSWORD - Password to login to Nessus. - - NESSUS_SSL_VERIFY - True if you want to verify Nessus SSL - Default: False - """ - - def enable_password_login_for_ssh_on_slaves(self, slave_names): - for node_name in slave_names: - with self.fuel_web.get_ssh_for_node(node_name) as remote: - remote.execute("sed -i 's/PasswordAuthentication no/" - "PasswordAuthentication yes/g' " - "/etc/ssh/sshd_config") - remote.execute("service ssh restart") - - def find_nessus_address(self, - nessus_net_name='admin', - nessus_port=8834): - admin_net_cidr = \ - self.env.d_env.get_network(name=nessus_net_name).ip_network - - logger.info( - "Scanning network '%s' (%s) for nessus service on port %d", - nessus_net_name, admin_net_cidr, nessus_port) - - for address in netaddr.IPNetwork(admin_net_cidr).iter_hosts(): - if tcp_ping(address.format(), nessus_port, timeout=1): - return address.format() - - fail("No running nessus service found!") - - @staticmethod - def get_check_scan_complete(nessus_client, scan_id, history_id): - def check_scan_complete(): - return ( - nessus_client.get_scan_status( - scan_id, - history_id) == 'completed') - return check_scan_complete - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5], - groups=["deploy_neutron_tun_ha_nessus"]) - @decorators.log_snapshot_after_test - def deploy_neutron_tun_ha_nessus(self): - """Deploy cluster in HA mode with Neutron VXLAN for Nessus - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Run network verification - 6. Run OSTF - - Duration 80m - Snapshot deploy_neutron_tun_ha_nessus - """ - super(self.__class__, self).deploy_neutron_tun_ha_base( - snapshot_name="deploy_neutron_tun_ha_nessus") - - @test(depends_on=[deploy_neutron_tun_ha_nessus], - groups=["nessus_cpa", "nessus_fuel_master_cpa"]) - def nessus_fuel_master_cpa(self): - """Fuel master Credentialed Patch Audit. - - Scenario: - 1. Configure Nessus to run Credentialed Patch Audit - against Fuel Master - 2. Start scan - 3.
Download scan results - - Duration 40m - Snapshot nessus_fuel_master_cpa - - """ - self.env.revert_snapshot("deploy_neutron_tun_ha_nessus") - - if settings.NESSUS_ADDRESS is None: - settings.NESSUS_ADDRESS = \ - self.find_nessus_address(nessus_net_name='admin', - nessus_port=settings.NESSUS_PORT) - - nessus_client = nessus.NessusClient(settings.NESSUS_ADDRESS, - settings.NESSUS_PORT, - settings.NESSUS_USERNAME, - settings.NESSUS_PASSWORD, - settings.NESSUS_SSL_VERIFY) - - scan_start_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - scan_name = "Scan CPA {0}".format(scan_start_date) - - policies_list = nessus_client.list_policy_templates() - cpa_policy_template = [ - template for template in policies_list - if template['title'] == 'Credentialed Patch Audit'][0] - - policy_id = nessus_client.add_cpa_policy( - scan_name, settings.ENV_NAME, cpa_policy_template['uuid']) - - scan_id = nessus_client.create_scan( - scan_name, settings.ENV_NAME, self.fuel_web.admin_node_ip, - policy_id, cpa_policy_template['uuid']) - scan_uuid = nessus_client.launch_scan(scan_id) - history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid] - - check_scan_complete = self.get_check_scan_complete( - nessus_client, scan_id, history_id) - wait(check_scan_complete, interval=10, timeout=60 * 30, - timeout_msg='Timeout: nessus scan status != completed') - - file_id = nessus_client.export_scan(scan_id, history_id, 'html') - nessus_client.download_scan_result( - scan_id, file_id, 'master_cpa', 'html', settings.LOGS_DIR) - - self.env.make_snapshot("nessus_fuel_master_cpa") - - @test(depends_on=[deploy_neutron_tun_ha_nessus], - groups=["nessus_wat", "nessus_fuel_master_wat"]) - def nessus_fuel_master_wat(self): - """Fuel master Advanced Web Services tests. - - Scenario: - 1. Configure Nessus to run Advanced Web Services tests - against Fuel Master - 2. Start scan - 3.
Download scan results - - Duration 40 min - Snapshot nessus_fuel_master_wat - - """ - self.env.revert_snapshot("deploy_neutron_tun_ha_nessus") - - if settings.NESSUS_ADDRESS is None: - settings.NESSUS_ADDRESS = \ - self.find_nessus_address(nessus_net_name='admin', - nessus_port=settings.NESSUS_PORT) - - nessus_client = nessus.NessusClient(settings.NESSUS_ADDRESS, - settings.NESSUS_PORT, - settings.NESSUS_USERNAME, - settings.NESSUS_PASSWORD, - settings.NESSUS_SSL_VERIFY) - - scan_start_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - scan_name = "Scan WAT {0}".format(scan_start_date) - - policies_list = nessus_client.list_policy_templates() - wat_policy_template = [ - template for template in policies_list - if template['title'] == 'Web Application Tests'][0] - - policy_id = nessus_client.add_wat_policy( - scan_name, settings.ENV_NAME, wat_policy_template['uuid']) - - scan_id = nessus_client.create_scan( - scan_name, settings.ENV_NAME, self.fuel_web.admin_node_ip, - policy_id, wat_policy_template['uuid']) - - scan_uuid = nessus_client.launch_scan(scan_id) - history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid] - - check_scan_complete = self.get_check_scan_complete( - nessus_client, scan_id, history_id) - wait(check_scan_complete, interval=10, timeout=60 * 50, - timeout_msg='Timeout: nessus scan status != completed') - - file_id = nessus_client.export_scan(scan_id, history_id, 'html') - nessus_client.download_scan_result( - scan_id, file_id, 'master_wat', 'html', settings.LOGS_DIR) - - self.env.make_snapshot("nessus_fuel_master_wat") - - @test(depends_on=[deploy_neutron_tun_ha_nessus], - groups=["nessus_cpa", "nessus_controller_ubuntu_cpa"]) - def nessus_controller_ubuntu_cpa(self): - """Ubuntu controller Credentialed Patch Audit. - - Scenario: - 1. Configure Nessus to run Credentialed Patch Audit - against MOS controller on Ubuntu - 2. Start scan - 3. 
Download scan results - - Duration 40 min - Snapshot nessus_controller_ubuntu_cpa - - """ - self.env.revert_snapshot("deploy_neutron_tun_ha_nessus") - - self.enable_password_login_for_ssh_on_slaves(['slave-01']) - - if settings.NESSUS_ADDRESS is None: - settings.NESSUS_ADDRESS = \ - self.find_nessus_address(nessus_net_name='admin', - nessus_port=settings.NESSUS_PORT) - - nessus_client = nessus.NessusClient(settings.NESSUS_ADDRESS, - settings.NESSUS_PORT, - settings.NESSUS_USERNAME, - settings.NESSUS_PASSWORD, - settings.NESSUS_SSL_VERIFY) - - scan_start_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - scan_name = "Scan CPA {0}".format(scan_start_date) - - policies_list = nessus_client.list_policy_templates() - cpa_policy_template = [ - template for template in policies_list - if template['title'] == 'Credentialed Patch Audit'][0] - - policy_id = nessus_client.add_cpa_policy( - scan_name, settings.ENV_NAME, cpa_policy_template['uuid']) - - slave_address = \ - self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'] - - scan_id = nessus_client.create_scan( - scan_name, settings.ENV_NAME, slave_address, - policy_id, cpa_policy_template['uuid']) - scan_uuid = nessus_client.launch_scan(scan_id) - history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid] - - check_scan_complete = self.get_check_scan_complete( - nessus_client, scan_id, history_id) - wait(check_scan_complete, interval=10, timeout=60 * 30, - timeout_msg='Timeout: nessus scan status != completed') - - file_id = nessus_client.export_scan(scan_id, history_id, 'html') - nessus_client.download_scan_result( - scan_id, file_id, 'controller_cpa', 'html', settings.LOGS_DIR) - - self.env.make_snapshot("nessus_controller_ubuntu_cpa") diff --git a/fuelweb_test/tests/tests_separate_services/__init__.py b/fuelweb_test/tests/tests_separate_services/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_separate_services/test_deploy_platform_components.py b/fuelweb_test/tests/tests_separate_services/test_deploy_platform_components.py deleted file mode 100644 index 2fc6d7a04..000000000 --- a/fuelweb_test/tests/tests_separate_services/test_deploy_platform_components.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
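The three Nessus tests above share one polling idiom: get_check_scan_complete() builds a zero-argument closure over the client and the scan/history identifiers, and devops' wait() calls that predicate every `interval` seconds until it returns True or the timeout expires. A minimal, self-contained sketch of the same idiom follows (plain Python; FakeScanClient and wait_until are hypothetical stand-ins for NessusClient and devops.helpers.helpers.wait, used only to keep the sketch runnable):

import time


def make_check_scan_complete(client, scan_id, history_id):
    # Nullary predicate closing over the client and scan identifiers,
    # mirroring TestNessus.get_check_scan_complete() above.
    def check_scan_complete():
        return client.get_scan_status(scan_id, history_id) == 'completed'
    return check_scan_complete


def wait_until(predicate, interval, timeout, timeout_msg):
    # Simplified stand-in for devops.helpers.helpers.wait(): poll the
    # predicate until it returns True or the deadline passes.
    deadline = time.time() + timeout
    while not predicate():
        if time.time() >= deadline:
            raise AssertionError(timeout_msg)
        time.sleep(interval)


class FakeScanClient(object):
    # Hypothetical client: reports 'running' twice, then 'completed'.
    def __init__(self):
        self.polls = 0

    def get_scan_status(self, scan_id, history_id):
        self.polls += 1
        return 'completed' if self.polls > 2 else 'running'


check = make_check_scan_complete(FakeScanClient(), scan_id=42, history_id=1)
wait_until(check, interval=0.01, timeout=5,
           timeout_msg='Timeout: nessus scan status != completed')

Binding the identifiers into a closure keeps the waiting loop generic: the wait helper only ever sees a nullary callable, so the same helper can poll any condition.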
- -import os - -from proboscis.asserts import assert_true -from proboscis import test -from proboscis import SkipTest - -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -class BaseDeployPlatformComponents(TestBasic): - """Shared methods for test scenarios with platform components deployment - - _install_plugins -> installs all required plugins - _enable_plugins -> enables these plugins - _deploy_and_check -> verify_network, deploy, verify network, run OSTF - """ - def __init__(self): - super(BaseDeployPlatformComponents, self).__init__() - check_plugin_path_env( - var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH - ) - check_plugin_path_env( - var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH - ) - check_plugin_path_env( - var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH - ) - - def _install_plugins(self): - for plugin_path in ( - settings.SEPARATE_SERVICE_DB_PLUGIN_PATH, - settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH, - settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH - ): - self.env.admin_actions.upload_plugin(plugin=plugin_path) - self.env.admin_actions.install_plugin( - plugin_file_name=os.path.basename(plugin_path)) - - def _enable_plugins(self, cluster_id): - plugin_names = [ - 'detach-database', 'detach-keystone', 'detach-rabbitmq'] - msg = "Plugin couldn't be enabled. Check plugin version. Test aborted" - for plugin_name in plugin_names: - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - def __next_step(self): - self.show_step(self.current_log_step + 1) - - def _deploy_and_check(self, cluster_id, timeout=7800): - self.__next_step() - self.fuel_web.verify_network(cluster_id) - - self.__next_step() - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=timeout) - - self.__next_step() - self.fuel_web.verify_network(cluster_id) - - self.__next_step() - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'] - ) - - self.__next_step() - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['tests_platform'], - ) - - -@test(groups=["acceptance_deploy_platform_components"]) -class TestsDeployPlatformComponents(BaseDeployPlatformComponents): - """Deployment with platform components - - Test scenarios from acceptance scope. - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["acceptance_deploy_platform_components_sahara"]) - @log_snapshot_after_test - def acceptance_deploy_platform_components_sahara(self): - """Deploy cluster with detached keystone, rabbitmq, database and Sahara - - Scenario: - 1. Install db, rabbitmq, keystone plugin on the master node. - 2. Create Ubuntu, Neutron Vlan, Default storage, Sahara cluster. - (Cinder, Swift, Glance) - 3. Add 3 nodes with controller role. - 4. Add 3 nodes with keystone, db, rabbitmq role. - 5. Add 1 compute node. - 6. Add 1 cinder node. - 7. Run network verification. - 8. Deploy changes. - 9. Run network verification. - 10. Run OSTF 'smoke', 'sanity', 'ha' tests. - 11. Run OSTF platform tests.
- - Duration 120m - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - self.show_step(1, initialize=True) - self._install_plugins() - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - "net_provider": 'neutron', - 'sahara': True, - }) - - self._enable_plugins(cluster_id=cluster_id) - - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-05': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-06': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-07': ['compute'], - 'slave-08': ['cinder'] - } - ) - - self.show_step(7) - self.show_step(8) - self.show_step(9) - self.show_step(10) - self.show_step(11) - self._deploy_and_check(cluster_id=cluster_id) - - # TODO: Test is disabled until the Murano plugin is available. - # TODO: Rework test for use with Murano plugin - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["acceptance_deploy_platform_components_murano"], - enabled=False) - @log_snapshot_after_test - def acceptance_deploy_platform_components_murano(self): - """Deploy cluster with detached keystone, rabbitmq, database and Murano - - Scenario: - 1. Install db, rabbitmq, keystone plugin on the master node. - 2. Create Ubuntu, Neutron Vlan, Default storage, Murano cluster. - (Cinder, Swift, Glance) - 3. Add 3 nodes with controller role. - 4. Add 3 nodes with keystone, db, rabbitmq role. - 5. Add 1 compute node. - 6. Add 1 cinder node. - 7. Run network verification. - 8. Deploy changes. - 9. Run network verification. - 10. Run OSTF 'smoke', 'sanity', 'ha' tests. - 11. Run OSTF platform tests. - - Duration 120m - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - self.show_step(1, initialize=True) - self._install_plugins() - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - "net_provider": 'neutron', - 'murano': True, - }) - - self._enable_plugins(cluster_id=cluster_id) - - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-05': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-06': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-07': ['compute'], - 'slave-08': ['cinder'] - } - ) - - self.show_step(7) - self.show_step(8) - self.show_step(9) - self.show_step(10) - self.show_step(11) - self._deploy_and_check(cluster_id=cluster_id) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["acceptance_deploy_platform_components_ceilometer"], - enabled=False) - @log_snapshot_after_test - def acceptance_deploy_platform_components_ceilometer(self): - """Deploy cluster with detached keystone, rabbitmq, - database and Ceilometer - - Scenario: - 1. Install db, rabbitmq, keystone plugin on the master node. - 2.
Create Ubuntu, Neutron Vlan, Ceph for volumes, images, Rados, - Ceilometer cluster. - 3. Add 3 nodes with controller+mongo role. - 4. Add 3 nodes with keystone, db, rabbitmq role. - 5. Add 1 compute node. - 6. Add 2 ceph nodes. - 7. Run network verification. - 8. Deploy changes. - 9. Run network verification. - 10. Run OSTF 'smoke', 'sanity', 'ha' tests. - 11. Run OSTF platform tests. - - Duration 120m - """ - - self.env.revert_snapshot("ready_with_9_slaves") - - self.show_step(1, initialize=True) - self._install_plugins() - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - "net_provider": 'neutron', - 'osd_pool_size': '2', # Replication factor - 'ceilometer': True, - 'volumes_ceph': True, - 'images_ceph': True, - }) - - self._enable_plugins(cluster_id=cluster_id) - - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'mongo'], - 'slave-02': ['controller', 'mongo'], - 'slave-03': ['controller', 'mongo'], - 'slave-04': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-05': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-06': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-07': ['compute'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'], - } - ) - - self.show_step(7) - self.show_step(8) - self.show_step(9) - self.show_step(10) - self.show_step(11) - self._deploy_and_check(cluster_id=cluster_id) - - -@test(groups=["huge_separate_services"]) -class TestsDeployPlatformComponentsHuge(BaseDeployPlatformComponents): - """Deployment with platform components""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["huge_separate_rabbitmq_db"]) - @log_snapshot_after_test - def huge_separate_rabbitmq_db(self): - """Deploy cluster with 3 controllers, 3 nodes with detached rabbitmq\ - service and 3 nodes with detached db and keystone services. - - Scenario: - 1. Install plugins on the master node - 2. Create Ubuntu, Neutron Vlan, Default storage cluster - 3. Add 3 nodes with controller role - 4. Add 3 nodes with db and keystone roles - 5. Add 3 nodes with rabbitmq role - 6. Add 1 compute node - 7. Add 1 cinder node - 8. Run network verification - 9. Deploy changes - 10. Run network verification - 11. Run OSTF 'smoke', 'sanity', 'ha' tests. - 12. Run OSTF platform tests.
- - Duration 180m - """ - - if settings.NODES_COUNT <= 12: - raise SkipTest('Not enough nodes for test') - - self.env.revert_snapshot("ready_with_9_slaves") - - # Bootstrap additional nodes - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[9:12], - skip_timesync=True) - - self.show_step(1, initialize=True) - self._install_plugins() - - self.show_step(2) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - "net_provider": 'neutron', - }) - - self._enable_plugins(cluster_id=cluster_id) - - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['standalone-database', 'standalone-keystone'], - 'slave-05': ['standalone-database', 'standalone-keystone'], - 'slave-06': ['standalone-database', 'standalone-keystone'], - 'slave-07': ['standalone-rabbitmq'], - 'slave-08': ['standalone-rabbitmq'], - 'slave-09': ['standalone-rabbitmq'], - 'slave-10': ['compute'], - 'slave-11': ['cinder'] - } - ) - - self._deploy_and_check(cluster_id=cluster_id, timeout=60 * 60 * 3) diff --git a/fuelweb_test/tests/tests_separate_services/test_separate_haproxy.py b/fuelweb_test/tests/tests_separate_services/test_separate_haproxy.py deleted file mode 100644 index d5e53242c..000000000 --- a/fuelweb_test/tests/tests_separate_services/test_separate_haproxy.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import utils -from fuelweb_test.settings import NEUTRON -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import NODEGROUPS -from fuelweb_test.settings import MIRROR_UBUNTU -from fuelweb_test.settings import MULTIPLE_NETWORKS -from fuelweb_test.settings import SEPARATE_SERVICE_BALANCER_PLUGIN_PATH -from fuelweb_test.settings import SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase -from gates_tests.helpers import exceptions - - -@test(groups=["thread_separate_haproxy"]) -class TestSeparateHaproxy(TestNetworkTemplatesBase): - """Test for verification of deployment with detached haproxy role.""" - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["separate_haproxy"]) - @log_snapshot_after_test - def separate_haproxy(self): - """Deploy HA environment with separate Haproxy. - - Scenario: - 1. Revert snapshot with ready master node - 2. Copy and install external-lb and detach-haproxy plugins - 3. Bootstrap 3 slaves from default nodegroup - 4. 
Create cluster with Neutron VXLAN and custom nodegroups - 5. Run 'fuel-mirror' to replace cluster repositories - with local mirrors - 6. Bootstrap 2 slave nodes from second nodegroup - and one node from third node group - 7. Enable plugins for cluster - 8. Add 2 controllers from default nodegroup and 1 controller - from second node group - 9. Add 1 compute+cinder from default node group - and 1 compute+cinder from second node group - 10. Add haproxy node from third node group - 11. Verify networks - 12. Deploy cluster - - Duration 120m - Snapshot separate_haproxy - """ - - if not MULTIPLE_NETWORKS: - raise exceptions.FuelQAVariableNotSet( - 'MULTIPLE_NETWORKS', 'true') - - self.show_step(1) - self.env.revert_snapshot('ready') - - self.show_step(2) - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH, - tar_target="/var") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=SEPARATE_SERVICE_BALANCER_PLUGIN_PATH, - tar_target="/var") - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH)) - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - SEPARATE_SERVICE_BALANCER_PLUGIN_PATH)) - - self.show_step(3) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3]) - self.show_step(4) - admin_ip = self.ssh_manager.admin_ip - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings={ - 'net_provider': NEUTRON, - 'net_segment_type': NEUTRON_SEGMENT['tun'], - 'tenant': 'separatehaproxy', - 'user': 'separatehaproxy', - 'password': 'separatehaproxy', - 'ntp_list': [admin_ip], - } - ) - self.show_step(5) - if MIRROR_UBUNTU != '': - ubuntu_url = MIRROR_UBUNTU.split()[1] - replace_cmd = \ - "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \ - " /usr/share/fuel-mirror/ubuntu.yaml".format( - ubuntu_url) - self.ssh_manager.execute_on_remote(ip=admin_ip, - cmd=replace_cmd) - create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu' - self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_mirror_cmd) - apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \ - '--env {0} --replace'.format(cluster_id) - self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_mirror_cmd) - - self.show_step(6) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5]) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7]) - - self.show_step(7) - plugin_name = 'detach_haproxy' - msg = "Plugin couldn't be enabled. Check plugin version. Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - plugin_name = 'external_loadbalancer' - msg = "Plugin couldn't be enabled. Check plugin version.
Test aborted" - asserts.assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.show_step(8) - self.show_step(9) - self.show_step(10) - nodegroup1 = NODEGROUPS[0]['name'] - nodegroup2 = NODEGROUPS[1]['name'] - nodegroup3 = NODEGROUPS[2]['name'] - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': [['controller'], nodegroup1], - 'slave-02': [['controller'], nodegroup1], - 'slave-04': [['compute', 'cinder'], nodegroup2], - 'slave-05': [['controller'], nodegroup2], - 'slave-03': [['compute', 'cinder'], nodegroup1], - 'slave-07': [['standalone-haproxy'], nodegroup3] - } - ) - - self.show_step(11) - self.fuel_web.verify_network(cluster_id) - - self.show_step(12) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60, - check_services=False) - - self.env.make_snapshot('separate_haproxy') diff --git a/fuelweb_test/tests/tests_separate_services/test_separate_horizon.py b/fuelweb_test/tests/tests_separate_services/test_separate_horizon.py deleted file mode 100644 index d8f3b9479..000000000 --- a/fuelweb_test/tests/tests_separate_services/test_separate_horizon.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["thread_separate_services"]) -class SeparateHorizon(TestBasic): - """SeparateHorizon""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["separate_horizon_service"]) - @log_snapshot_after_test - def separate_horizon_service(self): - """Deploy cluster with 3 separate horizon roles - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 3 nodes with horizon role - 4. Add 1 compute and cinder - 5. Verify networks - 6. Deploy the cluster - 7. Verify networks - 8. 
Run OSTF - - Duration 120m - Snapshot separate_horizon_service - """ - self.check_run("separate_horizon_service") - check_plugin_path_env( - var_name='SEPARATE_SERVICE_HORIZON_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH - ) - - self.env.revert_snapshot("ready_with_9_slaves") - - # copy plugins to the master node - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH, - tar_target="/var") - - # install plugins - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH)) - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'osd_pool_size': '2', - 'tenant': 'separatehorizon', - 'user': 'separatehorizon', - 'password': 'separatehorizon', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - - plugin_name = 'detach-horizon' - msg = "Plugin couldn't be enabled. Check plugin version. Test aborted" - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['standalone-horizon'], - 'slave-05': ['standalone-horizon'], - 'slave-06': ['standalone-horizon'], - 'slave-07': ['compute'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("separate_horizon_service", is_make=True) - - -@test(groups=["thread_separate_services"]) -class SeparateHorizonFailover(TestBasic): - """SeparateHorizonFailover""" # TODO documentation - - @test(depends_on=[SeparateHorizon.separate_horizon_service], - groups=["separate_horizon_service_shutdown"]) - @log_snapshot_after_test - def separate_horizon_service_shutdown(self): - """Shutdown one horizon node - - Scenario: - 1. Revert snapshot separate_horizon_service - 2. Destroy horizon node - 3. Wait OS services are working - 4. Run OSTF - - Duration 30m - """ - self.env.revert_snapshot("separate_horizon_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # destroy one horizon node - horizon_node = self.env.d_env.nodes().slaves[3] - horizon_node.destroy() - self.fuel_web.wait_node_is_offline(horizon_node) - - self.fuel_web.assert_os_services_ready(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - @test(depends_on=[SeparateHorizon.separate_horizon_service], - groups=["separate_horizon_service_restart"]) - @log_snapshot_after_test - def separate_horizon_service_restart(self): - """Restart one horizon node - - Scenario: - 1. Revert snapshot separate_horizon_service - 2. Restart horizon node - 3. Wait OS services are ready - 4. 
Run OSTF - - Duration 30m - """ - self.env.revert_snapshot("separate_horizon_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # restart one horizon node - horizon_node = self.env.d_env.nodes().slaves[3] - self.fuel_web.warm_restart_nodes([horizon_node]) - self.fuel_web.wait_node_is_online(horizon_node) - self.fuel_web.assert_os_services_ready(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - @test(depends_on=[SeparateHorizon.separate_horizon_service], - groups=["separate_horizon_service_controller_shutdown"]) - @log_snapshot_after_test - def separate_horizon_service_controller_shutdown(self): - """Shutdown primary controller node - - Scenario: - 1. Revert snapshot separate_horizon_service - 2. Shutdown primary controller node - 3. Wait HA is working - 4. Run OSTF - - Duration 30m - """ - self.env.revert_snapshot("separate_horizon_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # shutdown primary controller - controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - logger.debug( - "controller with primary role is {}".format(controller.name)) - controller.destroy() - self.fuel_web.wait_node_is_offline(controller) - - self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1) - self.fuel_web.assert_os_services_ready(cluster_id, should_fail=1) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, should_fail=1) - - @test(depends_on=[SeparateHorizon.separate_horizon_service], - groups=["separate_horizon_service_add_delete_node"]) - @log_snapshot_after_test - def separate_horizon_service_add_delete_node(self): - """Add and delete horizon node - - Scenario: - 1. Revert snapshot separate_horizon_service - 2. Add one horizon node and re-deploy cluster - 3. Run network verification - 4. Run OSTF - 5. Delete one horizon node - 6. Run network verification - 7. Run OSTF - - Duration 30m - """ - self.env.revert_snapshot("separate_horizon_service") - cluster_id = self.fuel_web.get_last_created_cluster() - - node = {'slave-09': ['standalone-horizon']} - self.fuel_web.update_nodes( - cluster_id, node, True, False) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['sanity', 'smoke', 'ha']) - - nailgun_node = self.fuel_web.update_nodes(cluster_id, node, - False, True) - nodes = [_node for _node in nailgun_node - if _node["pending_deletion"] is True] - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - self.fuel_web.wait_node_is_discovered(nodes[0]) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['sanity', 'smoke', 'ha']) diff --git a/fuelweb_test/tests/tests_separate_services/test_separate_multiroles.py b/fuelweb_test/tests/tests_separate_services/test_separate_multiroles.py deleted file mode 100644 index 963366995..000000000 --- a/fuelweb_test/tests/tests_separate_services/test_separate_multiroles.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["thread_separate_services", "thread_2_separate_services"]) -class SeparateAllServices(TestBasic): - """SeparateAllServices""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["separate_all_service"]) - @log_snapshot_after_test - def separate_all_service(self): - """Deploy cluster with 3 nodes with db, keystone and rabbit - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 3 nodes with database, keystone and - rabbit roles - 4. Add 1 compute and cinder - 5. Verify networks - 6. Deploy the cluster - 7. Verify networks - 8. Run OSTF - - Duration 120m - Snapshot separate_all_service - """ - self.check_run("separate_all_service") - - check_plugin_path_env( - var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH - ) - check_plugin_path_env( - var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH - ) - check_plugin_path_env( - var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH - ) - self.env.revert_snapshot("ready_with_9_slaves") - - # copy plugins to the master node - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH, - tar_target="/var") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH, - tar_target="/var") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH, - tar_target="/var") - - # install plugins - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - settings.SEPARATE_SERVICE_DB_PLUGIN_PATH)) - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH)) - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH)) - - data = { - 'tenant': 'separateall', - 'user': 'separateall', - 'password': 'separateall', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - - plugin_names = ['detach-database', 'detach-keystone', - 'detach-rabbitmq'] - msg = "Plugin couldn't be enabled. Check plugin version.
Test aborted" - for plugin_name in plugin_names: - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['standalone-database', 'standalone-rabbitmq', - 'standalone-keystone'], - 'slave-05': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-06': ['standalone-database', 'standalone-keystone', - 'standalone-rabbitmq'], - 'slave-07': ['compute'], - 'slave-08': ['cinder'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("separate_all_service", is_make=True) - - -@test(groups=["thread_separate_services", "thread_2_separate_services"]) -class SeparateAllFailover(TestBasic): - """SeparateAllFailover""" # TODO documentation - - @test(depends_on=[SeparateAllServices.separate_all_service], - groups=["separate_all_service_shutdown"]) - @log_snapshot_after_test - def separate_all_service_shutdown(self): - """Shutdown one multirole node - - Scenario: - 1. Revert snapshot separate_all_service - 2. Destroy multirole node with rabbit master - 3. Wait HA is working - 4. Run OSTF - - Duration 30m - Snapshot None - """ - self.env.revert_snapshot("separate_all_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # destroy node with rabbit master - all_node = self.fuel_web.get_rabbit_master_node( - self.env.d_env.nodes().slaves[3].name) - all_node.destroy() - self.fuel_web.wait_node_is_offline(all_node) - - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - @test(depends_on=[SeparateAllServices.separate_all_service], - groups=["separate_all_service_controller_shutdown"]) - @log_snapshot_after_test - def separate_all_service_controller_shutdown(self): - """Shutdown primary controller node - - Scenario: - 1. Revert snapshot separate_all_service - 2. Shutdown primary controller node - 3. Wait HA is working - 4. Run OSTF - - Duration 30m - Snapshot None - """ - self.env.revert_snapshot("separate_all_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # shutdown primary controller - controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - logger.debug( - "controller with primary role is {}".format(controller.name)) - controller.destroy() - self.fuel_web.wait_node_is_offline(controller) - - # One test should fail: Check state of haproxy backends on controllers - self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60) - - self.fuel_web.run_ostf(cluster_id=cluster_id) diff --git a/fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq.py b/fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq.py deleted file mode 100644 index 7f141dcba..000000000 --- a/fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright 2015 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["thread_separate_services", "thread_2_separate_services", - "detach-rabbitmq-ci-group"], enabled=False) -class SeparateRabbit(TestBasic): - """SeparateRabbit""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["separate_rabbit_service"]) - @log_snapshot_after_test - def separate_rabbit_service(self): - """Deploy cluster with 3 separate rabbit roles - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller role - 3. Add 3 nodes with rabbit role - 4. Add 1 compute and cinder - 5. Verify networks - 6. Deploy the cluster - 7. Verify networks - 8. Run OSTF - - Duration 120m - Snapshot separate_rabbit_service - """ - self.check_run("separate_rabbit_service") - checkers.check_plugin_path_env( - var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH - ) - - self.env.revert_snapshot("ready_with_9_slaves") - - # copy plugins to the master node - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH, - tar_target="/var") - - # install plugins - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH)) - - data = { - 'tenant': 'separaterabbit', - 'user': 'separaterabbit', - 'password': 'separaterabbit', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - - plugin_name = 'detach-rabbitmq' - msg = "Plugin couldn't be enabled. Check plugin version. 
Test aborted" - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['standalone-rabbitmq'], - 'slave-05': ['standalone-rabbitmq'], - 'slave-06': ['standalone-rabbitmq'], - 'slave-07': ['compute'], - 'slave-08': ['cinder'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("separate_rabbit_service", is_make=True) - - -@test(groups=["thread_separate_services", "thread_2_separate_services", - "detach-rabbitmq-ci-group"], enabled=False) -class SeparateRabbitFailover(TestBasic): - """SeparateRabbitFailover""" # TODO documentation - - @test(depends_on=[SeparateRabbit.separate_rabbit_service], - groups=["separate_rabbit_service_shutdown"]) - @log_snapshot_after_test - def separate_rabbit_service_shutdown(self): - """Shutdown one rabbit node - - Scenario: - 1. Revert snapshot separate_rabbit_service - 2. Destroy rabbit node that is master - 3. Wait HA is working - 4. Run OSTF - - Duration 30m - """ - self.env.revert_snapshot("separate_rabbit_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # destroy master rabbit node - rabbit_node = self.fuel_web.get_rabbit_master_node( - self.env.d_env.nodes().slaves[3].name) - rabbit_node.destroy() - self.fuel_web.wait_node_is_offline(rabbit_node) - - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - @test(depends_on=[SeparateRabbit.separate_rabbit_service], - groups=["separate_rabbit_service_restart"]) - @log_snapshot_after_test - def separate_rabbit_service_restart(self): - """Restart one rabbit node - - Scenario: - 1. Revert snapshot separate_rabbit_service - 2. Restart rabbit node that is master - 3. Wait HA is working - 4. Run OSTF - - Duration 30m - """ - self.env.revert_snapshot("separate_rabbit_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # restart rabbit master node - rabbit_node = self.fuel_web.get_rabbit_master_node( - self.env.d_env.nodes().slaves[3].name) - self.fuel_web.warm_restart_nodes([rabbit_node]) - self.fuel_web.wait_node_is_online(rabbit_node) - - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - @test(depends_on=[SeparateRabbit.separate_rabbit_service], - groups=["separate_rabbit_service_controller_shutdown"]) - @log_snapshot_after_test - def separate_rabbit_service_controller_shutdown(self): - """Shutdown primary controller node - - Scenario: - 1. Revert snapshot separate_rabbit_service - 2. Shutdown primary controller node - 3. Wait HA is working - 4. 
Run OSTF - - Duration 30m - """ - self.env.revert_snapshot("separate_rabbit_service") - cluster_id = self.fuel_web.get_last_created_cluster() - # shutdown primary controller - controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - logger.debug( - "controller with primary role is {}".format(controller.name)) - controller.destroy() - self.fuel_web.wait_node_is_offline(controller) - - # One test should fail: Check state of haproxy backends on controllers - self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - @test(depends_on=[SeparateRabbit.separate_rabbit_service], - groups=["separate_rabbit_service_add_delete_node"]) - @log_snapshot_after_test - def separate_rabbit_service_add_delete_node(self): - """Add and delete rabbit node - - Scenario: - 1. Revert snapshot separate_rabbit_service - 2. Add one rabbit node and re-deploy cluster - 3. Run network verification - 4. Run OSTF - 5. Check hiera hosts are the same for - different group of roles - 6. Delete one rabbit node - 7. Run network verification - 8. Run ostf - 9. Check hiera hosts are the same for - different group of roles - - Duration 120m - """ - self.env.revert_snapshot("separate_rabbit_service") - cluster_id = self.fuel_web.get_last_created_cluster() - - node = {'slave-09': ['standalone-rabbitmq']} - self.fuel_web.update_nodes( - cluster_id, node, True, False) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['sanity', 'smoke', 'ha']) - checkers.check_hiera_hosts( - self.fuel_web.client.list_cluster_nodes(cluster_id), - cmd='hiera amqp_hosts') - - checkers.check_hiera_hosts( - self.fuel_web.client.list_cluster_nodes(cluster_id), - cmd='hiera memcache_roles') - - rabbit_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['standalone-rabbitmq']) - logger.debug("rabbit nodes are {0}".format(rabbit_nodes)) - checkers.check_hiera_hosts( - rabbit_nodes, - cmd='hiera corosync_roles') - - nailgun_node = self.fuel_web.update_nodes(cluster_id, node, - False, True) - nodes = [_node for _node in nailgun_node - if _node["pending_deletion"] is True] - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.wait_node_is_discovered(nodes[0]) - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['sanity', 'smoke', 'ha']) - checkers.check_hiera_hosts( - self.fuel_web.client.list_cluster_nodes(cluster_id), - cmd='hiera amqp_hosts') - - checkers.check_hiera_hosts( - self.fuel_web.client.list_cluster_nodes(cluster_id), - cmd='hiera memcache_roles') - - rabbit_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['standalone-rabbitmq']) - logger.debug("rabbit nodes are {0}".format(rabbit_nodes)) - checkers.check_hiera_hosts( - rabbit_nodes, - cmd='hiera corosync_roles') diff --git a/fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq_ceph.py b/fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq_ceph.py deleted file mode 100644 index 49296f3b6..000000000 --- a/fuelweb_test/tests/tests_separate_services/test_separate_rabbitmq_ceph.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis.asserts import assert_true -from proboscis import test - -from fuelweb_test.helpers.checkers import check_plugin_path_env -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["thread_separate_services_ceph"], enabled=False) -class SeparateRabbitCeph(TestBasic): - """SeparateRabbitCeph""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["separate_rabbit_ceph_service"]) - @log_snapshot_after_test - def separate_rabbit_ceph_service(self): - """Deployment with separate rabbitmq nodes and ceph for all storages - - Scenario: - 1. Install the plugin on the master node - 2. Create an Ubuntu, Neutron VXLAN cluster with ceph for all storages - 3. Change ceph replication factor to 2 - 4. Add 3 nodes with controller role - 5. Add 3 nodes with rabbitmq role - 6. Add 1 compute node - 7. Add 2 ceph nodes - 8. Run network verification - 9. Deploy changes - 10. Run network verification - 11. Run OSTF tests - - Duration 120m - Snapshot separate_rabbit_ceph_service - """ - self.check_run("separate_rabbit_ceph_service") - check_plugin_path_env( - var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH', - plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH - ) - self.env.revert_snapshot("ready_with_9_slaves") - - # copy the plugin to the master node - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH, - tar_target="/var") - - # install the plugin - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename( - settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH)) - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'osd_pool_size': '2', - 'tenant': 'separaterabbitceph', - 'user': 'separaterabbitceph', - 'password': 'separaterabbitceph', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'], - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data) - - plugin_name = 'detach-rabbitmq' - msg = "Plugin couldn't be enabled. Check plugin version.
Test aborted" - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data(cluster_id, plugin_name, options) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['standalone-rabbitmq'], - 'slave-05': ['standalone-rabbitmq'], - 'slave-06': ['standalone-rabbitmq'], - 'slave-07': ['compute'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'] - } - ) - - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.env.make_snapshot("separate_rabbit_ceph_service") diff --git a/fuelweb_test/tests/tests_strength/__init__.py b/fuelweb_test/tests/tests_strength/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_strength/test_cic_maintenance_mode.py b/fuelweb_test/tests/tests_strength/test_cic_maintenance_mode.py deleted file mode 100644 index b4e4d4832..000000000 --- a/fuelweb_test/tests/tests_strength/test_cic_maintenance_mode.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import time - -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait_pass -from devops.helpers.helpers import wait -from proboscis import asserts -from proboscis import test - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.cic_maintenance_mode import change_config -from fuelweb_test.helpers.cic_maintenance_mode import check_auto_mode -from fuelweb_test.helpers.cic_maintenance_mode import check_available_mode -from fuelweb_test import logger -from fuelweb_test import ostf_test_mapping -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["cic_maintenance_mode"]) -class CICMaintenanceMode(TestBasic): - """CICMaintenanceMode.""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["cic_maintenance_mode_env"]) - @log_snapshot_after_test - def cic_maintenance_mode_env(self): - """Deploy cluster in HA mode with 3 controller for maintenance mode - - Scenario: - 1. Create cluster - 2. Add 3 node with controller role - 3. Add 2 node with compute and cinder roles - 4. 
Deploy the cluster - - Duration 100m - """ - self.check_run('cic_maintenance_mode') - self.env.revert_snapshot("ready_with_5_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'cinder'], - 'slave-05': ['compute', 'cinder'] - } - ) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id) - - # Check network - self.fuel_web.verify_network(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("cic_maintenance_mode", is_make=True) - - @test(depends_on=[cic_maintenance_mode_env], - groups=["manual_cic_maintenance_mode", - "positive_cic_maintenance_mode"]) - @log_snapshot_after_test - def manual_cic_maintenance_mode(self): - """Check manual maintenance mode for controller - - Scenario: - 1. Revert snapshot - 2. Switch the node into maintenance mode - 3. Wait until the controller reboots - 4. Exit maintenance mode - 5. Check that the controller becomes available - - Duration 155m - """ - self.env.revert_snapshot('cic_maintenance_mode') - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select a non-primary controller - regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02") - dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node( - regular_ctrl) - _ip = regular_ctrl['ip'] - _id = regular_ctrl['id'] - logger.info('Maintenance mode for node-{0}'.format(_id)) - asserts.assert_true('True' in check_available_mode(_ip), - "Maintenance mode is not available") - self.ssh_manager.check_call( - ip=_ip, - command="umm on", - expected=[-1]) - - self.fuel_web.wait_node_is_offline(dregular_ctrl) - - asserts.assert_true( - checkers.check_ping(self.env.get_admin_node_ip(), - _ip, - deadline=600), - "Host {0} is not reachable by ping during 600 sec" - .format(_ip)) - - asserts.assert_true('True' in check_auto_mode(_ip), - "Maintenance mode is not switched on") - - self.ssh_manager.check_call( - ip=_ip, - command="umm off") - - self.fuel_web.wait_node_is_online(dregular_ctrl) - - # Wait until Cinder services are UP on a controller - self.fuel_web.wait_cinder_is_up( - [dregular_ctrl.name]) - - # Wait until RabbitMQ cluster is UP - wait_pass(lambda: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['ha'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'RabbitMQ availability')), - timeout=1500) - logger.info('RabbitMQ cluster is available') - - wait_pass(lambda: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['sanity'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Check that required services are running')), - timeout=1500) - logger.info("Required services are running") - - # TODO(astudenov): add timeout_msg - try: - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', 'ha']) - except AssertionError: - logger.debug("OSTF failed on the first run; sleeping 600" - " seconds and retrying once; if it fails again," - " the test fails") - time.sleep(600) - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', 'ha']) - - @test(depends_on=[cic_maintenance_mode_env], - groups=["auto_cic_maintenance_mode", - "positive_cic_maintenance_mode"]) - @log_snapshot_after_test - def auto_cic_maintenance_mode(self): - """Check auto maintenance mode for controller - - Scenario: - 1. Revert snapshot - 2. Unexpected reboot - 3.
Wait until controller is switching in maintenance mode - 4. Exit maintenance mode - 5. Check the controller become available - - Duration 155m - """ - self.env.revert_snapshot('cic_maintenance_mode') - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select a non-primary controller - regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02") - dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node( - regular_ctrl) - _ip = regular_ctrl['ip'] - _id = regular_ctrl['id'] - - asserts.assert_true('True' in check_available_mode(_ip), - "Maintenance mode is not available") - - change_config(_ip, reboot_count=0) - - logger.info('Change UMM.CONF on node-{0}' - .format(_id)) - - logger.info('Unexpected reboot on node-{0}' - .format(_id)) - - self.ssh_manager.check_call( - ip=_ip, - command='reboot >/dev/null & ') - - wait(lambda: - not checkers.check_ping(self.env.get_admin_node_ip(), - _ip), - timeout=60 * 10, - timeout_msg='Node {} still responds to ping'.format( - dregular_ctrl.name)) - - self.fuel_web.wait_node_is_offline(dregular_ctrl) - - logger.info('Check that node-{0} in maintenance mode after' - ' unexpected reboot'.format(_id)) - asserts.assert_true( - checkers.check_ping(self.env.get_admin_node_ip(), - _ip, - deadline=600), - "Host {0} is not reachable by ping during 600 sec" - .format(_ip)) - - asserts.assert_true('True' in check_auto_mode(_ip), - "Maintenance mode is not switched on") - - logger.info('turn off Maintenance mode') - self.ssh_manager.check_call( - ip=_ip, - command="umm off") - time.sleep(30) - - change_config(_ip) - - self.fuel_web.wait_node_is_online(dregular_ctrl) - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up( - [dregular_ctrl.name]) - - # Wait until Cinder services UP on a controller - self.fuel_web.wait_cinder_is_up( - [dregular_ctrl.name]) - - # Wait until RabbitMQ cluster is UP - wait_pass(lambda: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['ha'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'RabbitMQ availability')), - timeout=1500) - logger.info('RabbitMQ cluster is available') - - # Wait until all Openstack services are UP - wait_pass(lambda: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['sanity'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Check that required services are running')), - timeout=1500) - logger.info("Required services are running") - - try: - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', 'ha']) - except AssertionError: - logger.debug("Test failed from first probe," - " we sleep 600 second try one more time" - " and if it fails again - test will fails ") - time.sleep(600) - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', 'ha']) - - @test(depends_on=[cic_maintenance_mode_env], - groups=["negative_manual_cic_maintenance_mode", - "negative_cic_maintenance_mode"]) - @log_snapshot_after_test - def negative_manual_cic_maintenance_mode(self): - """Check negative scenario for manual maintenance mode - - Scenario: - 1. Revert snapshot - 2. Disable UMM - 3. Switch in maintenance mode - 4. Check the controller not switching in maintenance mode - 5. 
Check that the controller becomes available - - Duration 45m - """ - self.env.revert_snapshot('cic_maintenance_mode') - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select a non-primary controller - regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02") - dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node( - regular_ctrl) - _ip = regular_ctrl['ip'] - _id = regular_ctrl['id'] - - asserts.assert_true('True' in check_available_mode(_ip), - "Maintenance mode is not available") - self.ssh_manager.check_call( - ip=_ip, - command="umm disable") - - asserts.assert_false('True' in check_available_mode(_ip), - "Maintenance mode should not be available") - - logger.info('Trying to switch node-{0} into ' - 'maintenance mode'.format(_id)) - - self.ssh_manager.check_call( - ip=_ip, - command="umm on", - expected=[1]) - - # Since UMM is disabled, "umm on" must fail and the node must not - # reboot; wait briefly and check that it stays online - time.sleep(30) - asserts.assert_true( - self.fuel_web.get_nailgun_node_by_devops_node(dregular_ctrl) - ['online'], - 'Node-{0} should be online after command "umm on"'. - format(_id)) - - try: - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', - 'sanity']) - except AssertionError: - logger.debug("OSTF failed on the first run; sleeping 300" - " seconds and retrying once; if it fails again," - " the test fails") - time.sleep(300) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', - 'sanity']) - - @test(depends_on=[cic_maintenance_mode_env], - groups=["negative_auto_cic_maintenance_mode", - "negative_cic_maintenance_mode"]) - @log_snapshot_after_test - def negative_auto_cic_maintenance_mode(self): - """Check negative scenario for auto maintenance mode - - Scenario: - 1. Revert snapshot - 2. Disable UMM - 3. Change UMM.CONF - 4. Unexpected reboot - 5. Check the controller does not switch into maintenance mode - 6.
Check the controller become available - - Duration 85m - """ - self.env.revert_snapshot('cic_maintenance_mode') - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Select a non-primary controller - regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02") - dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node( - regular_ctrl) - _ip = regular_ctrl['ip'] - _id = regular_ctrl['id'] - - asserts.assert_true('True' in check_available_mode(_ip), - "Maintenance mode is not available") - logger.info('Disable UMM on node-{0}'.format(_id)) - - change_config(_ip, umm=False, reboot_count=0) - - asserts.assert_false('True' in check_available_mode(_ip), - "Maintenance mode should not be available") - - logger.info('Unexpected reboot on node-{0}' - .format(_id)) - - self.ssh_manager.check_call( - ip=_ip, - command='reboot >/dev/null & ') - - wait(lambda: - not checkers.check_ping(self.env.get_admin_node_ip(), - _ip), - timeout=60 * 10, - timeout_msg='Node {} still responds to ping'.format( - dregular_ctrl.name)) - - # Node don't have enough time for set offline status - # after reboot - # Just waiting - - asserts.assert_true( - checkers.check_ping(self.env.get_admin_node_ip(), - _ip, - deadline=600), - "Host {0} is not reachable by ping during 600 sec" - .format(_ip)) - - self.fuel_web.wait_node_is_online(dregular_ctrl) - - logger.info('Check that node-{0} not in maintenance mode after' - ' unexpected reboot'.format(_id)) - - wait(lambda: tcp_ping(_ip, 22), - timeout=60 * 10, - timeout_msg='Node {} still is not available by SSH'.format( - dregular_ctrl.name)) - - asserts.assert_false('True' in check_auto_mode(_ip), - "Maintenance mode should not switched") - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up( - [dregular_ctrl.name]) - - # Wait until Cinder services UP on a controller - self.fuel_web.wait_cinder_is_up( - [dregular_ctrl.name]) - - # Wait until RabbitMQ cluster is UP - wait_pass(lambda: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['ha'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'RabbitMQ availability')), - timeout=1500) - logger.info('RabbitMQ cluster is available') - - # TODO(astudenov): add timeout_msg - wait_pass(lambda: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['sanity'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Check that required services are running')), - timeout=1500) - logger.info("Required services are running") - - try: - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', 'ha']) - except AssertionError: - logger.debug("Test failed from first probe," - " we sleep 600 second try one more time" - " and if it fails again - test will fails ") - time.sleep(600) - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', 'ha']) diff --git a/fuelweb_test/tests/tests_strength/test_failover.py b/fuelweb_test/tests/tests_strength/test_failover.py deleted file mode 100644 index 5ab68e686..000000000 --- a/fuelweb_test/tests/tests_strength/test_failover.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_strength.test_failover_base\ - import TestHaFailoverBase - - -@test(groups=["ha", "neutron_failover", "ha_neutron_destructive"]) -class TestHaNeutronFailover(TestHaFailoverBase): - """TestHaNeutronFailover.""" # TODO documentation - - @property - def snapshot_name(self): - return "prepare_ha_neutron" - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_ha", "prepare_ha_neutron", "neutron", "deployment"]) - @log_snapshot_after_test - def prepare_ha_neutron(self): - """Prepare cluster in HA/Neutron mode for failover tests - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller roles - 3. Add 2 nodes with compute roles - 4. Deploy the cluster - 5. Make snapshot - - Duration 70m - Snapshot prepare_ha_neutron - """ - super(self.__class__, self).deploy_ha() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_destroy_controllers", "ha_destroy_controllers"]) - @log_snapshot_after_test - def ha_neutron_destroy_controllers(self): - """Destroy two controllers and check pacemaker status is correct - - Scenario: - 1. Revert environment - 2. Destroy first controller - 3. Check pacemaker status - 4. Run OSTF - 5. Revert environment - 6. Destroy second controller - 7. Check pacemaker status - 8. Run OSTF - - Duration 35m - """ - super(self.__class__, self).ha_destroy_controllers() - - @test(enabled=False, - depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_disconnect_controllers", - "ha_disconnect_controllers"]) - @log_snapshot_after_test - def ha_neutron_disconnect_controllers(self): - """Disconnect controllers and check pacemaker status is correct - - Scenario: - 1. Block traffic on br-mgmt of the first controller - 2. Check pacemaker status - 3. Wait until MySQL Galera is UP on some controller - 4. Run OSTF - - Duration 15m - """ - super(self.__class__, self).ha_disconnect_controllers() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_delete_vips", "ha_delete_vips"]) - @log_snapshot_after_test - def ha_neutron_delete_vips(self): - """Delete management and public VIPs 10 times. - Verify that they are restored. - Verify cluster by OSTF - - Scenario: - 1. Delete 10 time public and management VIPs - 2. Wait while it is being restored - 3. Verify it is restored - 4. Run OSTF - - Duration 30m - """ - super(self.__class__, self).ha_delete_vips() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_mysql_termination", "ha_mysql_termination"]) - @log_snapshot_after_test - def ha_neutron_mysql_termination(self): - """Terminate mysql on all controllers one by one - - Scenario: - 1. Terminate mysql - 2. Wait while it is being restarted - 3. Verify it is restarted - 4. Go to another controller - 5. 
Run OSTF - - Duration 15m - """ - super(self.__class__, self).ha_mysql_termination() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_haproxy_termination", "ha_haproxy_termination"]) - @log_snapshot_after_test - def ha_neutron_haproxy_termination(self): - """Terminate haproxy on all controllers one by one - - Scenario: - 1. Terminate haproxy - 2. Wait while it is being restarted - 3. Verify it is restarted - 4. Go to another controller - 5. Run OSTF - - Duration 25m - """ - super(self.__class__, self).ha_haproxy_termination() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_pacemaker_configuration", - "ha_pacemaker_configuration"]) - @log_snapshot_after_test - def ha_neutron_pacemaker_configuration(self): - """Verify resources are configured - - Scenario: - 1. SSH to controller node - 2. Verify resources are configured - 3. Go to next controller - - Duration 15m - """ - super(self.__class__, self).ha_pacemaker_configuration() - - @test(enabled=False, depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_pacemaker_restart_heat_engine", - "ha_pacemaker_restart_heat_engine"]) - @log_snapshot_after_test - def ha_neutron_pacemaker_restart_heat_engine(self): - """Verify heat engine service is restarted - by pacemaker on amqp connection loss - - Scenario: - 1. SSH to any controller - 2. Check heat-engine status - 3. Block heat-engine amqp connections - 4. Check heat-engine was stopped on current controller - 5. Unblock heat-engine amqp connections - 6. Check heat-engine process is running with new pid - 7. Check amqp connection re-appears for heat-engine - - Duration 15m - """ - super(self.__class__, self).ha_pacemaker_restart_heat_engine() - - @test(enabled=False, depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_check_monit", "ha_check_monit"]) - @log_snapshot_after_test - def ha_neutron_check_monit(self): - """Verify monit restarted nova - service if it was killed - - Scenario: - 1. SSH to every compute node in cluster - 2. Kill nova-compute service - 3. Check service is restarted by monit - - Duration 25m - """ - super(self.__class__, self).ha_check_monit() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_firewall"]) - @log_snapshot_after_test - def ha_neutron_firewall(self): - """Check firewall vulnerability on Neutron network - - Scenario: - 1. Start 'socat' on a cluster node to listen for a free random port - 2. Put to this port a string using 'nc' from admin node - 3. Check if the string appeared in the cluster node - 4. Repeat for each cluster node - - Duration 25m - - """ - super(self.__class__, self).check_firewall_vulnerability() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_virtual_router"]) - @log_snapshot_after_test - def ha_neutron_virtual_router(self): - """Verify connection is present and - downloading maintained by conntrackd - after primary controller destroy - - Scenario: - 1. SSH to compute node - 2. Check Internet connectivity - 3. Destroy primary controller - 4. Check Internet connectivity - - Duration 25m - - """ - super(self.__class__, self).check_virtual_router() - - @test(enabled=False, depends_on_groups=['prepare_ha_neutron'], - groups=["check_neutron_package_loss"]) - @log_snapshot_after_test - def ha_neutron_packages_loss(self): - """Check cluster recovery if br-mgmt loss 5% packages - - Scenario: - 1. SSH to controller - 2. set 5 % package loss on br-mgmt - 3. 
run ostf - - Duration - """ - # TODO enable test when fencing will be implements - super(self.__class__, self).ha_controller_loss_packages() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_check_alive_rabbit"]) - @log_snapshot_after_test - def ha_neutron_check_alive_rabbit(self): - """Check alive rabbit node is not kicked from cluster - when corosync service on node dies - - Scenario: - 1. SSH to first controller and put corosync cluster to - maintenance mode: - crm configure property maintenance-mode=true - 2. Stop corosync service on first controller - 3. Check on master node that rabbit-fence.log contains - Ignoring alive node rabbit@node-1 - 4. On second controller check that rabbitmq cluster_status - contains all 3 nodes - 5. On first controller start corosync service and restart pacemaker - 6. Check that pcs status contains all 3 nodes - - Duration 25m - - """ - super(self.__class__, self).check_alive_rabbit_node_not_kicked() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_check_dead_rabbit"]) - @log_snapshot_after_test - def ha_neutron_check_dead_rabbit(self): - """Check dead rabbit node is kicked from cluster - when corosync service on node dies - - Scenario: - 1. SSH to first controller and put corosync cluster to - maintenance mode: - crm configure property maintenance-mode=true - 2. Stop rabbit and corosync service on first controller - 3. Check on master node that rabbit-fence.log contains - Disconnecting rabbit@node-1 - 4. On second controller check that rabbitmq cluster_status - contains only 2 nodes - - Duration 25m - - """ - super(self.__class__, self).check_dead_rabbit_node_kicked() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_neutron_3_1_rabbit_failover"]) - @log_snapshot_after_test - def ha_neutron_test_3_1_rabbit_failover(self): - """Check 3 in 1 rabbit failover - - Scenario: - 1. SSH to controller and get rabbit master - 2. Destroy not rabbit master node - 3. Check that rabbit master stay as was - 4. Run ostf ha - 5. Turn on destroyed slave - 6. Check rabbit master is the same - 7. Run ostf ha - 8. Destroy rabbit master node - 9. Check that new rabbit-master appears - 10. Run ostf ha - 11. Power on destroyed node - 12. Check that new rabbit-master was not elected - 13. Run ostf ha - - Duration 25m - - """ - super(self.__class__, self).test_3_1_rabbit_failover() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=["ha_corosync_stability_check", "ha"]) - @log_snapshot_after_test - def ha_corosync_stability_check(self): - """Check after corosync failover that "pcs status nodes" reports - the same DC with quorum, "offline"/"online" statuses - - Scenario: - 1. On the first controller kill corosync - 2. Verify for all controllers "pcs status nodes" reports - 3. Start corosync on the first controller - 4. Repeat steps 1-3 500 times - - Duration 120m - - """ - super(self.__class__, self).ha_corosync_stability_check() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=['change_pacemaker_parameter_does_not_break_rabbitmq']) - @log_snapshot_after_test - def change_pacemaker_parameter_not_break_rabbitmq(self): - """Change pacemaker parameters doesn't break RabbitMQ. - - Scenario: - 1. Deploy environment with at least 3 controllers - 2. Change max_rabbitmqctl_timeouts parameter on one of - controllers,after that slaves rabbitmq will be restarted by - Pacemaker. - 3. Wait for 3 minutes. - 4. Check RabbitMQ cluster is assembled until success in 10 min - 5. Run OSTF - 6. 
Repeat two more times steps 2-5 - - Duration 80 min - - """ - super(self.__class__, self). \ - change_pacemaker_parameter_not_break_rabbitmq() - - @test(depends_on_groups=['prepare_ha_neutron'], - groups=['ha_rabbitmq_stability_check']) - @log_snapshot_after_test - def ha_rabbitmq_stability_check(self): - """Check after repeatable failover rabbit cluster is healthy. - - Scenario: - 1. Deploy environment with at least 3 controllers - (Or revert existing snapshot) - 2. Wait for mysql cluster to become active - 3. Run ostf tests before destructive actions - 4. Get rabbit master node - 5. Move master rabbit resource to slave with pcs - 6. Delete pcs constraint for rabbit resource - 7. Assert HA services ready - 8. Get new rabbit master node - 9. Destroy it - 10. Assert HA services ready - 11. Run sanity and smoke OSTF sets - 12. Power on destroyed node - 13. Assert HA services ready - 14. Assert OS services ready - 15. Run OSTF - - Duration 80 min - - """ - super(self.__class__, self).ha_rabbitmq_stability_check() diff --git a/fuelweb_test/tests/tests_strength/test_failover_base.py b/fuelweb_test/tests/tests_strength/test_failover_base.py deleted file mode 100644 index 710ecbc53..000000000 --- a/fuelweb_test/tests/tests_strength/test_failover_base.py +++ /dev/null @@ -1,1414 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
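The suite above (test_failover.py) and the base module deleted below illustrate the delegation pattern used throughout these strength tests: each proboscis test case carries only the group metadata and a human-readable scenario docstring, and forwards the actual work to a shared implementation class. A minimal sketch of that pattern, with illustrative names rather than code taken from this repository:

    from proboscis import test


    class FailoverScenarios(object):
        """Shared scenario implementations (illustrative)."""

        def destroy_controllers(self):
            # The real method reverts a snapshot, destroys a node,
            # checks pacemaker state and runs OSTF.
            pass


    @test(groups=["ha_example"])
    class NeutronFailoverExample(FailoverScenarios):

        @test(groups=["ha_example_destroy_controllers"])
        def neutron_destroy_controllers(self):
            """The docstring documents the scenario steps for reports."""
            # Delegate to the shared implementation, as the wrappers
            # above do with TestHaFailoverBase.
            super(self.__class__, self).destroy_controllers()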
- -import re -import time - -from devops.error import TimeoutError -from devops.helpers.helpers import wait_pass -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_false -from proboscis.asserts import assert_not_equal -from proboscis.asserts import assert_true -from proboscis import SkipTest -# pylint: disable=redefined-builtin -# noinspection PyUnresolvedReferences -from six.moves import xrange -# pylint: enable=redefined-builtin - -from core.helpers.log_helpers import logwrap - -from fuelweb_test import logger -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.checkers import check_mysql -from fuelweb_test.helpers.checkers import check_ping -from fuelweb_test.helpers.utils import get_file_size -from fuelweb_test.helpers.utils import RunLimit -from fuelweb_test.helpers.utils import TimeStat -from fuelweb_test.helpers.pacemaker import get_pacemaker_resource_name -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import DNS -from fuelweb_test.settings import DNS_SUFFIX -from fuelweb_test.settings import DOWNLOAD_LINK -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU -from fuelweb_test.settings import REPEAT_COUNT -from fuelweb_test.tests.base_test_case import TestBasic - - -class TestHaFailoverBase(TestBasic): - """TestHaFailoverBase.""" # TODO documentation - - @property - def snapshot_name(self): - raise ValueError( - 'Property snapshot_name should be redefined in child classes ' - 'before use!') - - def deploy_ha(self): - - self.check_run(self.snapshot_name) - self.env.revert_snapshot("ready_with_5_slaves") - - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[5:6]) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - public_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions(public_vip) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=14) - self.fuel_web.verify_network(cluster_id) - - self.env.make_snapshot(self.snapshot_name, is_make=True) - - def deploy_ha_ceph(self): - - self.check_run(self.snapshot_name) - self.env.revert_snapshot("ready_with_5_slaves") - - settings = { - 'volumes_ceph': True, - 'images_ceph': True, - 'osd_pool_size': '2', - 'volumes_lvm': False, - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings=settings - ) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=150 * 60) - public_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions(public_vip) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=14) - self.fuel_web.verify_network(cluster_id) - - self.env.make_snapshot(self.snapshot_name, is_make=True) - - def ha_destroy_controllers(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not 
found'.format(self.snapshot_name)) - - def get_needed_controllers(cluster_id): - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=cluster_id, - roles=['controller']) - ret = [] - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - p_d_ctrl = self.fuel_web.get_nailgun_primary_node(d_ctrls[0]) - ret.append(p_d_ctrl) - ret.append((set(d_ctrls) - {p_d_ctrl}).pop()) - - return ret - - for num in xrange(2): - - # STEP: Revert environment - # if num==0: show_step(1); if num==1: show_step(5) - self.show_step([1, 5][num]) - self.env.revert_snapshot(self.snapshot_name) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - controllers = list(get_needed_controllers(cluster_id)) - - # STEP: Destroy first/second controller - devops_node = controllers[num] - # if num==0: show_step(2); if num==1: show_step(6) - self.show_step([2, 6][num], details="Destroying node: " - "{0}".format(devops_node.name)) - devops_node.destroy(False) - - # STEP: Check pacemaker status - self.show_step([3, 7][num]) - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=cluster_id, - roles=['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - - self.fuel_web.assert_pacemaker( - (set(d_ctrls) - {devops_node}).pop().name, - set(d_ctrls) - {devops_node}, - [devops_node]) - - # Wait until Nailgun marked suspended controller as offline - self.fuel_web.wait_node_is_offline(devops_node) - - # Wait the pacemaker react to changes in online nodes - time.sleep(60) - # Wait for HA services ready - self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1) - # Wait until OpenStack services are UP - self.fuel_web.assert_os_services_ready(cluster_id) - - logger.info("Waiting 300 sec before MySQL Galera will up, " - "then run OSTF") - - # Wait until MySQL Galera is UP on online controllers - self.fuel_web.wait_mysql_galera_is_up( - [n.name for n in set(d_ctrls) - {devops_node}], - timeout=300) - - # STEP: Run OSTF - self.show_step([4, 8][num]) - # should fail 1 according to one haproxy backend marked as fail - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity'], - should_fail=1) - - def ha_disconnect_controllers(self): - if not self.env.revert_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - - cmd = ('iptables -I INPUT -i br-mgmt -j DROP && ' - 'iptables -I OUTPUT -o br-mgmt -j DROP') - remote.check_call(cmd) - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up(['slave-02']) - # should fail 2 according to haproxy backends marked as fail - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['sanity', 'smoke'], should_fail=2) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity'], - should_fail=2) - - def ha_delete_vips(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - logger.debug('Start reverting of {0} snapshot' - .format(self.snapshot_name)) - self.env.revert_snapshot(self.snapshot_name) - cluster_id = \ - self.fuel_web.client.get_cluster_id(self.__class__.__name__) - 
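Every scenario in this base class opens with the same guard seen above: skip when the prerequisite snapshot was never built, otherwise revert the whole environment to a known-good state. A condensed sketch of that idiom (the helper name is hypothetical, the calls mirror the code above):

    from proboscis import SkipTest


    def revert_or_skip(env, snapshot_name):
        # Skip the test when the prerequisite snapshot does not exist;
        # otherwise roll the environment back before running the scenario.
        if not env.d_env.has_snapshot(snapshot_name):
            raise SkipTest('Snapshot {} not found'.format(snapshot_name))
        env.revert_snapshot(snapshot_name)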
logger.debug('Cluster id is {0}'.format(cluster_id)) - resources = { - "vip__management": {"iface": "b_management", "netns": "haproxy"}, - "vip__public": {"iface": "b_public", "netns": "haproxy"} - } - nailgun_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=cluster_id, - roles=['controller']) - devops_controllers = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - nailgun_controllers) - - assert_true(devops_controllers is not None, - "Nailgun nodes are not associated with devops nodes") - - logger.debug("Current controller nodes are {0}".format( - [i.name for i in devops_controllers])) - - checks_number = 10 - for resource in resources: - for check_counter in xrange(1, checks_number + 1): - # 1. Locate where the resource is running - active_nodes = self.fuel_web.get_pacemaker_resource_location( - devops_controllers[0].name, - resource) - assert_true(len(active_nodes) == 1, - "Resource should be running on a single node, " - "but started on the nodes {0}".format( - [n.name for n in active_nodes])) - - logger.debug("Start looking for the IP of {0} " - "on {1}".format(resource, active_nodes[0].name)) - address = self.fuel_web.ip_address_show( - active_nodes[0].name, - interface=resources[resource]['iface'], - namespace=resources[resource]['netns']) - assert_true(address is not None, - "Resource {0} located on {1}, but interface " - "doesn't have " - "ip address".format(resource, - active_nodes[0].name)) - logger.debug("Found the IP: {0}".format(address)) - - # 2. Delete the VIP - logger.debug("Deleting ip {0} on node {1}, " - "interface {2} ".format(address, - active_nodes[0].name, - resources[resource])) - self.fuel_web.ip_address_del( - node_name=active_nodes[0].name, - interface=resources[resource]['iface'], - ip=address, namespace=resources[resource]['netns']) - - def check_restore(): - new_nodes = self.fuel_web.get_pacemaker_resource_location( - devops_controllers[0].name, - resource) - if len(new_nodes) != 1: - return False - new_address = self.fuel_web.ip_address_show( - new_nodes[0].name, - interface=resources[resource]['iface'], - namespace=resources[resource]['netns']) - return new_address is not None - - # 3. Wait for the IP to be restored - logger.debug("Waiting for the deleted ip to be restored ...") - - wait(check_restore, timeout=60, - timeout_msg='Resource has not been restored within 60 sec') - - new_nodes = self.fuel_web.get_pacemaker_resource_location( - devops_controllers[0].name, - resource) - assert_true(len(new_nodes) == 1, - "After ip deletion resource should run on a single" - " node, but ran on {0}.
On {1} attempt".format( - [n.name for n in new_nodes], - check_counter)) - logger.info( - "Resource has been deleted from {0} and " - "restored on {1}".format( - active_nodes[0].name, - new_nodes[0].name)) - logger.info("Resource {0} restored {1} times".format( - resource, checks_number)) - - # Assert ha services are ready - self.fuel_web.assert_ha_services_ready(cluster_id) - - # Run OSTF tests - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - def ha_mysql_termination(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - for nailgun_node in n_ctrls: - dev_node = self.fuel_web.get_devops_node_by_nailgun_node( - nailgun_node - ) - logger.info('Terminating MySQL on {0}' - .format(dev_node.name)) - - try: - self.ssh_manager.check_call(nailgun_node['ip'], - 'pkill -9 -x "mysqld"') - except: - logger.error('MySQL on {0} is down after snapshot revert'. - format(dev_node.name)) - raise - - check_mysql(nailgun_node['ip'], dev_node.name) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - self.fuel_web.wait_mysql_galera_is_up(['slave-01', 'slave-02', - 'slave-03'], timeout=300) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - def ha_haproxy_termination(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - - def haproxy_started(ip): - pid_path = '/var/run/resource-agents/ns_haproxy/ns_haproxy.pid' - cmd = '[ -f {pid_path} ] && ' \ - '[ "$(ps -p $(cat {pid_path}) -o pid=)" ' \ - '== "$(pidof haproxy)" ]'.format(pid_path=pid_path) - result = self.ssh_manager.execute_on_remote( - ip, - cmd, - raise_on_assert=False) - return result['exit_code'] == 0 - - for nailgun_node in n_ctrls: - ip = nailgun_node['ip'] - cmd = 'kill -9 $(pidof haproxy)' - self.ssh_manager.execute_on_remote( - ip, - cmd=cmd) - wait(lambda: haproxy_started(ip), - timeout=20, - timeout_msg='Waiting 20 sec for haproxy timed out') - assert_true(haproxy_started(ip), 'haproxy was not started') - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - # sometimes keystone is not available right after haproxy - # restart thus ostf tests fail with corresponding error - # about unavailability of the service. 
To account for this, - # we run the sanity set as a preliminary check - - # 2 minutes is more than enough for keystone to become available - # after haproxy restart - timeout = 120 - - self.fuel_web.assert_os_services_ready( - cluster_id=cluster_id, - timeout=timeout) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - def ha_pacemaker_configuration(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - pcm_nodes = ' '.join(self.fuel_web.get_pcm_nodes( - self.env.d_env.nodes().slaves[0].name, pure=True)['Online']) - logger.debug("pacemaker nodes are {0}".format(pcm_nodes)) - for devops_node in d_ctrls: - config = self.fuel_web.get_pacemaker_config(devops_node.name) - logger.debug("config on node {0} is {1}".format( - devops_node.name, config)) - assert_not_equal( - re.search("vip__public\s+\(ocf::fuel:ns_IPaddr2\):\s+Started", - config) and - re.search("Clone Set:\s+clone_ping_vip__public\s+" - "\[ping_vip__public\]\s+Started:\s+\[ {0} \]" - .format(pcm_nodes), config), - None, 'Resource [vip__public] is not properly configured') - assert_true( - 'vip__management (ocf::fuel:ns_IPaddr2): Started' - in config, 'Resource [vip__management] is not properly' - ' configured') - assert_not_equal(re.search( - "Clone Set: clone_p_(heat|openstack-heat)-engine" - " \[p_(heat|openstack-heat)-engine\]\s+" - "Started: \[ {0} \]".format( - pcm_nodes), config), None, - 'Some of [heat*] engine resources are not properly configured') - assert_not_equal(re.search( - "Clone Set: clone_p_mysqld \[p_mysqld\]\s+Started:" - " \[ {0} \]".format(pcm_nodes), config), None, - 'Resource [p_mysqld] is not properly configured') - assert_not_equal(re.search( - "Clone Set: clone_p_haproxy \[p_haproxy\]\s+Started:" - " \[ {0} \]".format(pcm_nodes), config), None, - 'Resource [p_haproxy] is not properly configured') - - def ha_pacemaker_restart_heat_engine(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - ocf_success = "DEBUG: OpenStack Orchestration Engine" \ - " (heat-engine) monitor succeeded" - ocf_error = "ERROR: OpenStack Heat Engine is not connected to the" \ - " AMQP server: AMQP connection test returned 1" - - heat_name = 'heat-engine' - ocf_status = \ - 'script -q -c "OCF_ROOT=/usr/lib/ocf' \ - ' /usr/lib/ocf/resource.d/fuel/{0}' \ - ' monitor 2>&1"'.format(heat_name) - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - pid = ''.join(remote.execute('pgrep {0}' - .format(heat_name))['stdout']) - get_ocf_status = ''.join( - remote.execute(ocf_status)['stdout']).rstrip() - assert_true(ocf_success in get_ocf_status, - "heat engine monitor did not succeed, status is {0}".format( - get_ocf_status)) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - amqp_con = len(remote.execute( - "netstat -nap | grep {0} | grep :5673".
- format(pid))['stdout']) - assert_true(amqp_con > 0, 'There is no amqp connections') - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - remote.execute("iptables -I OUTPUT 1 -m owner --uid-owner heat -m" - " state --state NEW,ESTABLISHED,RELATED -j DROP") - cmd = "netstat -nap | grep {0} | grep :5673".format(pid) - wait(lambda: len(remote.execute(cmd)['stdout']) == 0, timeout=300, - timeout_msg='Failed to drop AMQP connections on node {}' - ''.format(p_d_ctrl.name)) - - get_ocf_status = ''.join( - remote.execute(ocf_status)['stdout']).rstrip() - logger.info('ocf status after blocking is {0}'.format( - get_ocf_status)) - assert_true(ocf_error in get_ocf_status, - "heat engine is running, status is {0}".format( - get_ocf_status)) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m" - " state --state NEW,ESTABLISHED,RELATED") - # TODO(astudenov): add timeout_msg - wait_pass(lambda: assert_true(ocf_success in ''.join( - remote.execute(ocf_status)['stdout']).rstrip()), timeout=240) - newpid = ''.join(remote.execute('pgrep {0}' - .format(heat_name))['stdout']) - assert_true(pid != newpid, "heat pid is still the same") - get_ocf_status = ''.join(remote.execute( - ocf_status)['stdout']).rstrip() - - assert_true(ocf_success in get_ocf_status, - "heat engine is not succeeded, status is {0}".format( - get_ocf_status)) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - heat = len( - remote.execute("netstat -nap | grep {0} | grep :5673" - .format(newpid))['stdout']) - assert_true(heat > 0) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.run_ostf(cluster_id=cluster_id) - - def ha_check_monit(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - n_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute']) - d_computes = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - n_computes) - for devops_node in d_computes: - with self.fuel_web.get_ssh_for_node(devops_node.name) as remote: - remote.execute("kill -9 `pgrep nova-compute`") - wait( - lambda: - len(remote.execute('pgrep nova-compute')['stdout']) == 1, - timeout=120, - timeout_msg='Nova service was not restarted') - assert_true(len(remote.execute( - "grep \"nova-compute.*trying to restart\" " - "/var/log/monit.log")['stdout']) > 0, - 'Nova service was not restarted') - - def check_firewall_vulnerability(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.fuel_web.get_last_created_cluster() - - self.fuel_web.security.verify_firewall(cluster_id) - - def check_virtual_router(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.fuel_web.get_last_created_cluster() - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - assert_true( - check_ping(node['ip'], DNS, deadline=120, interval=10), - "No Internet access from {0}".format(node['fqdn']) - ) - - devops_node = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - file_name = DOWNLOAD_LINK.split('/')[-1] 
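The virtual-router check below verifies that a background wget keeps making progress across a controller failure by sampling the downloaded file's size twice. The same probe can be expressed as a small helper (names are illustrative; get_size would wrap the suite's get_file_size call):

    import time


    def download_is_progressing(get_size, interval=60):
        # Sample the on-disk size twice; if the second sample is larger,
        # the transfer survived the disruption under test.
        size_before = get_size()
        time.sleep(interval)
        return get_size() > size_before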
- file_path = '/root/tmp' - with self.fuel_web.get_ssh_for_node('slave-05') as remote: - remote.execute( - "screen -S download -d -m bash -c 'mkdir -p {0} &&" - " cd {0} && wget --limit-rate=100k {1}'".format(file_path, - DOWNLOAD_LINK)) - - with self.fuel_web.get_ssh_for_node('slave-05') as remote: - wait(lambda: remote.execute("ls -1 {0}/{1}".format( - file_path, file_name))['exit_code'] == 0, timeout=60, - timeout_msg='File download was not started') - - ip_slave_5 = self.fuel_web.get_nailgun_node_by_name('slave-05')['ip'] - file_size1 = get_file_size(ip_slave_5, file_name, file_path) - time.sleep(60) - file_size2 = get_file_size(ip_slave_5, file_name, file_path) - assert_true(file_size2 > file_size1, - "File download was interrupted, the downloaded size " - "is not increasing. File: {0}. Current size: {1} byte(s), " - "prev size: {2} byte(s)".format(file_name, - file_size2, - file_size1)) - devops_node.destroy() - self.fuel_web.wait_node_is_offline(devops_node) - - slave05 = self.fuel_web.get_nailgun_node_by_name('slave-05') - assert_true( - check_ping(slave05['ip'], DNS, deadline=120, interval=10), - "No Internet access from {0}".format(slave05['fqdn']) - ) - if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU: - _ip = self.fuel_web.get_nailgun_node_by_name('slave-05')['ip'] - file_size1 = get_file_size(_ip, file_name, file_path) - time.sleep(60) - file_size2 = get_file_size(_ip, file_name, file_path) - assert_true(file_size2 > file_size1, - "File download was interrupted, the downloaded size " - "is not increasing") - - def ha_controller_loss_packages(self, dev='br-mgmt', loss_percent='0.05'): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - - logger.debug( - 'starting packet-loss injection on the slave' - ' for dev {0}, loss percent {1}'.
format(dev, loss_percent)) - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - remote = self.fuel_web.get_ssh_for_node(p_d_ctrl.name) - cmd_input = ('iptables -I INPUT -m statistic --mode random ' - '--probability {0} -i ' - '{1} -j DROP'.format(loss_percent, dev)) - cmd_output = ('iptables -I OUTPUT -m statistic --mode random ' - '--probability {0} -o ' - '{1} -j DROP'.format(loss_percent, dev)) - try: - remote.check_call(cmd_input) - remote.check_call(cmd_output) - except: - logger.error( - 'Command {:s} failed to be executed'.format(p_d_ctrl.name)) - raise - finally: - remote.clear() - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up(['slave-02']) - - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['smoke', 'sanity']) - - def ha_sequential_rabbit_master_failover(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - net_provider = self.fuel_web.client.get_cluster( - cluster_id)['net_provider'] - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up(['slave-02']) - - # Check keystone is fine after revert - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'sanity']) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'sanity']) - - public_vip = self.fuel_web.get_public_vip(cluster_id) - os_conn = os_actions.OpenStackActions(public_vip) - - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - - # Create instance - instance = os_conn.create_server_for_migration( - neutron=True, label=net_label) if net_provider == 'neutron' \ - else os_conn.create_server_for_migration() - - # Check ping - logger.info("Assigning floating ip to server") - floating_ip = os_conn.assign_floating_ip(instance) - - # check instance - wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(floating_ip.ip)) - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - # get master rabbit controller - master_rabbit = self.fuel_web.get_rabbit_master_node(p_d_ctrl.name) - - # destroy devops node with master rabbit - master_rabbit.destroy(False) - - # Wait until Nailgun marked destroyed controller as offline - self.fuel_web.wait_node_is_offline(master_rabbit) - - # check ha - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha']) - except AssertionError: - time.sleep(300) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha'], should_fail=3) - - # check instance - wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(floating_ip.ip)) - - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - active_slaves = [slave for slave - in d_ctrls - if slave.name != master_rabbit.name] - - second_master_rabbit = 
self.fuel_web.get_rabbit_master_node( - active_slaves[0].name) - - # destroy devops node with master rabbit - second_master_rabbit.destroy(False) - - # Wait until Nailgun marked destroyed controller as offline - self.fuel_web.wait_node_is_offline(second_master_rabbit) - - # turn on 1-st master - - master_rabbit.start() - - # Wait until Nailgun marked destroyed controller as online - self.fuel_web.wait_node_is_online(master_rabbit) - - self.fuel_web.check_ceph_status( - cluster_id, - offline_nodes=[self.fuel_web.get_nailgun_node_by_devops_node( - second_master_rabbit)['id']]) - - # check ha - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha']) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha'], should_fail=3) - - # turn on second master - - second_master_rabbit.start() - - # Wait until Nailgun marked destroyed controller as online - self.fuel_web.wait_node_is_online(second_master_rabbit) - - self.fuel_web.check_ceph_status(cluster_id) - # check ha - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha']) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha']) - - # ping instance - wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(floating_ip.ip)) - - # delete instance - os_conn = os_actions.OpenStackActions(public_vip) - os_conn.delete_instance(instance) - - # run ostf - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - def check_alive_rabbit_node_not_kicked(self): - - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - - pcm_nodes = self.fuel_web.get_pcm_nodes( - self.env.d_env.nodes().slaves[0].name, pure=True)['Online'] - logger.debug("pcm nodes are {}".format(pcm_nodes)) - rabbit_nodes = [node.replace(DNS_SUFFIX, "") - for node in pcm_nodes] - logger.debug("rabbit nodes are {}".format(rabbit_nodes)) - - rabbit_slave1_name = None - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - slave1_name = ''.join( - remote.execute('hostname')['stdout']).strip() - logger.debug('slave1 name is {}'.format(slave1_name)) - for rabbit_node in rabbit_nodes: - if rabbit_node in slave1_name: - rabbit_slave1_name = rabbit_node - logger.debug("rabbit node is {}".format(rabbit_slave1_name)) - - pcm_nodes.remove(slave1_name) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - remote.execute('crm configure property maintenance-mode=true') - remote.execute('service corosync stop') - - with self.env.d_env.get_admin_remote() as remote: - cmd = ("grep -P 'Ignoring alive node rabbit@\S*\\b{0}\\b' " - "/var/log/remote/{1}/rabbit-fence.log").format( - rabbit_slave1_name, pcm_nodes[0]) - try: - wait( - lambda: not remote.execute(cmd)['exit_code'], - timeout=2 * 60) - except TimeoutError: - result = remote.execute(cmd) - assert_equal(0, result['exit_code'], - 'alive rabbit node was not ignored,' - ' result is {}'.format(result)) - assert_equal(0, remote.execute( - "grep -P 'Got \S*\\b{0}\\b that left cluster' /var/log/remote/" - 
"{1}/rabbit-fence.log".format(slave1_name, - pcm_nodes[0]))['exit_code'], - "slave {} didn't leave cluster".format(slave1_name)) - assert_equal(0, remote.execute( - "grep -P 'Preparing to fence node rabbit@\S*\\b{0}\\b from " - "rabbit cluster' /var/log/remote/{1}/rabbit-fence.log".format( - rabbit_slave1_name, pcm_nodes[0]))['exit_code'], - "Node {} wasn't prepared for fencing".format( - rabbit_slave1_name)) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, - ['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - - rabbit_status = self.fuel_web.get_rabbit_running_nodes( - list((set(d_ctrls) - {p_d_ctrl}))[0].name) - logger.debug("rabbit status is {}".format(rabbit_status)) - for rabbit_node in rabbit_nodes: - assert_true(rabbit_node in rabbit_status, - "rabbit node {} is not in" - " rabbit status".format(rabbit_node)) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - remote.execute("service corosync start") - remote.execute("service pacemaker restart") - - self.fuel_web.assert_pacemaker(p_d_ctrl.name, - d_ctrls, []) - - def check_dead_rabbit_node_kicked(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - - self.env.revert_snapshot(self.snapshot_name) - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - pcm_nodes = self.fuel_web.get_pcm_nodes( - p_d_ctrl.name, pure=True)['Online'] - logger.debug("pcm nodes are {}".format(pcm_nodes)) - - rabbit_nodes = [node.replace(DNS_SUFFIX, "") - for node in pcm_nodes] - logger.debug("rabbit nodes are {}".format(rabbit_nodes)) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - slave1_name = ''.join( - remote.execute('hostname')['stdout']).strip() - logger.debug('slave1 name is {}'.format(slave1_name)) - for rabbit_node in rabbit_nodes: - if rabbit_node in slave1_name: - rabbit_slave1_name = rabbit_node - logger.debug("rabbit node is {}".format(rabbit_slave1_name)) - - pcm_nodes.remove(slave1_name) - - with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote: - remote.execute('crm configure property maintenance-mode=true') - remote.execute('rabbitmqctl stop_app') - remote.execute('service corosync stop') - - with self.env.d_env.get_admin_remote() as remote: - - cmd = ("grep -P 'Forgetting cluster node rabbit@\S*\\b{0}\\b'" - " /var/log/remote/{1}/rabbit-fence.log").format( - rabbit_slave1_name, pcm_nodes[0]) - try: - wait( - lambda: not remote.execute(cmd)['exit_code'], - timeout=2 * 60) - except TimeoutError: - result = remote.execute(cmd) - assert_equal(0, result['exit_code'], - 'dead rabbit node was not removed,' - ' result is {}'.format(result)) - - assert_equal(0, remote.execute( - "grep -P 'Got \S*\\b{0}\\b that left cluster' " - "/var/log/remote/{1}/rabbit-fence.log".format( - slave1_name, pcm_nodes[0]))['exit_code'], - "node {} didn't leave cluster".format(slave1_name)) - assert_equal(0, remote.execute( - "grep -P 'Preparing to fence node rabbit@\S*\\b{0}\\b from " - "rabbit cluster' /var/log/remote/{1}/rabbit-fence.log".format( - rabbit_slave1_name, pcm_nodes[0]))['exit_code'], - "Node {} wasn't prepared for fencing".format( - rabbit_slave1_name)) - assert_equal(0, remote.execute( - "grep -P 'Disconnecting node rabbit@\S*\\b{0}\\b' " - "/var/log/remote/{1}/rabbit-fence.log".format( - rabbit_slave1_name, pcm_nodes[0]))['exit_code'], - "node {} 
wasn't disconnected".format(rabbit_slave1_name)) - - rabbit_nodes.remove(rabbit_slave1_name) - - d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0], role='controller') - rabbit_status = self.fuel_web.get_rabbit_running_nodes(d_ctrl.name) - logger.debug("rabbit status is {}".format(rabbit_status)) - - for rabbit_node in rabbit_nodes: - assert_true(rabbit_node in rabbit_status, - "rabbit node {} is not in" - " rabbit status".format(rabbit_node)) - assert_false(rabbit_slave1_name in rabbit_status, - "rabbit node {0} is still in" - " cluster".format(rabbit_slave1_name)) - - def test_3_1_rabbit_failover(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - logger.info('Revert environment started...') - self.env.revert_snapshot(self.snapshot_name) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - logger.info('Waiting for Galera to come up') - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up(['slave-02']) - - # Check that HA and OS services are fine after revert - with TimeStat("ha_ostf_after_revert", is_uniq=True): - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=800) - self.fuel_web.assert_os_services_ready(cluster_id) - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - - # get master rabbit controller - master_rabbit = self.fuel_web.get_rabbit_master_node(p_d_ctrl.name) - logger.info('Looking for nodes where rabbit slaves are running') - # get rabbit slaves - rabbit_slaves = self.fuel_web.get_rabbit_slaves_node(p_d_ctrl.name) - assert_true(rabbit_slaves, - 'Cannot find rabbit slaves. ' - 'Current result is {0}'.format(rabbit_slaves)) - logger.info('Destroy node {0}'.format(rabbit_slaves[0].name)) - # destroy devops node with rabbit slave - rabbit_slaves[0].destroy() - - # Wait until Nailgun marked destroyed controller as offline - self.fuel_web.wait_node_is_offline(rabbit_slaves[0]) - - # check ha - logger.info('Node was destroyed {0}'.format(rabbit_slaves[0].name)) - # the HAProxy backend for the destroyed node will be down - with TimeStat("ha_ostf_after_slave_rabbit_destroy", is_uniq=True): - self.fuel_web.assert_ha_services_ready( - cluster_id, timeout=800, should_fail=1) - - # Run sanity and smoke tests to see if the cluster is operable - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - - active_slaves = [slave for slave - in d_ctrls - if slave.name != rabbit_slaves[0].name] - logger.debug('Active slaves are {0}'.format(active_slaves)) - assert_true(active_slaves, 'Cannot find any active slaves.') - - master_rabbit_after_slave_fail = self.fuel_web.get_rabbit_master_node( - active_slaves[0].name) - assert_equal(master_rabbit.name, master_rabbit_after_slave_fail.name) - - # turn on rabbit slave - logger.info('Try to power on node: {0}'.format(rabbit_slaves[0].name)) - rabbit_slaves[0].start() - - # Wait until Nailgun marks the restarted controller as online - self.fuel_web.wait_node_is_online(rabbit_slaves[0]) - - # check ha - with TimeStat("ha_ostf_after_rabbit_slave_power_on", is_uniq=True): - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=800) - # check os - self.fuel_web.assert_os_services_ready(cluster_id) - - # run ostf smoke and sanity - self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke']) - 
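The try/except AssertionError blocks scattered through these tests all implement the same "retry once after a grace period" policy for OSTF runs. A sketch of that pattern as a reusable helper, not part of the original code; the default delay mirrors the time.sleep(600) used above:

    import time


    def retry_after(check, delay=600, exceptions=(AssertionError,)):
        # Run `check`; on any of the listed exceptions, wait `delay`
        # seconds and run it once more, letting a second failure propagate.
        try:
            return check()
        except exceptions:
            time.sleep(delay)
            return check()

    # Usage sketch (fuel_web and cluster_id as in the tests above):
    # retry_after(lambda: self.fuel_web.run_ostf(
    #     cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity']))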
- # check that master rabbit is the same - - master_rabbit_after_slave_back = self.fuel_web.get_rabbit_master_node( - active_slaves[0].name) - - assert_equal(master_rabbit.name, master_rabbit_after_slave_back.name) - - # turn off rabbit master - logger.info('Destroy node {0}'.format(master_rabbit.name)) - master_rabbit.destroy() - - # Wait until Nailgun marked destroyed controller as offline - self.fuel_web.wait_node_is_offline(master_rabbit) - - # check ha and note that backend for destroyed node will be down - with TimeStat("ha_ostf_master_rabbit_destroy", is_uniq=True): - self.fuel_web.assert_ha_services_ready( - cluster_id, timeout=800, should_fail=1) - self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1) - - active_slaves = [slave for slave - in d_ctrls - if slave.name != master_rabbit.name] - logger.debug('Active slaves are {0}'.format(active_slaves)) - assert_true(active_slaves, 'Can not find any active slaves') - - master_rabbit_after_fail = self.fuel_web.get_rabbit_master_node( - active_slaves[0].name) - assert_not_equal(master_rabbit.name, master_rabbit_after_fail.name) - - # turn on rabbit master - logger.info('Power on node {0}'.format(master_rabbit.name)) - master_rabbit.start() - - # Wait until Nailgun marked controller as online - self.fuel_web.wait_node_is_online(master_rabbit) - - # check ha - with TimeStat("ha_ostf_master_rabbit_power_on", is_uniq=True): - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=800) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - # check that master rabbit is the same - - master_rabbit_after_node_back = self.fuel_web.get_rabbit_master_node( - active_slaves[0].name) - - assert_equal(master_rabbit_after_fail.name, - master_rabbit_after_node_back.name) - - def ha_corosync_stability_check(self): - - @logwrap - def _get_pcm_nodes(remote, pure=False): - nodes = {} - pcm_nodes = remote.execute('pcs status nodes').stdout_yaml - for status in ('Online', 'Offline', 'Standby'): - list_nodes = (pcm_nodes['Pacemaker Nodes'] - [status] or '').split() - if not pure: - nodes[status] = [self.fuel_web.get_fqdn_by_hostname(x) - for x in list_nodes] - else: - nodes[status] = list_nodes - return nodes - - def _check_all_pcs_nodes_status(ctrl_remotes, pcs_nodes_online, - status): - for remote in ctrl_remotes: - pcs_nodes = _get_pcm_nodes(remote) - # TODO: FIXME: Rewrite using normal SSHManager and node name - node_name = ''.join( - remote.execute('hostname -f')['stdout']).strip() - logger.debug( - "Status of pacemaker nodes on node {0}: {1}". - format(node_name, pcs_nodes)) - if set(pcs_nodes_online) != set(pcs_nodes[status]): - return False - return True - - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - self.env.revert_snapshot(self.snapshot_name) - - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - controller_node = self.fuel_web.get_nailgun_node_by_name(p_d_ctrl.name) - with self.fuel_web.get_ssh_for_node( - p_d_ctrl.name) as remote_controller: - pcs_nodes = self.fuel_web.get_pcm_nodes(p_d_ctrl.name) - assert_true( - not pcs_nodes['Offline'], "There are offline nodes: {0}". 
- format(pcs_nodes['Offline'])) - pcs_nodes_online = pcs_nodes['Online'] - cluster_id = self.fuel_web.get_last_created_cluster() - ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - alive_corosync_nodes = [node for node in ctrl_nodes - if node['mac'] != controller_node['mac']] - ctrl_remotes = [self.env.d_env.get_ssh_to_remote(node['ip']) - for node in ctrl_nodes] - live_remotes = [self.env.d_env.get_ssh_to_remote(node['ip']) - for node in alive_corosync_nodes] - for count in xrange(500): - logger.debug('Checking splitbrain in the loop, ' - 'count number: {0}'.format(count)) - wait_pass( - lambda: assert_equal( - remote_controller.execute( - 'killall -TERM corosync')['exit_code'], 0, - 'Corosync was not killed on controller, ' - 'see debug log, count-{0}'.format(count)), timeout=20) - wait_pass( - lambda: assert_true( - _check_all_pcs_nodes_status( - live_remotes, [controller_node['fqdn']], - 'Offline'), - 'Caught splitbrain, see debug log, ' - 'count-{0}'.format(count)), timeout=20) - wait_pass( - lambda: assert_equal( - remote_controller.execute( - 'service corosync restart && service pacemaker ' - 'restart')['exit_code'], 0, - 'Corosync was not started, see debug log,' - ' count-{0}'.format(count)), timeout=20) - wait_pass( - lambda: assert_true( - _check_all_pcs_nodes_status( - ctrl_remotes, pcs_nodes_online, 'Online'), - 'Corosync was not started on controller, see debug ' - 'log, count: {0}'.format(count)), timeout=20) - for remote in ctrl_remotes: - remote.clear() - for remote in live_remotes: - remote.clear() - - def change_pacemaker_parameter_not_break_rabbitmq(self): - error = 'Cannot execute command {}. Timeout exceeded' - - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.env.fuel_web.get_last_created_cluster() - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - rabbit_master = self.fuel_web.get_rabbit_master_node(d_ctrls[0].name) - rabbit_slaves = self.fuel_web.get_rabbit_slaves_node(d_ctrls[0].name) - - def count_run_rabbit(node): - r_nodes = len(self.fuel_web.get_rabbit_running_nodes(node.name)) - return r_nodes == len(n_ctrls) - - for n in xrange(1, 4): - logger.info('Checking {} time'.format(n)) - cmd = 'crm_resource --resource p_rabbitmq-server ' \ - '--set-parameter max_rabbitmqctl_timeouts ' \ - '--parameter-value {}'.format(3 + n) - - self.ssh_manager.execute_on_remote( - self.fuel_web.get_node_ip_by_devops_name(rabbit_master.name), - cmd - ) - logger.info('Command {} was executed on controller'.format(cmd)) - - logger.info('Waiting 60 seconds to give Pacemaker time to kill ' - 'some RabbitMQ nodes if it decides to do so') - time.sleep(60) - - logger.info('Check parameter was changed') - for node in rabbit_slaves: - node_ip = self.fuel_web.get_node_ip_by_devops_name(node.name) - cmd = 'crm_resource --resource p_rabbitmq-server' \ - ' --get-parameter max_rabbitmqctl_timeouts' - with RunLimit(seconds=30, - error_message=error.format(cmd)): - out = int( - self.ssh_manager.execute_on_remote( - node_ip, cmd=cmd)['stdout'][0]) - assert_equal(out, 3 + n, 'Parameter was not changed') - - logger.info('Wait and check nodes back to the RabbitMQ cluster') - wait(lambda: count_run_rabbit(rabbit_master), - timeout=600, interval=120, - timeout_msg='RabbitMQ cluster was not assembled') - for node in rabbit_slaves: - # pylint: disable=undefined-loop-variable - wait(lambda: count_run_rabbit(node), timeout=180, 
interval=10, - timeout_msg='Some nodes did not come back to' - ' the cluster within 3 minutes.') - # pylint: enable=undefined-loop-variable - - for node in d_ctrls: - node_ip = self.fuel_web.get_node_ip_by_devops_name(node.name) - cmd = 'rabbitmqctl list_queues' - with RunLimit(seconds=30, error_message=error.format(cmd)): - self.ssh_manager.execute_on_remote(node_ip, cmd) - - self.env.fuel_web.run_ostf(cluster_id, ['ha', 'smoke', 'sanity']) - - def ha_rabbitmq_stability_check(self): - if not self.env.d_env.has_snapshot(self.snapshot_name): - raise SkipTest('Snapshot {} not found'.format(self.snapshot_name)) - logger.info('Revert environment started...') - self.show_step(1, initialize=True) - self.env.revert_snapshot(self.snapshot_name) - - cluster_id = self.fuel_web.client.get_cluster_id( - self.__class__.__name__) - - logger.info('Waiting for the MySQL cluster to come up') - - self.show_step(2) - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up(['slave-02']) - - # Check that HA and OS services are fine after revert - self.show_step(3) - logger.info('Run OSTF tests before destructive actions') - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=600) - self.fuel_web.assert_os_services_ready(cluster_id) - - # Start the test - for count in xrange(REPEAT_COUNT): - logger.info('Attempt {0} to check rabbit recovery'.format(count)) - # Get primary controller from nailgun - p_d_ctrl = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - self.show_step(4, - details='Run count: {0}'.format(count), - initialize=True) - # get master rabbit controller - master_rabbit = self.fuel_web.get_rabbit_master_node(p_d_ctrl.name) - logger.info('Master rabbit is on {0} for attempt {1}'.format( - master_rabbit, count)) - - # get rabbit slaves - rabbit_slaves = self.fuel_web.get_rabbit_slaves_node(p_d_ctrl.name) - rabbit_resource_name = get_pacemaker_resource_name( - self.fuel_web.get_node_ip_by_devops_name(p_d_ctrl.name), - self.fuel_constants['rabbit_pcs_name']) - assert_true(rabbit_slaves, - 'Cannot find rabbit slaves. 
On count {0} ' - 'current result is {1}'.format(count, rabbit_slaves)) - logger.info('Rabbit slaves are running {0}' - ' on count {1}'.format(rabbit_slaves, count)) - - # Move rabbit master resource from master rabbit controller - master_rabbit_fqdn = self.fuel_web.get_rabbit_master_node( - p_d_ctrl.name, fqdn_needed=True) - - logger.info('Master rabbit fqdn {0} on count {1}'.format( - master_rabbit_fqdn, count)) - - slaves_rabbit_fqdn = self.fuel_web.get_rabbit_slaves_node( - p_d_ctrl.name, fqdn_needed=True) - - assert_true(slaves_rabbit_fqdn, - 'Failed to get rabbit slaves ' - 'fqdn on count {0}'.format(count)) - - logger.info('Slaves rabbit fqdn {0} ' - 'on count {1}'.format(slaves_rabbit_fqdn, count)) - - master_rabbit_ip = self.fuel_web.get_node_ip_by_devops_name( - master_rabbit.name) - - cmd = ( - 'pcs constraint delete ' - 'location-{0} 2>&1 >/dev/null| true'.format( - rabbit_resource_name)) - - self.ssh_manager.execute_on_remote(master_rabbit_ip, cmd) - - self.show_step(5, details='Run count: {0}'.format(count)) - # Move resource to rabbit slave - cmd_move = ('pcs constraint location {0} ' - 'rule role=master score=-INFINITY \#uname ' - 'ne {1}').format(rabbit_resource_name, - slaves_rabbit_fqdn[0]) - - result = self.ssh_manager.execute_on_remote( - master_rabbit_ip, cmd_move, raise_on_assert=False) - - assert_equal( - result['exit_code'], 0, - 'Fail to move p_rabbitmq-server with {0} on ' - 'count {1}'.format(result, count)) - - # Clear all - self.show_step(6, details='Run count: {0}'.format(count)) - cmd_clear = ('pcs constraint delete ' - 'location-{0}').format(rabbit_resource_name) - - result = self.ssh_manager.execute_on_remote( - master_rabbit_ip, cmd_clear, raise_on_assert=False) - - assert_equal( - result['exit_code'], 0, - 'Fail to delete pcs constraint using {0} on ' - 'count {1}'.format(cmd_clear, count)) - - # check ha - self.show_step(7) - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=700) - - # get new rabbit master node - self.show_step(8) - master_rabbit_2 = self.fuel_web.get_rabbit_master_node( - p_d_ctrl.name) - - logger.info('New master rabbit node is {0} on count {1}'.format( - master_rabbit_2.name, count)) - - # destroy master master_rabbit_node_2 - logger.info('Destroy master rabbit node {0} on count {1}'.format( - master_rabbit_2.name, count)) - - # destroy devops node with rabbit master - self.show_step(9) - master_rabbit_2.destroy() - - # Wait until Nailgun marked suspended controller as offline - self.fuel_web.wait_node_is_offline(master_rabbit_2) - - # check ha, should fail 1 test according - # to haproxy backend from destroyed will be down - - self.show_step(10) - self.fuel_web.assert_ha_services_ready( - cluster_id, timeout=800, should_fail=1) - - # Run sanity and smoke tests to see if cluster operable - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - # turn on destroyed node - self.show_step(12) - master_rabbit_2.start() - - # Wait until Nailgun marked suspended controller as online - self.fuel_web.wait_node_is_online(master_rabbit_2) - - # check ha - self.show_step(13) - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=800) - # check os - self.show_step(14) - self.fuel_web.assert_os_services_ready(cluster_id) - - # run ostf smoke and sanity - self.show_step(15) - self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke']) diff --git a/fuelweb_test/tests/tests_strength/test_failover_group_1.py b/fuelweb_test/tests/tests_strength/test_failover_group_1.py deleted file mode 100644 index 
fc69b2604..000000000 --- a/fuelweb_test/tests/tests_strength/test_failover_group_1.py +++ /dev/null @@ -1,406 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from devops.helpers.helpers import wait -from proboscis import test -from proboscis.asserts import assert_true, assert_equal - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['failover_group_1']) -class FailoverGroup1(TestBasic): - """FailoverGroup1""" # TODO documentation - - @test(depends_on_groups=['prepare_slaves_5'], - groups=['deploy_ha_cinder']) - @log_snapshot_after_test - def deploy_ha_cinder(self): - """Deploy environment with 3 controllers, Cinder and NeutronVLAN - - Scenario: - 1. Create environment with Cinder for storage and Neutron VLAN - 2. Add 3 controller, 2 compute+cinder nodes - 3. Verify networks - 4. Deploy environment - 5. Verify networks - 6. Run OSTF tests - - Duration 120m - Snapshot deploy_ha_cinder - - """ - - self.check_run('deploy_ha_cinder') - - self.env.revert_snapshot('ready_with_5_slaves') - - self.show_step(1, initialize=True) - data = { - 'tenant': 'failover', - 'user': 'failover', - 'password': 'failover', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'cinder'], - 'slave-05': ['compute', 'cinder'], - } - ) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('deploy_ha_cinder', is_make=True) - - @test(depends_on_groups=['deploy_ha_cinder'], - groups=['lock_db_access_from_primary_controller']) - @log_snapshot_after_test - def lock_db_access_from_primary_controller(self): - """Lock DB access from primary controller - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_cinder' test - 2. Lock DB access from primary controller - (emulate non-responsiveness of MySQL from the controller - where management VIP located) - 3. Verify networks - 4. Run HA OSTF tests, check MySQL tests fail - 5. 
Run Smoke and Sanity OSTF tests - - Duration 20m - Snapshot lock_db_access_from_primary_controller - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_cinder') - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - assert_equal(len(controllers), 3, - 'Environment does not have 3 controller nodes, ' - 'found {} nodes!'.format(len(controllers))) - - target_controllers = self.fuel_web.get_pacemaker_resource_location( - controllers[0]['fqdn'], 'vip__management') - - assert_equal(len(target_controllers), 1, - 'Expected 1 controller with "vip__management" resource ' - 'running, found {0}: {1}!'.format(len(target_controllers), - target_controllers)) - - target_controller = self.fuel_web.get_nailgun_node_by_devops_node( - target_controllers[0]) - - result = self.ssh_manager.execute( - ip=target_controller['ip'], - cmd='iptables -I OUTPUT -p tcp --dport 4567 -j DROP && ' - 'iptables -I INPUT -p tcp --dport 4567 -j DROP') - - assert_equal(result['exit_code'], 0, - "Lock DB access failed: {0}!".format(result)) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha'], should_fail=5) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('lock_db_access_from_primary_controller') - 
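Step 2 above emulates an unresponsive MySQL by dropping Galera replication traffic (TCP port 4567) on the controller holding the management VIP. A standalone sketch of the same iptables trick; the -D cleanup rules are my addition (the test itself relies on the snapshot revert to undo the lock), and run_on_controller is a placeholder for whatever executes shell commands on the node, e.g. SSHManager:

    GALERA_PORT = 4567

    LOCK_CMDS = [
        # Drop Galera replication traffic in both directions.
        'iptables -I OUTPUT -p tcp --dport {0} -j DROP'.format(GALERA_PORT),
        'iptables -I INPUT -p tcp --dport {0} -j DROP'.format(GALERA_PORT),
    ]

    UNLOCK_CMDS = [
        # Delete the same rules to restore replication (-D mirrors -I).
        'iptables -D OUTPUT -p tcp --dport {0} -j DROP'.format(GALERA_PORT),
        'iptables -D INPUT -p tcp --dport {0} -j DROP'.format(GALERA_PORT),
    ]


    def lock_db_access(run_on_controller):
        # `run_on_controller` is any callable that runs a shell command on
        # the target controller and returns its exit code; 0 means the
        # DROP rule was installed.
        for cmd in LOCK_CMDS:
            assert run_on_controller(cmd) == 0, 'Failed: {0}'.format(cmd)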
- @test(depends_on_groups=['deploy_ha_cinder'], - groups=['recovery_neutron_agents_after_restart']) - @log_snapshot_after_test - def recovery_neutron_agents_after_restart(self): - """Recovery of neutron agents after restart - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_cinder' test - 2. Kill all neutron agents on one of the controllers. - Pacemaker should restart them - - 2.1 verify that the output of crm status | grep -A1 "clone_p_neutron-l3-agent" - shows failed status for the controller - - 2.2 verify that the neutron-l3-agent process restarted - by ps aux | grep neutron-l3-agent - - 2.3 verify that the output of crm status | grep -A1 "clone_p_neutron-l3-agent" - shows started status for the controller - - 3. Verify networks - 4. Run OSTF tests - - Duration 20m - Snapshot recovery_neutron_agents_after_restart - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_cinder') - - self.show_step(2) - neutron_agents = [ - {'name': 'neutron-openvswitch-agent', - 'resource': 'neutron-openvswitch-agent'}, - {'name': 'neutron-l3-agent', - 'resource': 'neutron-l3-agent'}, - {'name': 'neutron-dhcp-agent', - 'resource': 'neutron-dhcp-agent'}, - {'name': 'neutron-metadata-agent', - 'resource': 'neutron-metadata-agent'} - ] - - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - assert_equal(len(controllers), 3, - 'Environment does not have 3 controller nodes, ' - 'found {} nodes!'.format(len(controllers))) - - for agent in neutron_agents: - target_controllers = self.fuel_web.get_pacemaker_resource_location( - controllers[0]['fqdn'], agent['resource']) - assert_true(len(target_controllers) >= 1, - "Didn't find any controller with " - "running {0} on it".format(agent['name'])) - target_controller = self.fuel_web.get_nailgun_node_by_devops_node( - target_controllers[0]) - old_pids = self.ssh_manager.execute( - target_controller['ip'], - cmd='pgrep -f {}'.format(agent['name']))['stdout'] - assert_true(len(old_pids) > 0, - 'PIDs of {0} not found on {1}'.format( - agent['name'], target_controller['name'])) - logger.debug('Old PIDs of {0} on {1}: {2}'.format( - agent['name'], target_controller['name'], old_pids)) - result = self.ssh_manager.execute( - target_controller['ip'], - cmd='pkill -9 -f {}'.format(agent['name'])) - assert_equal(result['exit_code'], 0, - 'Processes of {0} were not killed on {1}: {2}'.format( - agent['name'], target_controller['name'], result)) - wait(lambda: len(self.ssh_manager.execute( - target_controller['ip'], - cmd='pgrep -f {}'.format(agent['name']))['stdout']) > 0, - timeout=60, - timeout_msg='Neutron agent {0} was not recovered on node {1} ' - 'within 60 seconds!'.format( - agent['name'], target_controller['name'])) - new_pids = self.ssh_manager.execute( - target_controller['ip'], - cmd='pgrep -f {}'.format(agent['name']))['stdout'] - bad_pids = set(old_pids) & set(new_pids) - assert_equal(len(bad_pids), 0, - '{0} processes with PIDs {1} were not ' - 'killed on {2}!'.format(agent['name'], - bad_pids, - target_controller['name'])) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot('recovery_neutron_agents_after_restart') - - @test(depends_on_groups=['deploy_ha_cinder'], - groups=['safe_reboot_primary_controller']) - @log_snapshot_after_test - def safe_reboot_primary_controller(self): - """Safe reboot of primary controller - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_cinder' test - 2. Safe reboot of primary controller - 3. Wait up to 10 minutes for HA readiness - 4. Verify networks - 5. 
Run OSTF tests - - Duration: 30 min - Snapshot: safe_reboot_primary_controller - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_cinder') - cluster_id = self.fuel_web.get_last_created_cluster() - - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - assert_equal(len(controllers), 3, - 'Environment does not have 3 controller nodes, ' - 'found {} nodes!'.format(len(controllers))) - - self.show_step(2) - target_controller = self.fuel_web.get_nailgun_primary_node( - self.fuel_web.get_devops_node_by_nailgun_node(controllers[0])) - self.fuel_web.warm_restart_nodes([target_controller]) - - self.show_step(3) - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=60 * 10) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('safe_reboot_primary_controller') - - @test(depends_on_groups=['deploy_ha_cinder'], - groups=['hard_reset_primary_controller']) - @log_snapshot_after_test - def hard_reset_primary_controller(self): - """Hard reset of primary controller - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_cinder' test - 2. Hard reset of primary controller - 3. Wait up to 10 minutes for HA readiness - 4. Verify networks - 5. Run OSTF tests - - Duration: 30 min - Snapshot: hard_reset_primary_controller - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_cinder') - cluster_id = self.fuel_web.get_last_created_cluster() - - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - assert_equal(len(controllers), 3, - 'Environment does not have 3 controller nodes, ' - 'found {} nodes!'.format(len(controllers))) - - self.show_step(2) - target_controller = self.fuel_web.get_nailgun_primary_node( - self.fuel_web.get_devops_node_by_nailgun_node(controllers[0])) - self.fuel_web.cold_restart_nodes([target_controller]) - - self.show_step(3) - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=60 * 10) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('hard_reset_primary_controller') - - @test(depends_on_groups=['deploy_ha_cinder'], - groups=['power_outage_cinder_cluster']) - @log_snapshot_after_test - def power_outage_cinder_cluster(self): - """Power outage of Neutron vlan, cinder/swift cluster - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_cinder' test - 2. Create 2 instances - 3. Create 2 volumes - 4. Attach volumes to instances - 5. Fill cinder storage up to 30% - 6. Cold shutdown of all nodes - 7. Wait 5 min - 8. Start of all nodes - 9. Wait for HA services ready - 10. Verify networks - 11. 
Run OSTF tests - - Duration: 30 min - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_cinder') - cluster_id = self.fuel_web.get_last_created_cluster() - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), 'failover', 'failover', - 'failover') - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - self.show_step(2) - self.show_step(3) - self.show_step(4) - server = os_conn.create_instance( - neutron_network=True, label=net_name) - volume = os_conn.create_volume() - os_conn.attach_volume(volume, server) - server = os_conn.create_instance( - flavor_name='test_flavor1', - server_name='test_instance1', - neutron_network=True, label=net_name) - vol = os_conn.create_volume() - os_conn.attach_volume(vol, server) - - self.show_step(5) - with self.fuel_web.get_ssh_for_node('slave-04') as remote: - file_name = 'test_data' - result = remote.execute( - 'lvcreate -n test -L20G cinder')['exit_code'] - assert_equal(result, 0, "The file {0} was not " - "allocated".format(file_name)) - - self.show_step(6) - self.show_step(7) - self.show_step(8) - self.fuel_web.cold_restart_nodes( - self.env.d_env.get_nodes(name__in=[ - 'slave-01', - 'slave-02', - 'slave-03', - 'slave-04', - 'slave-05']), wait_after_destroy=300) - - self.show_step(9) - self.fuel_web.assert_ha_services_ready(cluster_id) - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id=cluster_id) diff --git a/fuelweb_test/tests/tests_strength/test_failover_group_2.py b/fuelweb_test/tests/tests_strength/test_failover_group_2.py deleted file mode 100644 index ac7729f43..000000000 --- a/fuelweb_test/tests/tests_strength/test_failover_group_2.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import SkipTest -from proboscis import test -from proboscis.asserts import assert_equal - -from devops.helpers.helpers import tcp_ping -from devops.helpers.helpers import wait -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['failover_group_2']) -class FailoverGroup2(TestBasic): - """FailoverGroup2""" # TODO documentation - - @test(depends_on_groups=['prepare_slaves_5'], - groups=['deploy_ha_ceph']) - @log_snapshot_after_test - def deploy_ha_ceph(self): - """Deploy environment with 3 controllers, Ceph and Neutron VXLAN - - Scenario: - 1. Create environment with Ceph for storage and Neutron VXLAN - 2. Add 3 controller, 2 compute+ceph nodes - 3. Verify networks - 4. Deploy environment - 5. Verify networks - 6. 
Run OSTF tests - - Duration 120m - Snapshot deploy_ha_ceph - - """ - - if self.env.d_env.has_snapshot('deploy_ha_ceph'): - raise SkipTest("Test 'deploy_ha_ceph' already run") - self.env.revert_snapshot('ready_with_5_slaves') - - self.show_step(1, initialize=True) - data = { - 'tenant': 'failover', - 'user': 'failover', - 'password': 'failover', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'], - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'volumes_ceph': True, - 'images_ceph': True, - 'osd_pool_size': '2', - 'volumes_lvm': False, - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - } - ) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('deploy_ha_ceph', is_make=True) - - @test(depends_on_groups=['deploy_ha_ceph'], - groups=['safe_reboot_primary_controller_ceph']) - @log_snapshot_after_test - def safe_reboot_primary_controller_ceph(self): - """Safe reboot of primary controller on ceph cluster - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_ceph' test - 2. Safe reboot of primary controller - 3. Wait up to 10 minutes for HA readiness - 4. Verify networks - 5. Run OSTF tests - - Duration: 30 min - Snapshot: safe_reboot_primary_controller - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_ceph') - cluster_id = self.fuel_web.get_last_created_cluster() - - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - assert_equal(len(controllers), 3, - 'Environment does not have 3 controller nodes, ' - 'found {} nodes!'.format(len(controllers))) - - self.show_step(2) - target_controller = self.fuel_web.get_nailgun_primary_node( - self.fuel_web.get_devops_node_by_nailgun_node(controllers[0])) - self.fuel_web.warm_restart_nodes([target_controller]) - - self.show_step(3) - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=60 * 10) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('safe_reboot_primary_controller_ceph') - - @test(depends_on_groups=['deploy_ha_ceph'], - groups=['hard_reboot_primary_controller_ceph']) - @log_snapshot_after_test - def hard_reboot_primary_controller_ceph(self): - """Hard reboot of primary controller with Ceph for storage - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_ceph' test - 2. Hard reboot of primary controller - 3. Wait up to 10 minutes for HA readiness - 4. Verify networks - 5. 
Run OSTF tests - - Duration: 30 min - Snapshot: hard_reset_primary_controller_ceph - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_ceph') - cluster_id = self.fuel_web.get_last_created_cluster() - - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - assert_equal(len(controllers), 3, - 'Environment does not have 3 controller nodes, ' - 'found {} nodes!'.format(len(controllers))) - - self.show_step(2) - target_controller = self.fuel_web.get_nailgun_primary_node( - self.fuel_web.get_devops_node_by_nailgun_node(controllers[0])) - self.fuel_web.cold_restart_nodes([target_controller]) - - self.show_step(3) - self.fuel_web.assert_ha_services_ready(cluster_id, timeout=60 * 10) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('hard_reboot_primary_controller_ceph') - - @test(depends_on_groups=['deploy_ha_ceph'], - groups=['shutdown_primary_controller_ceph']) - @log_snapshot_after_test - def shutdown_primary_controller_ceph(self): - """Shutdown primary controller for Neutron on ceph cluster - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_ceph' test - 2. Create 1 instance - 3. Set floating IP associated with created instance - 4. Shut down primary controller - 5. Wait for HA services to be ready - 6. Verify networks - 7. Ensure connectivity to external resources from VM - 8. Run OSTF tests - - Duration: XXX min - Snapshot: shutdown_primary_controller_ceph - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_ceph') - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - assert_equal(len(controllers), 3, - 'Environment does not have 3 controller nodes, ' - 'found {} nodes!'.format(len(controllers))) - - self.show_step(2) - os = os_actions.OpenStackActions( - controller_ip=self.fuel_web.get_public_vip(cluster_id), - user='failover', passwd='failover', tenant='failover') - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - hypervisors = os.get_hypervisors() - hypervisor_name = hypervisors[0].hypervisor_hostname - instance_1 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_1.id, hypervisor_name)) - - self.show_step(3) - floating_ip_1 = os.assign_floating_ip(instance_1) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_1.ip, instance_1.id)) - - self.show_step(4) - target_controller = self.fuel_web.get_nailgun_primary_node( - self.fuel_web.get_devops_node_by_nailgun_node(controllers[0])) - self.fuel_web.warm_shutdown_nodes([target_controller]) - - self.show_step(5) - self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - wait(lambda: tcp_ping(floating_ip_1.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(floating_ip_1.ip)) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('shutdown_primary_controller_ceph') - - @test(depends_on_groups=['deploy_ha_ceph'], - groups=['shutdown_non_primary_controller_ceph']) - @log_snapshot_after_test - def shutdown_non_primary_controller_ceph(self): - 
"""Shutdown non primary controller for Neutron on ceph cluster - - Scenario: - 1. Pre-condition - do steps from 'deploy_ha_ceph' test - 2. Create 1 instance - 3. Set floating IP associated with created instance - 4. Shut down non primary controller - 5. Wait for HA services to be ready - 6. Verify networks - 7. Ensure connectivity to external resources from VM - 8. Run OSTF tests - 9. Turn on non-primary controller - 10. Wait for for OS services to be ready - 11. Wait for HA services to be ready - 12. Verify networks - 13. Run OSTF tests - - - Duration: 180 min - Snapshot: shutdown_non_primary_controller_ceph - """ - - self.show_step(1) - self.env.revert_snapshot('deploy_ha_ceph') - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, roles=('controller',)) - - self.show_step(2) - os = os_actions.OpenStackActions( - controller_ip=self.fuel_web.get_public_vip(cluster_id), - user='failover', passwd='failover', tenant='failover') - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - hypervisors = os.get_hypervisors() - hypervisor_name = hypervisors[0].hypervisor_hostname - instance_1 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_1.id, hypervisor_name)) - - self.show_step(3) - floating_ip_1 = os.assign_floating_ip(instance_1) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_1.ip, instance_1.id)) - - self.show_step(4) - primary_controller = self.fuel_web.get_nailgun_primary_node( - self.fuel_web.get_devops_node_by_nailgun_node(controllers[0])) - pn_controller = self.fuel_web.get_nailgun_node_by_devops_node( - primary_controller) - np_controllers = [c for c in controllers if - c['id'] != pn_controller['id']] - - d_node = \ - self.fuel_web.get_devops_node_by_nailgun_node(np_controllers[0]) - - self.fuel_web.warm_shutdown_nodes([d_node]) - - self.show_step(5) - self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - wait(lambda: tcp_ping(floating_ip_1.ip, 22), timeout=120, - timeout_msg='Can not ping instance' - ' by floating ip {0}'.format(floating_ip_1.ip)) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(9) - self.fuel_web.warm_start_nodes([d_node]) - - self.show_step(10) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60) - - self.show_step(11) - self.fuel_web.assert_ha_services_ready(cluster_id) - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - - self.show_step(13) - self.fuel_web.run_ostf(cluster_id) - - self.env.make_snapshot('shutdown_primary_controller_ceph') diff --git a/fuelweb_test/tests/tests_strength/test_failover_group_3.py b/fuelweb_test/tests/tests_strength/test_failover_group_3.py deleted file mode 100644 index 012045c4e..000000000 --- a/fuelweb_test/tests/tests_strength/test_failover_group_3.py +++ /dev/null @@ -1,466 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division -import time - -from devops.error import TimeoutError -from devops.helpers.ssh_client import SSHAuth -from proboscis import test -from proboscis.asserts import assert_equal - -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers import utils -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic - - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) - - -@test(groups=['failover_group_3']) -class FailoverGroup3(TestBasic): - """FailoverGroup3""" # TODO documentation - - @test(depends_on_groups=['prepare_slaves_9'], - groups=['shutdown_ceph_for_all']) - @log_snapshot_after_test - def shutdown_ceph_for_all(self): - """Shutdown of Neutron Vxlan, ceph for all cluster - - Scenario: - 1. Create cluster with Neutron Vxlan, ceph for all, - ceph replication factor - 3 - 2. Add 3 controller, 2 compute, 3 ceph nodes - 3. Verify Network - 4. Deploy cluster - 5. Verify networks - 6. Run OSTF - 7. Create 2 volumes and 2 instances with attached volumes - 8. Fill ceph storages up to 30%(15% for each instance) - 9. Shutdown of all nodes - 10. Wait 5 minutes - 11. Start cluster - 12. Wait until OSTF 'HA' suite passes - 13. Verify networks - 14. Run OSTF tests - - Duration 230m - - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - self.show_step(1, initialize=True) - data = { - 'tenant': 'failover', - 'user': 'failover', - 'password': 'failover', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['tun'], - 'volumes_ceph': True, - 'images_ceph': True, - 'ephemeral_ceph': True, - 'objects_ceph': True, - 'osd_pool_size': '3', - 'volumes_lvm': False, - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'] - } - ) - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(7) - os = os_actions.OpenStackActions( - controller_ip=self.fuel_web.get_public_vip(cluster_id), - user='failover', passwd='failover', tenant='failover') - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - hypervisors = os.get_hypervisors() - hypervisor_name = hypervisors[0].hypervisor_hostname - instance_1 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_1.id, hypervisor_name)) - - floating_ip_1 = os.assign_floating_ip(instance_1) - logger.info("Floating address {0} 
associated with instance {1}" - .format(floating_ip_1.ip, instance_1.id)) - - hypervisor_name = hypervisors[1].hypervisor_hostname - instance_2 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_2.id, hypervisor_name)) - - floating_ip_2 = os.assign_floating_ip(instance_2) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_2.ip, instance_2.id)) - - self.show_step(8) - ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['ceph-osd']) - total_ceph_size = 0 - for node in ceph_nodes: - total_ceph_size += \ - self.fuel_web.get_node_partition_size(node['id'], 'ceph') - percent_15_mb = 0.15 * total_ceph_size - percent_15_gb = percent_15_mb // 1024 - volume_size = int(percent_15_gb + 1) - - volume_1 = os.create_volume(size=volume_size) - volume_2 = os.create_volume(size=volume_size) - - logger.info('Created volumes: {0}, {1}'.format(volume_1.id, - volume_2.id)) - - ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - - logger.info("Attach volumes") - cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb' - - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. openrc; ' + cmd.format(srv_id=instance_1.id, - volume_id=volume_1.id) - ) - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. openrc; ' + cmd.format(srv_id=instance_2.id, - volume_id=volume_2.id) - ) - - cmds = ['sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"', - 'sudo sh -c "/bin/mount /dev/vdb /mnt"', - 'sudo sh -c "/usr/bin/nohup' - ' /bin/dd if=/dev/zero of=/mnt/bigfile ' - 'bs=1M count={} &"'.format(int(percent_15_mb))] - - md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''} - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - for ip in [floating_ip_1.ip, floating_ip_2.ip]: - for cmd in cmds: - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - logger.info('RESULT for {}: {}'.format( - cmd, - utils.pretty_log(res)) - ) - logger.info('Wait up to 7200 sec until "dd" ends') - for _ in range(720): - cmd = 'ps -ef |grep -v grep| grep "dd if" ' - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - if res['exit_code'] != 0: - break - time.sleep(10) - logger.debug('Wait another 10 sec -' - ' waited {} sec in total'.format(10 * _)) - else: - raise TimeoutError('BigFile has not been' - ' created after 7200 sec') - cmd = 'md5sum /mnt/bigfile' - md5s[ip] = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth)['stdout'] - - self.show_step(9) - nodes = {'compute': [], 'controller': [], 'ceph-osd': []} - - for role in nodes: - nailgun_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, [role]) - nodes[role] = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - nailgun_nodes) - - self.fuel_web.warm_shutdown_nodes(nodes['compute']) - self.fuel_web.warm_shutdown_nodes(nodes['controller']) - self.fuel_web.warm_shutdown_nodes(nodes['ceph-osd']) - - self.show_step(10) - time.sleep(300) - - self.show_step(11) - self.fuel_web.warm_start_nodes(nodes['ceph-osd']) - self.fuel_web.warm_start_nodes(nodes['controller']) - self.show_step(12) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.warm_start_nodes(nodes['compute']) - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(13) - self.fuel_web.verify_network(cluster_id) - self.show_step(14) - self.fuel_web.run_ostf(cluster_id) - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - for ip in [floating_ip_1.ip, floating_ip_2.ip]: - cmd = 'md5sum /mnt/bigfile' - md5 = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth)['stdout'] - assert_equal(md5, md5s[ip], - "Actual md5sum {0} doesn't match" - " the old one {1} on {2}".format( - md5, md5s[ip], ip)) - 
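Both shutdown tests size their volumes so that two instances together fill the storage backend to about 30%: get_node_partition_size returns megabytes, so 15% of the total is taken per instance, converted to gigabytes, and rounded up for the volume size, while dd later writes the megabyte count directly as 1M blocks. A worked standalone version of that arithmetic; the sample backend size is made up:

    def volume_size_for_15_percent(total_size_mb):
        # 15% of the backend, in MB; this is also the dd block count.
        percent_15_mb = 0.15 * total_size_mb
        # Volume sizes are whole GB, so convert and round up.
        percent_15_gb = percent_15_mb // 1024
        return int(percent_15_gb + 1)

    # Example: a 200000 MB backend -> 30000 MB written per instance,
    # held on a 30 GB volume.
    assert volume_size_for_15_percent(200000) == 30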
- @test(depends_on_groups=['prepare_slaves_5'], - groups=['shutdown_cinder_cluster']) - @log_snapshot_after_test - def shutdown_cinder_cluster(self): - """Shutdown of Neutron vlan, cinder/swift cluster - - Scenario: - 1. Create cluster with Neutron Vlan, cinder/swift - 2. Add 3 controller, 2 compute, 1 cinder nodes - 3. Verify Network - 4. Deploy cluster - 5. Verify networks - 6. Run OSTF - 7. Create 2 volumes and 2 instances with attached volumes - 8. Fill cinder storage up to 30% (15% for each instance) - 9. Shutdown of all nodes - 10. Wait 5 minutes - 11. Start cluster - 12. Wait until OSTF 'HA' suite passes - 13. Verify networks - 14. Run OSTF tests - - Duration 230m - """ - self.env.revert_snapshot('ready_with_5_slaves') - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6], - skip_timesync=True) - - self.show_step(1, initialize=True) - data = { - 'tenant': 'failover', - 'user': 'failover', - 'password': 'failover', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'] - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'] - } - ) - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id) - - self.show_step(7) - os = os_actions.OpenStackActions( - controller_ip=self.fuel_web.get_public_vip(cluster_id), - user='failover', passwd='failover', tenant='failover') - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - hypervisors = os.get_hypervisors() - hypervisor_name = hypervisors[0].hypervisor_hostname - instance_1 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_1.id, hypervisor_name)) - - floating_ip_1 = os.assign_floating_ip(instance_1) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_1.ip, instance_1.id)) - - hypervisor_name = hypervisors[1].hypervisor_hostname - instance_2 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_2.id, hypervisor_name)) - - floating_ip_2 = os.assign_floating_ip(instance_2) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_2.ip, instance_2.id)) - - # COUNT SIZE - cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['cinder']) - total_cinder_size = 0 - for node in cinder_nodes: - total_cinder_size += \ - self.fuel_web.get_node_partition_size(node['id'], 'cinder') - percent_15_mb = 0.15 * total_cinder_size - percent_15_gb = percent_15_mb // 1024 - volume_size = 
int(percent_15_gb + 1) - - volume_1 = os.create_volume(size=volume_size) - volume_2 = os.create_volume(size=volume_size) - - logger.info('Created volumes: {0}, {1}'.format(volume_1.id, - volume_2.id)) - - ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - - logger.info("Attach volumes") - cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb' - - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. openrc; ' + cmd.format(srv_id=instance_1.id, - volume_id=volume_1.id) - ) - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. openrc; ' + cmd.format(srv_id=instance_2.id, - volume_id=volume_2.id) - ) - - self.show_step(8) - cmds = ['sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"', - 'sudo sh -c "/bin/mount /dev/vdb /mnt"', - 'sudo sh -c "/usr/bin/nohup' - ' /bin/dd if=/dev/zero of=/mnt/bigfile ' - 'bs=1M count={} &"'.format(int(percent_15_mb))] - - md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''} - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - for ip in [floating_ip_1.ip, floating_ip_2.ip]: - for cmd in cmds: - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - logger.info('RESULT for {}: {}'.format( - cmd, - utils.pretty_log(res)) - ) - logger.info('Wait up to 10800 sec until "dd" ends') - for _ in range(720): - cmd = 'ps -ef |grep -v grep| grep "dd if" ' - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - if res['exit_code'] != 0: - break - time.sleep(15) - logger.debug('Wait another 15 sec -' - ' waited {} sec in total'.format(15 * _)) - else: - raise TimeoutError('BigFile has not been' - ' created after 10800 sec') - cmd = 'md5sum /mnt/bigfile' - md5s[ip] = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth)['stdout'] - self.show_step(9) - nodes = {'compute': [], 'controller': [], 'cinder': []} - - for role in nodes: - nailgun_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, [role]) - nodes[role] = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - nailgun_nodes) - - self.fuel_web.warm_shutdown_nodes(nodes['compute']) - self.fuel_web.warm_shutdown_nodes(nodes['controller']) - self.fuel_web.warm_shutdown_nodes(nodes['cinder']) - - self.show_step(10) - time.sleep(300) - - self.show_step(11) - self.fuel_web.warm_start_nodes(nodes['cinder']) - self.fuel_web.warm_start_nodes(nodes['controller']) - self.show_step(12) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.warm_start_nodes(nodes['compute']) - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(13) - self.fuel_web.verify_network(cluster_id) - self.show_step(14) - self.fuel_web.run_ostf(cluster_id) - - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - for ip in [floating_ip_1.ip, floating_ip_2.ip]: - cmd = 'md5sum /mnt/bigfile' - md5 = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth)['stdout'] - assert_equal(md5, md5s[ip], - "Actual md5sum {0} doesn't match" - " the old one {1} on {2}".format( - md5, md5s[ip], ip)) diff --git a/fuelweb_test/tests/tests_strength/test_failover_mongo.py b/fuelweb_test/tests/tests_strength/test_failover_mongo.py deleted file mode 100644 index ff7905964..000000000 --- a/fuelweb_test/tests/tests_strength/test_failover_mongo.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from proboscis import test -from proboscis.asserts import assert_equal - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=['failover_group_mongo'], - enabled=False) -class FailoverGroupMongo(TestBasic): - """ FailoverGroupMongo """ # TODO documentation - - @test(depends_on_groups=["prepare_slaves_9"], - groups=['deploy_mongo_cluster'], - enabled=False) - @log_snapshot_after_test - def deploy_mongo_cluster(self): - """Deploy cluster with MongoDB nodes - - Scenario: - 1. Create environment with enabled Ceilometer and Neutron VLAN - 2. Add 3 controller, 3 mongodb, 1 compute and 1 cinder nodes - 3. Verify networks - 4. Deploy environment - 5. Verify networks - 6. Run OSTF tests - - Duration 200m - Snapshot deploy_mongo_cluster - """ - - self.env.revert_snapshot('ready_with_9_slaves') - - self.show_step(1, initialize=True) - data = { - 'ceilometer': True, - 'tenant': 'mongo', - 'user': 'mongo', - 'password': 'mongo', - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=data - ) - - self.show_step(2) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['mongo'], - 'slave-05': ['mongo'], - 'slave-06': ['mongo'], - 'slave-07': ['compute'], - 'slave-08': ['cinder'], - } - ) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', - 'ha', 'tests_platform'], - timeout=50 * 60) - self.env.make_snapshot('deploy_mongo_cluster', is_make=True) - - @test(depends_on_groups=["deploy_mongo_cluster"], - groups=['kill_mongo_processes'], - enabled=False) - @log_snapshot_after_test - def kill_mongo_processes(self): - """Kill mongo processes - - Scenario: - 1. Pre-condition - do steps from 'deploy_mongo_cluster' test - 2. Kill mongo processes on 1st node - 3. Wait 1 minute - 4. Check new mongo processes exist on 1st node - 5. Kill mongo processes on 2nd node - 6. Wait 1 minute - 7. Check new mongo processes exist on 2nd node - 8. Kill mongo processes on 3rd node - 9. Wait 1 minute - 10. Check new mongo processes exist on 3rd node - 11. 
Run OSTF tests - - Duration 60m - Snapshot kill_mongo_processes - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_mongo_cluster') - - cluster_id = self.fuel_web.get_last_created_cluster() - mongodb = self.fuel_web.get_nailgun_cluster_nodes_by_roles(cluster_id, - ['mongo']) - assert_equal(len(mongodb), 3, - "Environment doesn't have 3 MongoDB nodes, " - "found {} nodes!".format(len(mongodb))) - step = 2 - for node in mongodb: - old_pids = self.ssh_manager.execute( - ip=node['ip'], cmd='pgrep -f mongo')['stdout'] - self.show_step(step) - self.ssh_manager.execute_on_remote( - ip=node['ip'], cmd='pkill -9 -f mongo') - - self.show_step(step + 1) - time.sleep(60) - - self.show_step(step + 2) - new_pids = self.ssh_manager.execute( - ip=node['ip'], cmd='pgrep -f mongo')['stdout'] - bad_pids = set(old_pids) & set(new_pids) - assert_equal(len(bad_pids), 0, - 'MongoDB processes with PIDs {} ' - 'were not killed!'.format(bad_pids)) - step += 3 - - self.show_step(11) - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', - 'ha', 'tests_platform'], - timeout=50 * 60) - - self.env.make_snapshot('kill_mongo_processes') - - @test(depends_on_groups=['deploy_mongo_cluster'], - groups=['close_connections_for_mongo'], - enabled=False) - @log_snapshot_after_test - def close_connections_for_mongo(self): - """Close connection for Mongo node - - Scenario: - 1. Pre-condition - do steps from 'deploy_mongo_cluster' test - 2. Close management network for 1 Mongo node - 3. Run OSTF tests - - Duration 60m - Snapshot close_connections_for_mongo - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_mongo_cluster') - - cluster_id = self.fuel_web.get_last_created_cluster() - mongodb = self.fuel_web.get_nailgun_cluster_nodes_by_roles(cluster_id, - ['mongo']) - assert_equal(len(mongodb), 3, - "Environment doesn't have 3 MongoDB nodes, " - "found {} nodes!".format(len(mongodb))) - - self.show_step(2) - self.ssh_manager.execute_on_remote( - ip=mongodb[0]['ip'], - cmd='iptables -I INPUT -i br-mgmt -j DROP && ' - 'iptables -I OUTPUT -o br-mgmt -j DROP') - - self.show_step(3) - self.fuel_web.run_ostf(cluster_id, - test_sets=['smoke', 'sanity', - 'ha', 'tests_platform'], - timeout=50 * 60) - - self.env.make_snapshot('close_connections_for_mongo') - - @test(depends_on_groups=['deploy_mongo_cluster'], - groups=['shut_down_mongo_node'], - enabled=False) - @log_snapshot_after_test - def shut_down_mongo_node(self): - """Shut down Mongo node for Neutron - - Scenario: - 1. Pre-condition - do steps from 'deploy_mongo_cluster' test - 2. Shut down 1 Mongo node - 3. Verify networks - 4. Run OSTF tests - 5. Turn on Mongo node - 6. Verify networks - 7. 
Run OSTF tests - - Duration: 60 min - Snapshot: shut_down_mongo_node - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_mongo_cluster') - cluster_id = self.fuel_web.get_last_created_cluster() - mongodb = self.fuel_web.get_nailgun_cluster_nodes_by_roles(cluster_id, - ['mongo']) - assert_equal(len(mongodb), 3, - "Environment doesn't have 3 MongoDB nodes, " - "found {} nodes!".format(len(mongodb))) - - target_node = self.fuel_web.get_devops_node_by_nailgun_node(mongodb[0]) - - self.show_step(2) - self.fuel_web.warm_shutdown_nodes([target_node]) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1) - - self.show_step(5) - self.fuel_web.warm_start_nodes([target_node]) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf(cluster_id, - should_fail=1, - test_sets=['smoke', 'sanity', - 'ha', 'tests_platform'], - timeout=50 * 60) - - self.env.make_snapshot('shut_down_mongo_node') diff --git a/fuelweb_test/tests/tests_strength/test_failover_with_ceph.py b/fuelweb_test/tests/tests_strength/test_failover_with_ceph.py deleted file mode 100644 index 680967ccd..000000000 --- a/fuelweb_test/tests/tests_strength/test_failover_with_ceph.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_strength.test_failover_base\ - import TestHaFailoverBase - - -@test(groups=["ha_destructive_ceph_neutron"]) -class TestHaCephNeutronFailover(TestHaFailoverBase): - - @property - def snapshot_name(self): - return "prepare_ha_ceph_neutron" - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["deploy_ceph_ha", "prepare_ha_ceph_neutron"]) - @log_snapshot_after_test - def prepare_ha_ceph_neutron(self): - """Prepare cluster in HA/Neutron mode with ceph for failover tests - - Scenario: - 1. Create cluster - 2. Add 2 nodes with controller roles, 1 node controller + ceph-osd - 3. Add 1 node with compute role, 1 node compute + ceph-osd - 4. Deploy the cluster - 5. Make snapshot - - Duration 70m - Snapshot prepare_ha_ceph_neutron - """ - super(self.__class__, self).deploy_ha_ceph() - - @test(depends_on_groups=['prepare_ha_ceph_neutron'], - groups=["ha_ceph_neutron_sequential_destroy_controllers"]) - @log_snapshot_after_test - def ha_ceph_neutron_rabbit_master_destroy(self): - """Suspend rabbit master, check neutron cluster, - resume nodes, check cluster - - Scenario: - 1. Revert snapshot prepare_ha_ceph_neutron - 2. Wait until Galera is up and Keystone re-triggers tokens - 3. Create instance, assign floating ip - 4. Ping instance by floating ip - 5. Suspend rabbit-master controller - 6. Run OSTF ha suite - 7. Ping created instance - 8. Suspend second rabbit-master controller - 9.
Turn on controller from step 5 - 10. Run OSTF ha suite - 11. Ping instance - 12. Turn on controller from step 8 - 13. Run OSTF ha suite - 14. Ping instance - 15. Run OSTF - - Duration 40m - """ - super(self.__class__, self).ha_sequential_rabbit_master_failover() diff --git a/fuelweb_test/tests/tests_strength/test_huge_environments.py b/fuelweb_test/tests/tests_strength/test_huge_environments.py deleted file mode 100644 index 5f60cd495..000000000 --- a/fuelweb_test/tests/tests_strength/test_huge_environments.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests import base_test_case -from fuelweb_test.helpers import os_actions - - -@test(enabled=False, - groups=["huge_environments", "huge_ha_nova"]) -class HugeEnvironments(base_test_case.TestBasic): - # REMOVE THIS NOVA_NETWORK CLASS WHEN NEUTRON BECOMES THE DEFAULT - """HugeEnvironments.""" # TODO documentation - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9], - groups=["nine_nodes_mixed"], - enabled=False) - @log_snapshot_after_test - def nine_nodes_mixed(self): - """Deploy cluster with mixed roles on 9 nodes in HA mode - - Scenario: - 1. Create cluster - 2. Add 4 nodes as controllers with mongo roles - 3. Add 5 nodes as compute with ceph OSD roles - 4. Turn on Sahara and Ceilometer - 5. Verify network - 6. Deploy the cluster - 7. Check networks and OSTF - - Duration 150m - - """ - self.env.revert_snapshot("ready_with_9_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'sahara': True, - 'ceilometer': True - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'mongo'], - 'slave-02': ['controller', 'mongo'], - 'slave-03': ['controller', 'mongo'], - 'slave-04': ['controller', 'mongo'], - 'slave-05': ['compute', 'ceph-osd'], - 'slave-06': ['compute', 'ceph-osd'], - 'slave-07': ['compute', 'ceph-osd'], - 'slave-08': ['compute', 'ceph-osd'], - 'slave-09': ['compute', 'ceph-osd'] - } - ) - # Verify network - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id, - timeout=150 * 60, - interval=30) - - # Verify network - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9], - groups=["nine_nodes_separate_roles"]) - @log_snapshot_after_test - def nine_nodes_separate_roles(self): - """Deploy cluster with separate roles on 9 nodes in HA mode - - Scenario: - 1. Create cluster - 2. Add 3 nodes as controllers - 3. Add 2 nodes as compute - 4. Add 2 nodes as cinder - 5. Add 2 nodes as ceph - 6. Turn on Sahara - 7. Verify network - 8.
Deploy the cluster - 9. Check networks and OSTF - - Duration 100m - - """ - self.env.revert_snapshot("ready_with_9_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'volumes_ceph': True, - 'images_ceph': False, - 'volumes_lvm': False, - 'osd_pool_size': '2', - 'sahara': True - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'], - 'slave-07': ['cinder'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'], - } - ) - # Verify network - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id, - timeout=150 * 60, - interval=30) - - # Verify network - self.fuel_web.verify_network(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - -@test(groups=["huge_environments", "huge_ha_neutron", "huge_scale"]) -class HugeHaNeutron(base_test_case.TestBasic): - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9], - groups=["huge_ha_neutron_tun_ceph_ceilometer_rados"], - enabled=False) - @log_snapshot_after_test - def huge_ha_neutron_tun_ceph_ceilometer_rados(self): - """Deploy cluster in HA mode with Neutron VXLAN, RadosGW - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller and ceph role - 3. Add 3 nodes with compute and ceph roles - 4. Add 3 nodes with mongo roles - 5. Verify network - 6. Deploy the cluster - 7. Verify network - 8. Verify smiles count - 9. Check networks and OSTF - - Duration 100m - - """ - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ceilometer': True, - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'tenant': 'haVxlanCephHugeScale', - 'user': 'haVxlanCephHugeScale', - 'password': 'haVxlanCephHugeScale' - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - 'slave-06': ['compute', 'ceph-osd'], - 'slave-07': ['mongo'], - 'slave-08': ['mongo'], - 'slave-09': ['mongo'] - } - ) - # Verify network - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id, - timeout=150 * 60, - interval=30) - - # Verify network - self.fuel_web.verify_network(cluster_id) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], - data['password'], - data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=15) - - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - test_class_main = ('fuel_health.tests.tests_platform.' - 'test_ceilometer.'
- 'CeilometerApiPlatformTests') - tests_names = ['test_check_alarm_state', - 'test_create_sample'] - test_classes = ['{0}.{1}'.format(test_class_main, test_name) - for test_name in tests_names] - for test_name in test_classes: - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=test_name, timeout=60 * 20) - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9], - groups=["huge_ha_neutron_vlan_ceph_ceilometer_rados"], - enabled=False) - @log_snapshot_after_test - def huge_ha_neutron_vlan_ceph_ceilometer_rados(self): - """Deploy cluster with separate roles in HA mode - with Neutron VLAN, RadosGW - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller - 3. Add 3 nodes with compute - 4. Add 1 node with mongo roles - 5. Add 2 nodes as ceph - 6. Verify network - 7. Deploy the cluster - 8. Verify smiles count - 9. Check networks and OSTF - - Duration 100m - - """ - self.env.revert_snapshot("ready_with_9_slaves") - - data = { - 'ceilometer': True, - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'objects_ceph': True, - 'osd_pool_size': '2', - 'net_provider': 'neutron', - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'], - 'tenant': 'haVlanCephHugeScale', - 'user': 'haVlanCephHugeScale', - 'password': 'haVlanCephHugeScale' - } - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['compute'], - 'slave-07': ['mongo'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd'], - } - ) - # Verify network - self.fuel_web.verify_network(cluster_id) - - # Cluster deploy - self.fuel_web.deploy_cluster_wait(cluster_id, - timeout=150 * 60, - interval=30) - - # Verify network - self.fuel_web.verify_network(cluster_id) - - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - data['user'], - data['password'], - data['tenant']) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=15) - - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - test_class_main = ('fuel_health.tests.tests_platform.' - 'test_ceilometer.' - 'CeilometerApiPlatformTests') - tests_names = ['test_check_alarm_state', - 'test_create_sample'] - test_classes = ['{0}.{1}'.format(test_class_main, test_name) - for test_name in tests_names] - for test_name in test_classes: - self.fuel_web.run_single_ostf_test( - cluster_id=cluster_id, test_sets=['tests_platform'], - test_name=test_name, timeout=60 * 20) diff --git a/fuelweb_test/tests/tests_strength/test_image_based.py b/fuelweb_test/tests/tests_strength/test_image_based.py deleted file mode 100644 index bd14424bf..000000000 --- a/fuelweb_test/tests/tests_strength/test_image_based.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import time -from devops.helpers.helpers import wait -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["repeatable_image_based", "image_based"]) -class RepeatableImageBased(TestBasic): - """RepeatableImageBased.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["repeatable_image_based", "image_based"]) - @log_snapshot_after_test - def repeatable_image_based(self): - """Provision new cluster many times after deletion the old one - - Scenario: - 1. Create HA cluster - 2. Add 1 controller, 2 compute and 2 cinder nodes - 3. Deploy the cluster - 4. Delete cluster - 5. Create snapshot of environment - 6. Revert snapshot - 7. Create and try provision another HA cluster - 8. Repeat 6-7 steps 10 times - - Duration 60m - - """ - self.env.revert_snapshot("ready_with_5_slaves") - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun']}) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['compute'], - 'slave-04': ['cinder'], - 'slave-05': ['cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.client.delete_cluster(cluster_id) - # wait nodes go to reboot - wait(lambda: not self.fuel_web.client.list_nodes(), - timeout=10 * 60, - timeout_msg='Nodes failed to become offline') - # wait for nodes to appear after bootstrap - wait(lambda: len(self.fuel_web.client.list_nodes()) == 5, - timeout=10 * 60, - timeout_msg='Nodes failed to become online') - for slave in self.env.d_env.nodes().slaves[:5]: - slave.destroy() - self.fuel_web.wait_nodes_get_offline_state( - self.env.d_env.nodes().slaves[:5], timeout=10 * 60) - - self.env.make_snapshot("deploy_after_delete", is_make=True) - - for _ in range(10): - self.env.revert_snapshot("deploy_after_delete") - for node in self.env.d_env.nodes().slaves[:5]: - node.start() - time.sleep(2) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.nodes().slaves[:5], timeout=10 * 60) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": 'vlan' - } - ) - - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.provisioning_cluster_wait(cluster_id) diff --git a/fuelweb_test/tests/tests_strength/test_load.py b/fuelweb_test/tests/tests_strength/test_load.py deleted file mode 100644 index df0055519..000000000 --- a/fuelweb_test/tests/tests_strength/test_load.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from proboscis import test -from proboscis.asserts import assert_true - -from core.helpers.setup_teardown import setup_teardown - -from fuelweb_test import logger -from fuelweb_test import ostf_test_mapping -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.rally import RallyBenchmarkTest -from fuelweb_test.helpers.utils import fill_space -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_strength.test_load_base import TestLoadBase - - -@test(groups=["load"]) -class Load(TestLoadBase): - """Test class for the test group devoted to the load tests. - - Contains test case with cluster in HA mode with ceph, - launching Rally and cold restart of all nodes. - - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["load_ceph_partitions_cold_reboot"]) - @log_snapshot_after_test - @setup_teardown(setup=TestLoadBase.prepare_load_ceph_ha) - def load_ceph_partitions_cold_reboot(self): - """Load ceph-osd partitions on 30% ~start rally~ reboot nodes - - Scenario: - 1. Revert snapshot 'prepare_load_ceph_ha' - 2. Wait until MySQL Galera is UP on some controller - 3. Check Ceph status - 4. Run ostf - 5. Fill ceph partitions on all nodes up to 30% - 6. Check Ceph status - 7. Run RALLY - 8. Cold restart all nodes - 9. Wait for HA services ready - 10. Wait until MySQL Galera is UP on some controller - 11. 
Run OSTF - - Duration 180m - Snapshot load_ceph_partitions_cold_reboot - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("prepare_load_ceph_ha") - - self.show_step(2) - primary_controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - self.fuel_web.wait_mysql_galera_is_up([primary_controller.name]) - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(3) - self.fuel_web.check_ceph_status(cluster_id) - - self.show_step(4) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(5) - ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['ceph-osd']) - for node in ceph_nodes: - ip = node['ip'] - file_dir = self.ssh_manager.execute_on_remote( - ip=ip, - cmd="mount | grep -m 1 ceph | awk '{printf($3)}'")['stdout'][0] - fill_space(ip, file_dir, 30 * 1024) - - self.show_step(6) - self.fuel_web.check_ceph_status(cluster_id) - - self.show_step(7) - assert_true(settings.PATCHING_RUN_RALLY, - 'PATCHING_RUN_RALLY was not set to True') - rally_benchmarks = {} - benchmark_results = {} - for tag in set(settings.RALLY_TAGS): - rally_benchmarks[tag] = RallyBenchmarkTest( - container_repo=settings.RALLY_DOCKER_REPO, - environment=self.env, - cluster_id=cluster_id, - test_type=tag - ) - benchmark_results[tag] = rally_benchmarks[tag].run() - logger.debug(benchmark_results[tag].show()) - - self.show_step(8) - self.fuel_web.cold_restart_nodes( - self.env.d_env.get_nodes(name__in=[ - 'slave-01', - 'slave-02', - 'slave-03', - 'slave-04', - 'slave-05'])) - - self.show_step(9) - self.fuel_web.assert_ha_services_ready(cluster_id) - - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(10) - self.fuel_web.wait_mysql_galera_is_up([primary_controller.name]) - - try: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - except AssertionError: - logger.debug("Test failed on the first attempt;" - " sleeping 180 seconds and retrying once more -" - " if it fails again, the test fails") - time.sleep(180) - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - self.show_step(11) - # LB 1519018 - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("load_ceph_partitions_cold_reboot") diff --git a/fuelweb_test/tests/tests_strength/test_load_base.py b/fuelweb_test/tests/tests_strength/test_load_base.py deleted file mode 100644 index bfb3b0828..000000000 --- a/fuelweb_test/tests/tests_strength/test_load_base.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import TestBasic - - -class TestLoadBase(TestBasic): - """ - - This class contains basic methods for different load test scenarios.
- - """ - - def prepare_load_ceph_ha(self): - """Prepare cluster in HA mode with ceph for load tests - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller + ceph-osd roles - 3. Add 2 nodes with compute role - 4. Deploy the cluster - 5. Make snapshot - - Duration 70m - Snapshot prepare_load_ceph_ha - """ - - if self.env.d_env.has_snapshot("prepare_load_ceph_ha"): - return - - self.env.revert_snapshot("ready_with_5_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - 'volumes_ceph': True, - 'images_ceph': True, - 'volumes_lvm': False, - 'osd_pool_size': "3" - } - ) - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'ceph-osd'], - 'slave-02': ['controller', 'ceph-osd'], - 'slave-03': ['controller', 'ceph-osd'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf( - cluster_id=cluster_id) - - self.show_step(5) - self.env.make_snapshot("prepare_load_ceph_ha", is_make=True) diff --git a/fuelweb_test/tests/tests_strength/test_master_node_failover.py b/fuelweb_test/tests/tests_strength/test_master_node_failover.py deleted file mode 100644 index eeaed1d1f..000000000 --- a/fuelweb_test/tests/tests_strength/test_master_node_failover.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time -import traceback - -from proboscis.asserts import assert_equal -from proboscis import test - -from fuelweb_test.helpers import common -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test import logger -from fuelweb_test.tests import base_test_case - - -@test(groups=["thread_non_func_1"]) -class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic): - """DeployHAOneControllerMasterNodeFail.""" # TODO documentation - - def resume_admin_node(self):
 - logger.info('Start admin node...') - self.env.d_env.nodes().admin.resume() - try: - self.env.d_env.nodes().admin.await( - 'admin', timeout=60, by_port=8000) - except Exception as e: - logger.warning( - "Admin node didn't come up on the first start: {0}".format(e)) - self.env.d_env.nodes().admin.destroy() - logger.info('Admin node was destroyed. Wait 10 sec.') - time.sleep(10) - self.env.d_env.nodes().admin.start() - logger.info('Admin node started second time.') - self.env.d_env.nodes().admin.await('admin') - self.env.set_admin_ssh_password() - self.env.admin_actions.wait_for_fuel_ready( - timeout=600) - - logger.info('Waiting for containers') - self.env.set_admin_ssh_password() - self.env.admin_actions.wait_for_fuel_ready() - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3], - groups=["non_functional", - "deploy_ha_one_controller_neutron_master_node_fail"]) - @log_snapshot_after_test - def deploy_ha_one_controller_neutron_master_node_fail(self): - """Deploy HA cluster with neutron and check it without master node - - Scenario: - 1. Create cluster in ha mode with 1 controller - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. Validate cluster was set up correctly, there are no dead - services, there are no errors in logs - 6. Verify networks - 7. Verify network configuration on controller - 8. Run OSTF - 9. Shut down master node - 10. Run OpenStack verification - - Duration 1000m - """ - self.env.revert_snapshot("ready_with_3_slaves") - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - controller_ip = self.fuel_web.get_public_vip(cluster_id) - - os_conn = os_actions.OpenStackActions(controller_ip) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5) - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - - self.fuel_web.verify_network(cluster_id) - logger.info('PASS DEPLOYMENT') - self.fuel_web.run_ostf( - cluster_id=cluster_id) - logger.info('PASS OSTF') - - logger.info('Destroy admin node...') - try: - self.env.d_env.nodes().admin.destroy() - logger.info('Admin node destroyed') - - common_func = common.Common( - controller_ip, - settings.SERVTEST_USERNAME, - settings.SERVTEST_PASSWORD, - settings.SERVTEST_TENANT) - - # create instance - server = common_func.create_instance(neutron_network=True, - label=net_name) - - # get instance details - details = common_func.get_instance_detail(server) - assert_equal(details.name, 'test_instance') - - # Check if instance active - common_func.verify_instance_status(server, 'ACTIVE') - - # delete instance - common_func.delete_instance(server) - except Exception: - logger.error( - 'Failed to operate the cluster after master node destroy') - logger.error(traceback.format_exc()) - raise - finally: - self.resume_admin_node() - - self.env.make_snapshot( - "deploy_ha_one_controller_neutron_master_node_fail") - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5], - groups=["deploy_ha_dns_ntp"]) - @log_snapshot_after_test - def deploy_ha_dns_ntp(self): - """Use external ntp and dns in ha mode - - Scenario: - 1. Create cluster - 2. Configure external NTP, DNS settings - 3. Add 3 nodes with controller roles - 4. Add 2 nodes with compute roles - 5.
Deploy the cluster - - """ - - self.env.revert_snapshot("ready_with_5_slaves") - external_dns = settings.EXTERNAL_DNS - if settings.FUEL_USE_LOCAL_DNS: - public_gw = self.env.d_env.router(router_name="public") - external_dns += [public_gw] - - net_provider_data = { - 'ntp_list': settings.EXTERNAL_NTP, - 'dns_list': external_dns, - } - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE_HA, - settings=net_provider_data - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - os_conn = os_actions.OpenStackActions(self.fuel_web. - get_public_vip(cluster_id)) - self.fuel_web.assert_cluster_ready(os_conn, smiles_count=14) - - self.fuel_web.verify_network(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("deploy_ha_dns_ntp", is_make=True) - - @test(depends_on=[deploy_ha_dns_ntp], - groups=["external_dns_ha"]) - @log_snapshot_after_test - def external_dns_ha(self): - """Check external dns in ha mode - - Scenario: - 1. Revert cluster - 2. Shut down dnsmasq - 3. Check dns resolution - - """ - - self.env.revert_snapshot("deploy_ha_dns_ntp") - - with self.env.d_env.get_admin_remote() as remote: - _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'] - remote.execute("killall dnsmasq") - checkers.external_dns_check(_ip) - - @test(depends_on=[deploy_ha_dns_ntp], - groups=["external_ntp_ha"]) - @log_snapshot_after_test - def external_ntp_ha(self): - """Check external ntp in ha mode - - Scenario: - 1. Revert cluster - 2. Shut down ntpd - 3. Check ntp update - - """ - - self.env.revert_snapshot("deploy_ha_dns_ntp") - - cluster_id = self.fuel_web.get_last_created_cluster() - with self.env.d_env.get_admin_remote() as remote: - _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'] - vrouter_vip = self.fuel_web\ - .get_management_vrouter_vip(cluster_id) - remote.execute("pkill -9 ntpd") - checkers.external_ntp_check(_ip, vrouter_vip) diff --git a/fuelweb_test/tests/tests_strength/test_network_outage.py b/fuelweb_test/tests/tests_strength/test_network_outage.py deleted file mode 100644 index 265db1f16..000000000 --- a/fuelweb_test/tests/tests_strength/test_network_outage.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
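A note on the two checks above: external_dns_ha and external_ntp_ha delegate the actual verification to checkers.external_dns_check and checkers.external_ntp_check, whose bodies live in the (likewise deleted) helpers/checkers.py and are not shown in this hunk. As a rough illustration only - not the original implementation - such checks boil down to running a resolver query and an NTP peer query on the target node after the admin-node daemons are killed. A minimal sketch, assuming nothing beyond a run(cmd) callable that executes a shell command on the node and returns (exit_code, stdout_lines):

    def external_dns_check(run, name='mirror.fuel-infra.org'):
        """The slave should still resolve public names via external DNS."""
        exit_code, _ = run('host {0}'.format(name))
        assert exit_code == 0, 'DNS resolution of {0} failed'.format(name)


    def external_ntp_check(run, vrouter_vip):
        """The slave's ntpd should sync to an external peer, not the VIP."""
        exit_code, stdout = run('ntpq -pn')
        assert exit_code == 0, 'ntpq failed on the target node'
        # 'ntpq -pn' marks the peer ntpd is currently synced to with '*'.
        selected = [line for line in stdout if line.startswith('*')]
        assert selected, 'ntpd has not selected any peer yet'
        assert vrouter_vip not in selected[0], (
            'ntpd is still synced to the internal VIP {0}'.format(vrouter_vip))

The hostname 'mirror.fuel-infra.org' is only a placeholder; any name resolvable through the configured external DNS servers would do.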
- -import time - -from devops.error import TimeoutError -from devops.helpers.ssh_client import SSHAuth -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers import utils -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.rally import RallyBenchmarkTest -from fuelweb_test.tests.base_test_case import TestBasic - - -cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS) - - -@test(groups=['network_outage']) -class NetworkOutage(TestBasic): - """NetworkOutage""" # TODO documentation - - @test(depends_on_groups=['deploy_ha_cinder'], - groups=['block_net_traffic_cinder']) - @log_snapshot_after_test - def block_net_traffic_cinder(self): - """Block network traffic of whole environment - - Scenario: - 1. Revert environment deploy_ha_cinder - 2. Create 2 volumes and 2 instances with attached volumes - 3. Fill cinder storages up to 30% - 4. Start Rally - 5. Block traffic of all networks - 6. Sleep 5 minutes - 7. Unblock traffic of all networks - 8. Wait until cluster nodes become online - 9. Verify networks - 10. Run OSTF tests - - Duration: 40 min - Snapshot: block_net_traffic - """ - - self.show_step(1) - self.env.revert_snapshot('deploy_ha_cinder') - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(2) - os = os_actions.OpenStackActions( - controller_ip=self.fuel_web.get_public_vip(cluster_id), - user='failover', passwd='failover', tenant='failover') - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - hypervisors = os.get_hypervisors() - hypervisor_name = hypervisors[0].hypervisor_hostname - instance_1 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_1.id, hypervisor_name)) - - floating_ip_1 = os.assign_floating_ip(instance_1) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_1.ip, instance_1.id)) - - hypervisor_name = hypervisors[1].hypervisor_hostname - instance_2 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_2.id, hypervisor_name)) - - floating_ip_2 = os.assign_floating_ip(instance_2) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_2.ip, instance_2.id)) - - self.show_step(3) - cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['cinder']) - total_cinder_size = 0 - for node in cinder_nodes: - total_cinder_size += \ - self.fuel_web.get_node_partition_size(node['id'], 'cinder') - percent_15_mb = 0.15 * total_cinder_size - percent_15_gb = percent_15_mb // 1024 - volume_size = int(percent_15_gb + 1) - - volume_1 = os.create_volume(size=volume_size) - volume_2 = os.create_volume(size=volume_size) - - logger.info('Created volumes: {0}, {1}'.format(volume_1.id, - volume_2.id)) - - ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - - logger.info("Attach volumes") - cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb' - - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. openrc; ' + cmd.format(srv_id=instance_1.id, - volume_id=volume_1.id) - ) - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. 
openrc; ' + cmd.format(srv_id=instance_2.id, - volume_id=volume_2.id) - ) - - cmds = ['sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"', - 'sudo sh -c "/bin/mount /dev/vdb /mnt"', - 'sudo sh -c "/usr/bin/nohup' - ' /bin/dd if=/dev/zero of=/mnt/bigfile ' - 'bs=1M count={} &"'.format(int(percent_15_mb))] - - md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''} - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - for ip in [floating_ip_1.ip, floating_ip_2.ip]: - for cmd in cmds: - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - logger.info('RESULT for {}: {}'.format( - cmd, - utils.pretty_log(res)) - ) - logger.info('Wait up to 10800 sec until "dd" ends') - for _ in range(720): - cmd = 'ps -ef |grep -v grep| grep "dd if" ' - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - if res['exit_code'] != 0: - break - time.sleep(15) - logger.debug('Waited another 15 sec -' - ' {} sec in total'.format(15 * (_ + 1))) - else: - raise TimeoutError('The big file has still not been' - ' created after 10800 sec') - cmd = 'md5sum /mnt/bigfile' - md5s[ip] = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth)['stdout'] - self.show_step(4) - assert_true(settings.PATCHING_RUN_RALLY, - 'PATCHING_RUN_RALLY was not set to True') - rally_benchmarks = {} - benchmark_results = {} - for tag in set(settings.RALLY_TAGS): - rally_benchmarks[tag] = RallyBenchmarkTest( - container_repo=settings.RALLY_DOCKER_REPO, - environment=self.env, - cluster_id=cluster_id, - test_type=tag - ) - benchmark_results[tag] = rally_benchmarks[tag].run() - logger.debug(benchmark_results[tag].show()) - - self.show_step(5) - nodes = [ - node for node in sorted( - self.env.d_env.get_nodes(role='fuel_slave'), - key=lambda x: x.name) - if node.driver.node_active(node)] - for interface in nodes[0].interfaces: - if interface.is_blocked: - raise Exception('Interface {0} is blocked'.format(interface)) - else: - interface.network.block() - - self.show_step(6) - time.sleep(60 * 5) - - self.show_step(7) - for interface in nodes[0].interfaces: - if interface.network.is_blocked: - interface.network.unblock() - else: - raise Exception( - 'Interface {0} was not blocked'.format(interface)) - - self.show_step(8) - self.fuel_web.wait_nodes_get_online_state(nodes) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - @test(depends_on_groups=['deploy_ha_ceph'], - groups=['block_net_traffic_ceph']) - @log_snapshot_after_test - def block_net_traffic_ceph(self): - """Block network traffic of whole environment - - Scenario: - 1. Revert environment deploy_ha_ceph - 2. Create 2 volumes and 2 instances with attached volumes - 3. Fill ceph storages up to 30% - 4. Start Rally - 5. Block traffic of all networks - 6. Sleep 5 minutes - 7. Unblock traffic of all networks - 8. Wait until cluster nodes become online - 9. Verify networks - 10.
Run OSTF tests - - Duration: 40 min - Snapshot: block_net_traffic - """ - - self.show_step(1, initialize=True) - self.env.revert_snapshot('deploy_ha_ceph') - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(2) - os = os_actions.OpenStackActions( - controller_ip=self.fuel_web.get_public_vip(cluster_id), - user='failover', passwd='failover', tenant='failover') - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - hypervisors = os.get_hypervisors() - hypervisor_name = hypervisors[0].hypervisor_hostname - instance_1 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_1.id, hypervisor_name)) - - floating_ip_1 = os.assign_floating_ip(instance_1) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_1.ip, instance_1.id)) - - hypervisor_name = hypervisors[1].hypervisor_hostname - instance_2 = os.create_server_for_migration( - neutron=True, - availability_zone="nova:{0}".format(hypervisor_name), - label=net_name - ) - logger.info("New instance {0} created on {1}" - .format(instance_2.id, hypervisor_name)) - - floating_ip_2 = os.assign_floating_ip(instance_2) - logger.info("Floating address {0} associated with instance {1}" - .format(floating_ip_2.ip, instance_2.id)) - - self.show_step(3) - ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['ceph-osd']) - total_ceph_size = 0 - for node in ceph_nodes: - total_ceph_size += \ - self.fuel_web.get_node_partition_size(node['id'], 'ceph') - percent_15_mb = 0.15 * total_ceph_size - percent_15_gb = percent_15_mb // 1024 - volume_size = int(percent_15_gb + 1) - - volume_1 = os.create_volume(size=volume_size) - volume_2 = os.create_volume(size=volume_size) - - logger.info('Created volumes: {0}, {1}'.format(volume_1.id, - volume_2.id)) - - ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip'] - - logger.info("Attach volumes") - cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb' - - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. openrc; ' + cmd.format(srv_id=instance_1.id, - volume_id=volume_1.id) - ) - self.ssh_manager.execute_on_remote( - ip=ip, - cmd='. 
openrc; ' + cmd.format(srv_id=instance_2.id, - volume_id=volume_2.id) - ) - - cmds = ['sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"', - 'sudo sh -c "/bin/mount /dev/vdb /mnt"', - 'sudo sh -c "/usr/bin/nohup' - ' /bin/dd if=/dev/zero of=/mnt/bigfile ' - 'bs=1M count={} &"'.format(int(percent_15_mb))] - - md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''} - with self.fuel_web.get_ssh_for_node("slave-01") as remote: - for ip in [floating_ip_1.ip, floating_ip_2.ip]: - for cmd in cmds: - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - logger.info('RESULT for {}: {}'.format( - cmd, - utils.pretty_log(res)) - ) - logger.info('Wait up to 7200 sec until "dd" ends') - for _ in range(720): - cmd = 'ps -ef |grep -v grep| grep "dd if" ' - res = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth) - if res['exit_code'] != 0: - break - time.sleep(10) - logger.debug('Waited another 10 sec -' - ' {} sec in total'.format(10 * (_ + 1))) - else: - raise TimeoutError('The big file has still not been' - ' created after 7200 sec') - cmd = 'md5sum /mnt/bigfile' - md5s[ip] = remote.execute_through_host( - hostname=ip, - cmd=cmd, - auth=cirros_auth)['stdout'] - self.show_step(4) - assert_true(settings.PATCHING_RUN_RALLY, - 'PATCHING_RUN_RALLY was not set to True') - rally_benchmarks = {} - benchmark_results = {} - for tag in set(settings.RALLY_TAGS): - rally_benchmarks[tag] = RallyBenchmarkTest( - container_repo=settings.RALLY_DOCKER_REPO, - environment=self.env, - cluster_id=cluster_id, - test_type=tag - ) - benchmark_results[tag] = rally_benchmarks[tag].run() - logger.debug(benchmark_results[tag].show()) - - self.show_step(5) - nodes = [ - node for node in sorted( - self.env.d_env.get_nodes(role='fuel_slave'), - key=lambda x: x.name) - if node.driver.node_active(node)] - for interface in nodes[0].interfaces: - if interface.is_blocked: - raise Exception('Interface {0} is blocked'.format(interface)) - else: - interface.network.block() - - self.show_step(6) - time.sleep(60 * 5) - self.show_step(7) - for interface in nodes[0].interfaces: - if interface.network.is_blocked: - interface.network.unblock() - else: - raise Exception( - 'Interface {0} was not blocked'.format(interface)) - - self.show_step(8) - self.fuel_web.wait_nodes_get_online_state(nodes) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - self.show_step(10) - try: - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - except AssertionError: - time.sleep(600) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) diff --git a/fuelweb_test/tests/tests_strength/test_neutron.py b/fuelweb_test/tests/tests_strength/test_neutron.py deleted file mode 100644 index 83512c200..000000000 --- a/fuelweb_test/tests/tests_strength/test_neutron.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
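Both block_net_traffic_* tests above size their volumes with the same arithmetic: the aggregate partition size is reported in MB, 15% of it becomes the number of megabytes dd writes (percent_15_mb), and the volume is sized to the smallest whole number of GB that can hold that much data. A self-contained restatement of the rule (the function name and the sample figure are illustrative, not from the original code):

    def volume_size_for_fill(total_partition_mb, fraction=0.15):
        """Return (fill_mb, volume_size_gb) as computed by the tests:
        how many MB 'dd' writes, and the volume size that holds them."""
        fill_mb = fraction * total_partition_mb
        volume_size_gb = int(fill_mb // 1024 + 1)  # floor to GB, then add one
        return int(fill_mb), volume_size_gb

    # e.g. a 100000 MB partition -> dd writes 15000 MB into a 15 GB volume
    assert volume_size_for_fill(100000) == (15000, 15)

Because the GB figure is floored and then incremented, the volume always has up to 1 GB of headroom over the data dd writes into it.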
- -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests import base_test_case - -from fuelweb_test.tests.tests_strength.test_neutron_base\ - import TestNeutronFailoverBase - - -@test(groups=["ha_neutron_destructive_vlan", "ha"]) -class TestNeutronFailoverVlan(TestNeutronFailoverBase): - """TestNeutronFailoverVlan""" # TODO(kkuznetsova) documentation - - @property - def segment_type(self): - return "vlan" - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_release], - groups=["deploy_ha_neutron_vlan"]) - @log_snapshot_after_test - def deploy_ha_neutron_vlan(self): - """Deploy cluster in HA mode, Neutron with VLAN segmentation - - Scenario: - 1. Create cluster. HA, Neutron with VLAN segmentation - 2. Add 3 nodes with controller roles - 3. Add 2 nodes with compute roles - 4. Add 1 node with cinder role - 5. Deploy the cluster - - Duration 90m - Snapshot deploy_ha_neutron_vlan - - """ - super(self.__class__, self).deploy_ha_neutron() - - @test(depends_on=[deploy_ha_neutron_vlan], - groups=["neutron_l3_migration", - "neutron_l3_migration_vlan"]) - @log_snapshot_after_test - def neutron_l3_migration_vlan(self): - """Check l3-agent rescheduling after l3-agent dies on vlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create an instance with a key pair - 3. Manually reschedule router from primary controller - to another one - 4. Stop l3-agent on new node with pcs - 5. Check l3-agent was rescheduled - 6. Check network connectivity from instance via - dhcp namespace - 7. Run OSTF - - Duration 30m - - """ - super(self.__class__, self).neutron_l3_migration() - - @test(depends_on=[deploy_ha_neutron_vlan], - groups=["neutron_l3_migration_after_reset", - "neutron_l3_migration_after_reset_vlan"]) - @log_snapshot_after_test - def neutron_l3_migration_after_reset_vlan(self): - """Check l3-agent rescheduling after reset non-primary controller vlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create an instance with a key pair - 3. Manually reschedule router from primary controller - to another one - 4. Reset controller with l3-agent - 5. Check l3-agent was rescheduled - 6. Check network connectivity from instance via - dhcp namespace - 7. Run OSTF - - Duration 30m - """ - super(self.__class__, self).neutron_l3_migration_after_reset() - - @test(depends_on=[deploy_ha_neutron_vlan], - groups=["neutron_l3_migration_after_destroy", - "neutron_l3_migration_after_destroy_vlan"]) - @log_snapshot_after_test - def neutron_l3_migration_after_destroy_vlan(self): - """Check l3-agent rescheduling after destroy nonprimary controller vlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create an instance with a key pair - 3. Manually reschedule router from primary controller - to another one - 4. Destroy controller with l3-agent - 5. Check l3-agent was rescheduled - 6. Check network connectivity from instance via - dhcp namespace - 7. Run OSTF - - Duration 30m - """ - super(self.__class__, self).neutron_l3_migration_after_destroy() - - @test(depends_on=[deploy_ha_neutron_vlan], - groups=["neutron_packets_drops_stat", - "neutron_packets_drops_stat_vlan"]) - @log_snapshot_after_test - def neutron_packets_drop_stat_vlan(self): - """Check packets drops statistic when size is equal to MTU on vlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create instance, assign floating IP to it - 3. Send ICMP packets from controller to instance with 1500 bytes - 4. 
If at least 7 responses on 10 requests are received - assume test is passed - - Duration 30m - - """ - super(self.__class__, self).neutron_packets_drop_stat() - - -@test(groups=["ha_neutron_destructive_vxlan", "ha"]) -class TestNeutronFailoverVxlan(TestNeutronFailoverBase): - """TestNeutronFailoverVxlan""" # TODO(akostrikov) documentation - - @property - def segment_type(self): - return "tun" - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_release], - groups=["deploy_ha_neutron_vxlan"]) - @log_snapshot_after_test - def deploy_ha_neutron_vxlan(self): - """Deploy cluster in HA mode, Neutron with VxLAN segmentation - - Scenario: - 1. Create cluster. HA, Neutron with VxLAN segmentation - 2. Add 3 nodes with controller roles - 3. Add 2 nodes with compute roles - 4. Add 1 node with cinder role - 5. Deploy the cluster - - Duration 90m - Snapshot deploy_ha_neutron_vxlan - - """ - super(self.__class__, self).deploy_ha_neutron() - - @test(depends_on=[deploy_ha_neutron_vxlan], - groups=["neutron_l3_migration", - "neutron_l3_migration_vxlan"]) - @log_snapshot_after_test - def neutron_l3_migration_vxlan(self): - """Check l3-agent rescheduling after l3-agent dies on vxlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create an instance with a key pair - 3. Manually reschedule router from primary controller - to another one - 4. Stop l3-agent on new node with pcs - 5. Check l3-agent was rescheduled - 6. Check network connectivity from instance via - dhcp namespace - 7. Run OSTF - - Duration 30m - - """ - super(self.__class__, self).neutron_l3_migration() - - @test(depends_on=[deploy_ha_neutron_vxlan], - groups=["neutron_l3_migration_after_reset", - "neutron_l3_migration_after_reset_vxlan"]) - @log_snapshot_after_test - def neutron_l3_migration_after_reset_vxlan(self): - """Check l3-agent rescheduling after reset non-primary controller - for vxlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create an instance with a key pair - 3. Manually reschedule router from primary controller - to another one - 4. Reset controller with l3-agent - 5. Check l3-agent was rescheduled - 6. Check network connectivity from instance via - dhcp namespace - 7. Run OSTF - - Duration 30m - """ - super(self.__class__, self).neutron_l3_migration_after_reset() - - @test(depends_on=[deploy_ha_neutron_vxlan], - groups=["neutron_l3_migration_after_destroy", - "neutron_l3_migration_after_destroy_vxlan"]) - @log_snapshot_after_test - def neutron_l3_migration_after_destroy_vxlan(self): - """Check l3-agent rescheduling after destroy non-primary controller - for vxlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create an instance with a key pair - 3. Manually reschedule router from primary controller - to another one - 4. Destroy controller with l3-agent - 5. Check l3-agent was rescheduled - 6. Check network connectivity from instance via - dhcp namespace - 7. Run OSTF - - Duration 30m - - """ - super(self.__class__, self).neutron_l3_migration_after_destroy() - - @test(depends_on=[deploy_ha_neutron_vxlan], - groups=["neutron_packets_drops_stat", - "neutron_packets_drops_stat_vxlan"]) - @log_snapshot_after_test - def neutron_packets_drop_stat_vxlan(self): - """Check packets drops statistic when size is equal to MTU on vxlan - - Scenario: - 1. Revert snapshot with neutron cluster - 2. Create instance, assign floating IP to it - 3. Send ICMP packets from controller to instance with 1500 bytes - 4. 
If at least 7 responses on 10 requests are received - assume test is passed - - Duration 30m - - """ - super(self.__class__, self).neutron_packets_drop_stat() diff --git a/fuelweb_test/tests/tests_strength/test_neutron_base.py b/fuelweb_test/tests/tests_strength/test_neutron_base.py deleted file mode 100644 index 40ff884b4..000000000 --- a/fuelweb_test/tests/tests_strength/test_neutron_base.py +++ /dev/null @@ -1,521 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from devops.helpers.helpers import wait -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -from proboscis import SkipTest - -from core.helpers.log_helpers import logwrap - -from fuelweb_test.helpers.decorators import retry -from fuelweb_test.helpers import os_actions -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.tests import base_test_case - - -class NotFound(Exception): - message = "Not Found." - - -class TestNeutronFailoverBase(base_test_case.TestBasic): - """TestNeutronFailoverBase - - :param: self.segment_type - string, one of the elements from the - list settings.NEUTRON_SEGMENT - - """ - - # ----------- helpers ----------- - - @property - def segment_type(self): - raise ValueError( - 'Property segment_type should be redefined in child classes ' - 'before use!') - - @staticmethod - @logwrap - def create_instance_with_keypair(os_conn, key_name, label): - return os_conn.create_server_for_migration(key_name=key_name, - label=label) - - @staticmethod - @logwrap - def reschedule_router_manually(os_conn, router_id): - router_l3_agents = os_conn.get_l3_agent_ids(router_id) - if not router_l3_agents: - raise NotFound("l3 agent hosting router with id:{}" - " not found.".format(router_id)) - l3_agent = router_l3_agents[0] - logger.debug("l3 agent id is {0}".format(l3_agent)) - - another_l3_agents = os_conn.get_available_l3_agents_ids(l3_agent) - if not another_l3_agents: - raise NotFound("another l3 agent except l3 agent with id:{}" - " not found.".format(l3_agent)) - another_l3_agent = another_l3_agents[0] - logger.debug("another l3 agent is {0}".format(another_l3_agent)) - - os_conn.remove_l3_from_router(l3_agent, router_id) - os_conn.add_l3_to_router(another_l3_agent, router_id) - err_msg = ("l3 agent with id:{l3_1} don't start hosting router " - "with id:{router} after remove l3 agent with id:{l3_2} " - "as a hosting this router during 5 minutes.") - wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60 * 5, - timeout_msg=err_msg.format(l3_1=l3_agent, router=router_id, - l3_2=another_l3_agent)) - - @staticmethod - @logwrap - def check_instance_connectivity(remote, dhcp_namespace, instance_ip, - instance_keypair): - cmd_check_ns = 'ip netns list' - namespaces = [ - l.strip() for l in remote.check_call(cmd_check_ns).stdout] - logger.debug('Net namespaces on remote: {0}.'.format(namespaces)) - assert_true(dhcp_namespace in namespaces, - "Network namespace '{0}' doesn't exist on " - "remote 
slave!".format(dhcp_namespace)) - instance_key_path = '/root/.ssh/instancekey_rsa' - remote.check_call('echo "{0}" > {1} && chmod 400 {1}'.format( - instance_keypair.private_key, instance_key_path)) - - cmd = (". openrc; ip netns exec {0} ssh -i {1}" - " -o 'StrictHostKeyChecking no'" - " cirros@{2} \"ping -c 1 {3}\"").format(dhcp_namespace, - instance_key_path, - instance_ip, - settings.PUBLIC_TEST_IP) - err_msg = ("SSH command:\n{command}\nwas not completed with " - "exit code 0 after 3 attempts with 1 minute timeout.") - wait(lambda: remote.execute(cmd)['exit_code'] == 0, - interval=60, timeout=3 * 60, - timeout_msg=err_msg.format(command=cmd)) - res = remote.execute(cmd) - assert_equal(0, res['exit_code'], - 'Instance has no connectivity, exit code {0},' - 'stdout {1}, stderr {2}'.format(res['exit_code'], - res['stdout'], - res['stderr'])) - - @logwrap - def get_node_with_dhcp(self, os_conn, net_id): - nodes = os_conn.get_node_with_dhcp_for_network(net_id) - if not nodes: - raise NotFound("Nodes with dhcp for network with id:{}" - " not found.".format(net_id)) - node_fqdn = self.fuel_web.get_fqdn_by_hostname(nodes[0]) - logger.debug('node name with dhcp is {0}'.format(nodes[0])) - return self.fuel_web.find_devops_node_by_nailgun_fqdn( - node_fqdn, self.env.d_env.nodes().slaves[0:6]) - - @logwrap - def get_node_with_l3(self, node_with_l3): - node_with_l3_fqdn = self.fuel_web.get_fqdn_by_hostname(node_with_l3) - logger.debug("new node with l3 is {0}".format(node_with_l3)) - devops_node = self.fuel_web.find_devops_node_by_nailgun_fqdn( - node_with_l3_fqdn, - self.env.d_env.nodes().slaves[0:6]) - return devops_node - - def deploy_ha_neutron(self): - try: - self.check_run('deploy_ha_neutron_{}'.format(self.segment_type)) - except SkipTest: - return - self.env.revert_snapshot("ready") - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:6]) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": self.segment_type - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['cinder'] - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.env.make_snapshot('deploy_ha_neutron_{}'.format( - self.segment_type), is_make=True) - - # ----------- test suites ----------- - - def neutron_l3_migration(self): - self.env.revert_snapshot("deploy_ha_neutron_{}".format( - self.segment_type)) - - # init variables - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - net_id = os_conn.get_network(net_name)['id'] - router_id = os_conn.get_routers_ids()[0] - devops_node = self.get_node_with_dhcp(os_conn, net_id) - _ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip'] - nodes_with_l3 = os_conn.get_l3_agent_hosts(router_id) - if not nodes_with_l3: - err_msg = ("Node with l3 agent from router:{r_id} after reset " - "old node with l3 agent not found.") - raise NotFound(err_msg.format(router_id)) - node_with_l3 = nodes_with_l3[0] - - instance_keypair = os_conn.create_key(key_name='instancekey') - - # create instance for check neutron migration processes - instance_ip = self.create_instance_with_keypair( - os_conn, instance_keypair.name, 
- label=net_name).addresses[net_name][0]['addr'] - - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - dhcp_namespace = ''.join(remote.execute( - 'ip netns | grep {0}'.format(net_id))['stdout']).rstrip() - - logger.debug('dhcp namespace is {0}'.format(dhcp_namespace)) - - ssh_awail_cmd = ('ip netns exec {ns} /bin/bash -c "{cmd}"'.format( - ns=dhcp_namespace, - cmd="echo '' | nc -w 1 {ip} 22 > /dev/null".format( - ip=instance_ip))) - - # Wait 60 second until ssh is available on instance - wait( - lambda: remote.execute(ssh_awail_cmd)['exit_code'] == 0, - timeout=60, - timeout_msg='SSH port is not available in dhcp_namespace={}' - ''.format(dhcp_namespace)) - - logger.debug('instance internal ip is {0}'.format(instance_ip)) - - # Reschedule router for net for created instance to new controller - self.reschedule_router_manually(os_conn, router_id) - - # Get remote to the controller with running DHCP agent - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - dhcp_namespace = ''.join(remote.execute( - 'ip netns | grep {0}'.format(net_id))['stdout']).rstrip() - - logger.debug('dhcp namespace is {0}'.format(dhcp_namespace)) - - # Check connect to public network from instance after - # rescheduling l3 agent for router - self.check_instance_connectivity( - remote, dhcp_namespace, instance_ip, instance_keypair) - - # Find new l3 agent after rescheduling - node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0] - - # Ban this l3 agent using pacemaker - remote.execute("pcs resource ban neutron-l3-agent {0}".format( - node_with_l3)) - - err_msg = "l3 agent wasn't banned, it is still {0}" - # Wait to migrate l3 agent on new controller - wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts( - router_id)[0], timeout=60 * 3, - timeout_msg=err_msg.format(node_with_l3)) - - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - # Check connect to public network from instance after - # ban old l3 agent for router - self.check_instance_connectivity(remote, dhcp_namespace, - instance_ip, instance_keypair) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - # Unban banned l3 agent - remote.execute("pcs resource clear neutron-l3-agent {0}". 
-                           format(node_with_l3))
-
-    def neutron_l3_migration_after_reset(self):
-        self.env.revert_snapshot("deploy_ha_neutron_{}".format(
-            self.segment_type))
-
-        # Init variables
-        cluster_id = self.fuel_web.get_last_created_cluster()
-        os_conn = os_actions.OpenStackActions(
-            self.fuel_web.get_public_vip(cluster_id))
-        net_name = self.fuel_web.get_cluster_predefined_networks_name(
-            cluster_id)['private_net']
-        net_id = os_conn.get_network(net_name)['id']
-        devops_node = self.get_node_with_dhcp(os_conn, net_id)
-        instance_keypair = os_conn.create_key(key_name='instancekey')
-        router_id = os_conn.get_routers_ids()[0]
-        _ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
-
-        # Create an instance to check the neutron migration process
-        instance_ip = self.create_instance_with_keypair(
-            os_conn, instance_keypair.name,
-            label=net_name).addresses[net_name][0]['addr']
-        logger.debug('instance internal ip is {0}'.format(instance_ip))
-
-        # Reschedule the instance's router to another controller
-        self.reschedule_router_manually(os_conn, router_id)
-
-        # Get a remote shell on the controller running the DHCP agent
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            dhcp_namespace = ''.join(remote.execute(
-                'ip netns | grep {0}'.format(net_id))['stdout']).rstrip()
-
-            logger.debug('dhcp namespace is {0}'.format(dhcp_namespace))
-
-            # Check connectivity to the public network from the instance
-            # after rescheduling the router's l3 agent
-            self.check_instance_connectivity(remote, dhcp_namespace,
-                                             instance_ip, instance_keypair)
-
-        # Find the node hosting the l3 agent for the router
-        nodes_with_l3 = os_conn.get_l3_agent_hosts(router_id)
-        if not nodes_with_l3:
-            err_msg = ("No node hosting an l3 agent for router:{r_id} "
                       "was found.")
-            raise NotFound(err_msg.format(r_id=router_id))
-        node_with_l3 = nodes_with_l3[0]
-        new_devops = self.get_node_with_l3(node_with_l3)
-
-        # Restart this node
-        self.fuel_web.warm_restart_nodes([new_devops])
-
-        err_msg = "Node:{node} did not come back online after reset."
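The err_msg just assigned feeds the wait() call that follows, and the same poll-until-predicate idiom recurs throughout this class whenever an l3 agent has to move off a node. Extracted into a standalone helper it would look roughly like the sketch below; this is only a sketch, assuming the devops wait() helper and an os_conn whose get_l3_agent_hosts() behaves exactly as it is used in these tests.

    from devops.helpers.helpers import wait


    def wait_l3_agent_rescheduled(os_conn, router_id, old_host,
                                  timeout=60 * 3):
        # Poll until the router is reported on a host other than old_host;
        # get_l3_agent_hosts() returns the hosts carrying the router, as
        # in the tests above.
        wait(lambda: os_conn.get_l3_agent_hosts(router_id)[0] != old_host,
             timeout=timeout,
             timeout_msg="l3 agent was not rescheduled, it is still on "
                         "{0}".format(old_host))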
- wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node( - new_devops)['online'], timeout=60 * 5, - timeout_msg=err_msg.format(node=new_devops)) - - # Wait for HA services get ready - self.fuel_web.assert_ha_services_ready(cluster_id) - - # Wait for Galera service get ready - self.fuel_web.wait_mysql_galera_is_up(['slave-01', 'slave-02', - 'slave-03']) - - # Wait reschedule l3 agent - err_msg = "l3 agent wasn't rescheduled, it is still {0}" - wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts( - router_id)[0], timeout=60 * 3, - timeout_msg=err_msg.format(node_with_l3)) - - # Find host with dhcp agent for private network - # after reset one of controllers - devops_node = self.get_node_with_dhcp(os_conn, net_id) - _ip = self.fuel_web.get_nailgun_node_by_devops_node(devops_node)['ip'] - - # Get remote to the controller with running DHCP agent - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - # Check connect to public network from instance after - # reset controller with l3 agent from this instance - self.check_instance_connectivity(remote, dhcp_namespace, - instance_ip, instance_keypair) - - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - def neutron_l3_migration_after_destroy(self): - self.env.revert_snapshot("deploy_ha_neutron_{}".format( - self.segment_type)) - - # Init variables - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - net_name = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - net_id = os_conn.get_network(net_name)['id'] - router_id = os_conn.get_routers_ids()[0] - devops_node = self.get_node_with_dhcp(os_conn, net_id) - _ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip'] - instance_keypair = os_conn.create_key(key_name='instancekey') - - # create instance for check neutron migration processes - instance_ip = self.create_instance_with_keypair( - os_conn, instance_keypair.name, - label=net_name).addresses[net_name][0]['addr'] - logger.debug('instance internal ip is {0}'.format(instance_ip)) - - # Reschedule router for net for created instance to new controller - self.reschedule_router_manually(os_conn, router_id) - - # Get remote to the controller with running DHCP agent - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - dhcp_namespace = ''.join(remote.execute( - 'ip netns | grep {0}'.format(net_id))['stdout']).rstrip() - - logger.debug('dhcp namespace is {0}'.format(dhcp_namespace)) - - # Check connect to public network from instance after - # rescheduling l3 agent for router - self.check_instance_connectivity(remote, dhcp_namespace, - instance_ip, instance_keypair) - - # Find node with hosting l3 agent for router - nodes_with_l3 = os_conn.get_l3_agent_hosts(router_id) - err_msg = ("Node with l3 agent from router:{r_id} after reset " - "old node with l3 agent not found.") - if not nodes_with_l3: - raise NotFound(err_msg.format(router_id)) - node_with_l3 = nodes_with_l3[0] - devops_node_with_l3 = self.get_node_with_l3(node_with_l3) - - # Destroy controller with l3 agent for start migration process - devops_node_with_l3.destroy() - self.fuel_web.wait_node_is_offline(devops_node_with_l3) - - # Wait for HA services get ready - self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1) - - # Wait for Galera service get ready - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, - ['controller']) - d_ctrls = 
self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - - online_controllers_names = [n.name for n in - set(d_ctrls) - {devops_node_with_l3}] - self.fuel_web.wait_mysql_galera_is_up(online_controllers_names) - - # Wait reschedule l3 agent - err_msg = "l3 agent wasn't rescheduled, it is still {0}" - wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts( - router_id)[0], timeout=60 * 3, - timeout_msg=err_msg.format(node_with_l3)) - - # Find host with dhcp agent for private network - # after reset one of controllers - err_msg = ("Not found new controller node after destroy old " - "controller node:{node} with dhcp for net:{net}") - wait(lambda: self.get_node_with_dhcp(os_conn, net_id), timeout=60 * 3, - timeout_msg=err_msg.format(node=devops_node, net=net_id)) - new_devops_node = self.get_node_with_dhcp(os_conn, net_id) - _ip = self.fuel_web.get_nailgun_node_by_devops_node( - new_devops_node)['ip'] - - # Get remote to the controller with running DHCP agent - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - # Check connect to public network from instance after - # reset controller with l3 agent from this instance - self.check_instance_connectivity(remote, dhcp_namespace, - instance_ip, instance_keypair) - - # Run OSTF after destroy controller - @retry(count=3, delay=120) - def run_single_test(cluster_id): - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name='fuel_health.tests.smoke.' - 'test_neutron_actions.TestNeutron.' - 'test_check_neutron_objects_creation') - - run_single_test(cluster_id) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['sanity']) - - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke'], should_fail=1) - - def neutron_packets_drop_stat(self): - self.env.revert_snapshot("deploy_ha_neutron_{}".format( - self.segment_type)) - - # Init variables - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id)) - _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'] - # Size of the header in ICMP package in bytes - ping_header_size = 28 - net_label = self.fuel_web.get_cluster_predefined_networks_name( - cluster_id)['private_net'] - - # Create instance with floating ip for check ping from ext network - instance = os_conn.create_server_for_migration(label=net_label) - floating_ip = os_conn.assign_floating_ip( - instance, use_neutron=True)['floating_ip_address'] - logger.debug("Instance floating ip is {ip}".format(ip=floating_ip)) - - # command for check ping to instance - ping = "ping -c 3 -w 10 {ip}" - # Check ping to instance - check_ping = ping.format(ip=floating_ip) - err_msg = 'Instance with ip:{ip} is not reachable by ICMP.' 
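The ping_header_size = 28 set above is the 20-byte IPv4 header plus the 8-byte ICMP echo header; subtracting it from the interface MTU gives the largest payload that "ping -M do -s" can send without fragmentation (1472 bytes for the usual 1500-byte MTU). A minimal self-contained sketch of that arithmetic:

    IP_HEADER_LEN = 20    # IPv4 header without options
    ICMP_HEADER_LEN = 8   # ICMP echo request header
    PING_HEADER_SIZE = IP_HEADER_LEN + ICMP_HEADER_LEN  # the 28 used above


    def max_ping_payload(mtu):
        """Largest 'ping -M do -s <N>' payload that fits in one frame."""
        return mtu - PING_HEADER_SIZE


    assert max_ping_payload(1500) == 1472  # classic unfragmented maximum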
- with self.env.d_env.get_ssh_to_remote(_ip) as remote: - wait(lambda: remote.execute(check_ping)['exit_code'] == 0, - timeout=120, - timeout_msg=err_msg.format(ip=floating_ip)) - - # command for get original MTU for external bridge on one - # of controllers - orig_mtu = (r"cat /sys/class/net/$(ip r g {ip} | " - r"sed -rn 's/.*dev\s+(\S+)\s.*/\1/p')/mtu") - # Get MTU on controller - mtu_cmd = orig_mtu.format(ip=floating_ip) - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - ctrl_mtu = ''.join(remote.execute(mtu_cmd)['stdout']) - logger.info("MTU on controller is equal to {mtu}".format(mtu=ctrl_mtu)) - max_packetsize = int(ctrl_mtu) - ping_header_size - - # command for check ping to instance w/o MTU fragmentation - # w/ special packet size - mtu_ping = "ping -M do -s {data} -c 7 -w 10 {ip}" - # Check ping to instance from controller w/ wrong MTU - new_packetsize = None - cmd = mtu_ping.format(data=max_packetsize, ip=floating_ip) - logger.info("Executing command: {0}".format(cmd)) - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - res = remote.execute(cmd) - message = (res['stdout'] + res['stderr']) - if res['exit_code'] == 1: - # No packets were received at all - for l in message: - # Check if actual MTU is in stdout or in stderr - if 'Frag needed and DF set' in l or 'Message too long' in l: - logger.error("Incorrect MTU: '{line}'".format(line=l)) - m = re.match(".*mtu\s*=\s*(\d+)", l) - if m: - allowed_mtu = m.group(1) - new_packetsize = int(allowed_mtu) - ping_header_size - break - if new_packetsize: - # Check ping to instance from controller w/ correct MTU - cmd = mtu_ping.format(data=new_packetsize, ip=floating_ip) - logger.info("Executing command using new MTU: {0}".format(cmd)) - with self.env.d_env.get_ssh_to_remote(_ip) as remote: - res = remote.execute(cmd) - message = (res['stdout'] + res['stderr']) - - err_msg = "Most packages were dropped, result code:{0}\nmessage:{1}" - assert_equal(0, res['exit_code'], - err_msg.format(res, message)) diff --git a/fuelweb_test/tests/tests_strength/test_ostf_repeatable_tests.py b/fuelweb_test/tests/tests_strength/test_ostf_repeatable_tests.py deleted file mode 100644 index cb41cf786..000000000 --- a/fuelweb_test/tests/tests_strength/test_ostf_repeatable_tests.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests import base_test_case - - -@test(groups=["ostf_repeatable_tests"]) -class OstfRepeatableTests(base_test_case.TestBasic): - """OstfRepeatableTests.""" # TODO documentation - - @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3], - groups=["create_delete_ip_n_times_neutron_vlan"]) - @log_snapshot_after_test - def create_delete_ip_n_times_neutron_vlan(self): - """Deploy cluster in ha mode with VLAN Manager - - Scenario: - 1. Create cluster in ha mode with 1 controller - 2. 
Add 1 node with controller role
-            3. Add 1 node with compute role
-            4. Deploy the cluster
-            5. Run network verification
-            6. Run the 'Check network connectivity
-               from instance via floating IP' test n times
-
-        Duration 100m
-        Snapshot create_delete_ip_n_times_neutron_vlan
-
-        """
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE,
-            settings={
-                'net_provider': 'neutron',
-                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
-            }
-        )
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute']
-            }
-        )
-
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.fuel_web.verify_network(cluster_id)
-        self.fuel_web.run_ostf_repeatably(cluster_id)
-
-        self.env.make_snapshot("create_delete_ip_n_times_neutron_vlan")
-
-    @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
-          groups=["create_delete_ip_n_times_neutron_tun"])
-    @log_snapshot_after_test
-    def deploy_create_delete_ip_n_times_neutron_tun(self):
-        """Deploy HA cluster, check connectivity from instance n times
-
-        Scenario:
-            1. Create cluster in ha mode with 1 controller
-            2. Add 1 node with controller role
-            3. Add 1 node with compute role
-            4. Deploy the cluster
-            5. Verify networks
-            6. Run the 'Check network connectivity
-               from instance via floating IP' test n times
-
-        Duration 1000m
-        Snapshot create_delete_ip_n_times_neutron_tun
-
-        """
-        self.env.revert_snapshot("ready_with_3_slaves")
-
-        cluster_id = self.fuel_web.create_cluster(
-            name=self.__class__.__name__,
-            mode=settings.DEPLOYMENT_MODE,
-            settings={
-                'net_provider': 'neutron',
-                'net_segment_type': settings.NEUTRON_SEGMENT['tun']
-            }
-        )
-        self.fuel_web.update_nodes(
-            cluster_id,
-            {
-                'slave-01': ['controller'],
-                'slave-02': ['compute']
-            }
-        )
-        self.fuel_web.deploy_cluster_wait(cluster_id)
-
-        self.fuel_web.verify_network(cluster_id)
-        self.fuel_web.run_ostf_repeatably(cluster_id)
-
-        self.env.make_snapshot("create_delete_ip_n_times_neutron_tun")
-
-    @test(groups=["run_ostf_n_times_against_custom_environment"])
-    @log_snapshot_after_test
-    def run_ostf_n_times_against_custom_deployment(self):
-        cluster_id = self.fuel_web.client.get_cluster_id(
-            settings.DEPLOYMENT_NAME)
-        self.fuel_web.run_ostf_repeatably(cluster_id)
diff --git a/fuelweb_test/tests/tests_strength/test_repetitive_restart.py b/fuelweb_test/tests/tests_strength/test_repetitive_restart.py
deleted file mode 100644
index 72adbf33d..000000000
--- a/fuelweb_test/tests/tests_strength/test_repetitive_restart.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
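All three tests in the file above delegate to fuel_web.run_ostf_repeatably(), whose implementation lives in fuel_web_client.py and is not part of this hunk. Conceptually it re-runs a single OSTF case n times and fails if any run fails. A hypothetical reduction follows; the mapping key and the default count are assumptions, while run_single_ostf_test() is called with the same signature used elsewhere in this patch.

    from fuelweb_test import ostf_test_mapping


    def run_ostf_repeatably_sketch(fuel_web, cluster_id, count=10):
        # Hypothetical reduction of run_ostf_repeatably(); the mapping key
        # below is an assumption, not a verified OSTF_TEST_MAPPING entry.
        test_name = ostf_test_mapping.OSTF_TEST_MAPPING.get(
            'Check network connectivity from instance via floating IP')
        failures = []
        for attempt in range(count):
            try:
                fuel_web.run_single_ostf_test(
                    cluster_id, test_sets=['smoke'], test_name=test_name)
            except AssertionError as exc:
                failures.append((attempt, exc))
        assert not failures, "OSTF runs failed: {0}".format(failures)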
- -import time - -from proboscis import test -from proboscis.asserts import assert_true -# pylint: disable=redefined-builtin -# noinspection PyUnresolvedReferences -from six.moves import xrange -# pylint: enable=redefined-builtin - -from core.helpers.setup_teardown import setup_teardown - -from fuelweb_test import logger -from fuelweb_test import ostf_test_mapping -from fuelweb_test import settings -from fuelweb_test.helpers.cic_maintenance_mode import change_config -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.rally import RallyBenchmarkTest -from fuelweb_test.helpers.utils import fill_space -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_strength.test_load_base import TestLoadBase - - -@test(groups=["repetitive_restart"]) -class RepetitiveRestart(TestLoadBase): - """Test class for test group devoted to the repetitive cold restart - of all nodes. - - Contains test case with cluster in HA mode with ceph - and 100 times reboot procedure. - - """ - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["ceph_partitions_repetitive_cold_restart"]) - @log_snapshot_after_test - @setup_teardown(setup=TestLoadBase.prepare_load_ceph_ha) - def ceph_partitions_repetitive_cold_restart(self): - """Ceph-osd partitions on 30% ~start rally~ repetitive cold restart - - Scenario: - 1. Revert snapshot 'prepare_load_ceph_ha' - 2. Wait until MySQL Galera is UP on some controller - 3. Check Ceph status - 4. Run ostf - 5. Fill ceph partitions on all nodes up to 30% - 6. Check Ceph status - 7. Disable UMM - 8. Run RALLY - 9. 100 times repetitive reboot: - 10. Cold restart of all nodes - 11. Wait for HA services ready - 12. Wait until MySQL Galera is UP on some controller - 13. 
Run ostf - - Duration 1700m - Snapshot ceph_partitions_repetitive_cold_restart - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot("prepare_load_ceph_ha") - - self.show_step(2) - primary_controller = self.fuel_web.get_nailgun_primary_node( - self.env.d_env.nodes().slaves[0]) - self.fuel_web.wait_mysql_galera_is_up([primary_controller.name]) - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(3) - self.fuel_web.check_ceph_status(cluster_id) - - self.show_step(4) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(5) - ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['ceph-osd']) - for node in ceph_nodes: - ip = node['ip'] - file_dir = self.ssh_manager.execute_on_remote( - ip=ip, - cmd="mount | grep -m 1 ceph | awk '{printf($3)}'")['stdout'][0] - fill_space(ip, file_dir, 30 * 1024) - - self.show_step(6) - self.fuel_web.check_ceph_status(cluster_id) - - self.show_step(7) - - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - change_config(node['ip'], umm=False) - - self.show_step(8) - assert_true(settings.PATCHING_RUN_RALLY, - 'PATCHING_RUN_RALLY was not set in true') - rally_benchmarks = {} - benchmark_results = {} - for tag in set(settings.RALLY_TAGS): - rally_benchmarks[tag] = RallyBenchmarkTest( - container_repo=settings.RALLY_DOCKER_REPO, - environment=self.env, - cluster_id=cluster_id, - test_type=tag - ) - benchmark_results[tag] = rally_benchmarks[tag].run() - logger.debug(benchmark_results[tag].show()) - - self.show_step(9) - for i in xrange(settings.RESTART_COUNT): - self.show_step(10, 'number {}'.format(i + 1), initialize=True) - self.fuel_web.cold_restart_nodes( - self.env.d_env.get_nodes(name__in=[ - 'slave-01', - 'slave-02', - 'slave-03', - 'slave-04', - 'slave-05'])) - - self.show_step(11) - self.fuel_web.assert_ha_services_ready(cluster_id) - - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(12) - self.fuel_web.wait_mysql_galera_is_up([primary_controller.name]) - - try: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - except AssertionError: - logger.debug("Test failed from first probe," - " we sleep 180 seconds and try one more time " - "and if it fails again - test will fail ") - time.sleep(180) - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - self.show_step(13) - # LB 1519018 - self.fuel_web.run_ostf(cluster_id=cluster_id) - self.env.make_snapshot("ceph_partitions_repetitive_cold_restart") diff --git a/fuelweb_test/tests/tests_strength/test_restart.py b/fuelweb_test/tests/tests_strength/test_restart.py deleted file mode 100644 index 8ee296f63..000000000 --- a/fuelweb_test/tests/tests_strength/test_restart.py +++ /dev/null @@ -1,390 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import time -from warnings import warn - -from proboscis import test -from proboscis import SkipTest - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import checkers -from fuelweb_test import logger -from fuelweb_test import ostf_test_mapping -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.settings import NEUTRON_SEGMENT - - -@test(groups=["thread_3", "ceph"]) -class CephRestart(TestBasic): - """CephRestart.""" # TODO documentation - - @test(depends_on_groups=['ceph_ha_one_controller_with_cinder'], - groups=["ceph_ha_one_controller_with_cinder_restart"]) - @log_snapshot_after_test - def ceph_ha_one_controller_with_cinder_restart(self): - """Restart cluster with ceph and cinder in ha mode - Scenario: - - 1. Create cluster in ha mode with 1 controller - 2. Add 1 node with controller and ceph OSD roles - 3. Add 1 node with compute role - 4. Add 2 nodes with cinder and ceph OSD roles - 5. Deploy the cluster - 6. Warm restart - 7. Check ceph status - - Duration 90m - Snapshot None - """ - self.env.revert_snapshot("ceph_ha_one_controller_with_cinder") - - cluster_id = self.fuel_web.get_last_created_cluster() - - # Warm restart - self.fuel_web.warm_restart_nodes( - self.env.d_env.nodes().slaves[:4]) - - # Wait for HA services ready - self.fuel_web.assert_ha_services_ready(cluster_id) - # Wait until OpenStack services are UP - self.fuel_web.assert_os_services_ready(cluster_id) - - self.fuel_web.run_ceph_task(cluster_id, offline_nodes=[]) - self.fuel_web.check_ceph_status(cluster_id) - - # Wait until Cinder services UP on a controller - self.fuel_web.wait_cinder_is_up(['slave-01']) - - try: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - except AssertionError: - logger.debug("Test failed from first probe," - " we sleep 60 second try one more time " - "and if it fails again - test will fails ") - time.sleep(60) - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - @test(depends_on_groups=['ceph_ha'], - groups=["ceph_ha_restart"]) - @log_snapshot_after_test - def ceph_ha_restart(self): - """Destructive ceph test in HA mode - - Scenario: - 1. Revert from ceph_ha - 2. Waiting up galera and cinder - 3. Check ceph status - 4. Run OSTF - 5. Destroy and remove osd-node - 6. Check ceph status - 7. Run OSTF - 8. Destroy and remove one compute node - 9. Check ceph status - 10. Run OSTF - 11. Cold restart - 12. Waiting up galera and cinder - 13. Run single OSTF - Create volume and attach it to instance - 14. 
Run OSTF - - Duration 30m - Snapshot ceph_ha_restart - - """ - self.env.revert_snapshot("ceph_ha") - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up(['slave-01']) - - # Wait until Cinder services UP on a controller - self.fuel_web.wait_cinder_is_up(['slave-01']) - - cluster_id = self.fuel_web.get_last_created_cluster() - - self.fuel_web.check_ceph_status(cluster_id) - - # Run ostf - self.fuel_web.run_ostf(cluster_id=cluster_id) - - # Destroy and remove osd-node - logger.info("Destroy and remove slave-06") - with self.fuel_web.get_ssh_for_node('slave-06') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - slave_06 = self.env.d_env.get_node(name='slave-06') - nailgun_node_id = self.fuel_web.get_nailgun_node_by_devops_node( - slave_06)['id'] - slave_06.destroy() - self.fuel_web.wait_node_is_offline(slave_06) - - self.fuel_web.delete_node(nailgun_node_id) - self.fuel_web.check_ceph_status(cluster_id) - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['sanity', 'smoke', 'ha']) - # Destroy and remove compute node - logger.info("Destroy and remove slave-05") - with self.fuel_web.get_ssh_for_node('slave-05') as remote_ceph: - self.fuel_web.prepare_ceph_to_delete(remote_ceph) - slave_05 = self.env.d_env.get_node(name='slave-05') - nailgun_node_id = self.fuel_web.get_nailgun_node_by_devops_node( - slave_05)['id'] - slave_05.destroy() - - self.fuel_web.wait_node_is_offline(slave_05) - - self.fuel_web.delete_node(nailgun_node_id) - self.fuel_web.check_ceph_status(cluster_id) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - # Cold restart - self.fuel_web.cold_restart_nodes( - self.env.d_env.get_nodes(name__in=[ - 'slave-01', - 'slave-02', - 'slave-03', - 'slave-04'])) - - # Wait for HA services ready - self.fuel_web.assert_ha_services_ready(cluster_id) - - # Wait until OpenStack services are UP - self.fuel_web.assert_os_services_ready(cluster_id) - - self.fuel_web.check_ceph_status(cluster_id) - - # Wait until MySQL Galera is UP on some controller - self.fuel_web.wait_mysql_galera_is_up(['slave-01']) - - # Wait until Cinder services UP on a controller - self.fuel_web.wait_cinder_is_up(['slave-01']) - - try: - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - except AssertionError: - logger.debug("Test failed from first probe," - " we sleep 60 second try one more time " - "and if it fails again - test will fails ") - time.sleep(180) - self.fuel_web.run_single_ostf_test( - cluster_id, test_sets=['smoke'], - test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get( - 'Create volume and attach it to instance')) - - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("ceph_ha_restart") - - -@test(enabled=False, groups=["thread_1"]) -class HAOneControllerNeutronRestart(TestBasic): - """HAOneControllerNeutronRestart - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_restart - - """ - - @test(enabled=False, - depends_on=[SetupEnvironment.prepare_slaves_3], - groups=["ha_one_controller_neutron_warm_restart"]) - @log_snapshot_after_test - def ha_one_controller_neutron_warm_restart(self): - """Warm restart for ha one controller environment - - Test disabled and move to fuel_tests suite: - fuel_tests.test.test_restart.TestHAOneControllerNeutronRestart - - Scenario: - 1. Create cluster - 2. Add 1 node with controller role - 3. Add 1 node with compute role - 4. Deploy the cluster - 5. 
Run network verification - 6. Run OSTF - 7. Warm restart - 8. Wait for HA services to be ready - 9. Wait for OS services to be ready - 10. Wait for Galera is up - 11. Verify firewall rules - 12. Run network verification - 13. Run OSTF - - Duration 30m - - """ - # pylint: disable=W0101 - warn("Test disabled and move to fuel_tests suite", DeprecationWarning) - raise SkipTest("Test disabled and move to fuel_tests suite") - - self.env.revert_snapshot("ready_with_3_slaves") - - self.show_step(1, initialize=True) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - ) - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'] - } - ) - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - self.show_step(6) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(7) - self.fuel_web.warm_restart_nodes( - self.env.d_env.get_nodes(name__in=['slave-01', 'slave-02'])) - - self.show_step(8) - self.fuel_web.assert_ha_services_ready(cluster_id) - - self.show_step(9) - self.fuel_web.assert_os_services_ready(cluster_id) - - self.show_step(10) - self.fuel_web.wait_mysql_galera_is_up(['slave-01']) - - self.show_step(11) - self.fuel_web.security.verify_firewall(cluster_id) - - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - - self.show_step(13) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - -@test(groups=["five_controllers_restart"]) -class FiveControllerRestart(TestBasic): - """HAFiveControllerNeutronRestart.""" # TODO documentation - - @test(depends_on=[SetupEnvironment.prepare_slaves_all], - groups=["deploy_reset_five_controllers"]) - @log_snapshot_after_test - def deploy_reset_five_controllers(self): - """Deployment with 5 controllers, NeutronVLAN with reset and re-deploy - - Scenario: - 1. Deploy environment with 5 controller NeutronVLAN, 2 compute, - 1 cinder with disks partitioning 'vdc' - 2. Verify networks - 3. Run OSTF tests - 4. Reset cluster - 5. Change openstack username, password, tenant - 6. Re-deploy environment - 7. Wait for HA services to be ready - 8. Wait for for OS services to be ready - 9. Verify networks - 10. 
Run OSTF - - Duration 120m - Snapshot deploy_reset_five_controllers - - """ - - self.env.revert_snapshot("ready_with_all_slaves") - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=DEPLOYMENT_MODE, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['vlan'], - 'tenant': 'simpleVlan', - 'user': 'simpleVlan', - 'password': 'simpleVlan' - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['compute'], - 'slave-07': ['compute'], - 'slave-08': ['cinder'] - } - ) - - cinder_nodes = self.fuel_web.\ - get_nailgun_cluster_nodes_by_roles(cluster_id, ['cinder'], - role_status='pending_roles') - for cinder_node in cinder_nodes: - cinder_image_size = self.fuel_web.\ - update_node_partitioning(cinder_node, node_role='cinder') - - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(2) - self.fuel_web.verify_network(cluster_id) - - for cinder in cinder_nodes: - checkers.check_cinder_image_size(cinder['ip'], cinder_image_size) - - # ostf_tests before reset - self.show_step(3) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.show_step(4) - self.fuel_web.stop_reset_env_wait(cluster_id) - self.show_step(5) - attributes = self.fuel_web.client.get_cluster_attributes(cluster_id) - access_attr = attributes['editable']['access'] - access_attr['user']['value'] = 'myNewUser' - access_attr['password']['value'] = 'myNewPassword' - access_attr['tenant']['value'] = 'myNewTenant' - self.fuel_web.client.update_cluster_attributes(cluster_id, attributes) - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(7) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.show_step(8) - self.fuel_web.assert_os_services_ready(cluster_id, timeout=10 * 60) - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - - # ostf_tests after reset - self.show_step(10) - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=['ha', 'smoke', 'sanity']) - - self.env.make_snapshot("deploy_reset_five_controllers") diff --git a/fuelweb_test/tests/tests_uca/__init__.py b/fuelweb_test/tests/tests_uca/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fuelweb_test/tests/tests_uca/test_uca.py b/fuelweb_test/tests/tests_uca/test_uca.py deleted file mode 100644 index f8e71ad30..000000000 --- a/fuelweb_test/tests/tests_uca/test_uca.py +++ /dev/null @@ -1,348 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
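The deploy_reset_five_controllers test above rotates the OpenStack credentials through the cluster-attributes endpoint, and the pattern generalizes to any setting under the 'editable' section. A minimal sketch, reusing exactly the client calls that appear in that test:

    def set_access_credentials(fuel_web, cluster_id, user, password, tenant):
        # Same get/modify/update round-trip as in
        # deploy_reset_five_controllers; the new values take effect on the
        # next deployment of the cluster.
        attributes = fuel_web.client.get_cluster_attributes(cluster_id)
        access = attributes['editable']['access']
        access['user']['value'] = user
        access['password']['value'] = password
        access['tenant']['value'] = tenant
        fuel_web.client.update_cluster_attributes(cluster_id, attributes)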
- -from proboscis import test -from proboscis.asserts import assert_true, assert_is_not_none - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["deploy_from_uca"]) -class UCATest(TestBasic): - """Tests for "enable deployment from ubuntu cloud archive" feature. - Deploy several cluster using Ubuntu+UCA release then validate packages - origin (ubuntu-cloud.archive.canonical.com)""" - - def get_uca_repo(self, cluster_id): - """Pick link to UCA repository from cluster settings""" - repos = self.fuel_web.get_cluster_repos(cluster_id) - # only check that the UCA uri exists - template = '{uri}/' - uca_repo = None - for repo in repos['value']: - if repo['name'] == 'uca': - uca_repo = template.format(**repo) - break - assert_is_not_none(uca_repo, "UCA repo was not found!") - assert_true("ubuntu-cloud.archive.canonical.com" in uca_repo, - "{!r} does not contains link to UCA repo".format(uca_repo)) - return uca_repo - - @staticmethod - def check_package_origin(ip, package, origin): - """Check that given package was installed from given repository""" - version_cmd = ("apt-cache policy {package} | " - "awk '$1 == \"Installed:\" {{print $2}}'").format( - package=package) - version = SSHManager().execute_on_remote(ip, version_cmd)['stdout_str'] - origin_cmd = ("apt-cache madison {package} | " - "grep '{version}'").format(package=package, - version=version) - result = SSHManager().execute_on_remote(ip, origin_cmd)['stdout'] - # we only want to check for the UCA uri because it might be in main - # or proposed - repos = [str.strip(line.split("|")[2]) for line in result] - # Remove trailing spaces and backslash characters to avoid - # false negatives. - origin = origin.rstrip('/ ') - assert_true( - any([origin in repo for repo in repos]), - "Package {!r}: repository {!r} not found in {!r}".format( - package, origin, repos) - ) - - @staticmethod - def get_os_packages(ip, packages_pattern=None): - """Pick names of some OS packages from node""" - if not packages_pattern: - packages_pattern = "neutron|nova|cinder|keystone|" \ - "ceilometer|ironic|glance" - - packages = SSHManager().execute_on_remote( - ip, "dpkg-query -W -f '${{package}}\\n' | grep -E '{}'".format( - packages_pattern) - )['stdout_str'] - return packages.split('\n') - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["uca_neutron_ha"]) - @log_snapshot_after_test - def uca_neutron_ha(self): - """Deploy cluster in ha mode with UCA repo - - Scenario: - 1. Create cluster using UCA release - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute+cinder role - 4. Run network verification - 5. Deploy the cluster - 6. Run network verification - 7. Ensure that openstack packages were taken from UCA repository - 8. 
Run OSTF - - Duration 60m - Snapshot uca_neutron_ha - """ - self.env.revert_snapshot("ready_with_5_slaves") - - uca_enabled = {'uca_enabled': True} - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - release_name=settings.OPENSTACK_RELEASE_UBUNTU_UCA, - settings=uca_enabled - ) - - self.show_step(2) - self.show_step(3) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'cinder'], - 'slave-05': ['compute', 'cinder'], - } - ) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - uca_repo = self.get_uca_repo(cluster_id) - assert_is_not_none(uca_repo, "UCA repo was not found!") - - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - logger.info("Checking packages on node {!r}".format(node['name'])) - packages = self.get_os_packages(node['ip']) - for package in packages: - self.check_package_origin(node['ip'], package, uca_repo) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.env.make_snapshot("uca_neutron_ha", is_make=True) - - @test(depends_on=[SetupEnvironment.prepare_slaves_5], - groups=["uca_neutron_tun_ceph"]) - @log_snapshot_after_test - def uca_neutron_tun_ceph(self): - """Deploy cluster with NeutronTUN, Ceph and UCA repo - - Scenario: - 1. Create cluster using UCA release - 2. Add 3 nodes with controller role - 3. Add 2 nodes with compute+ceph role - 4. Add 1 node with ceph role - 5. Run network verification - 6. Deploy the cluster - 7. Run network verification - 8. Ensure that openstack packages were taken from UCA repository - 9. 
Run OSTF - - Duration 60m - """ - self.env.revert_snapshot("ready_with_5_slaves") - self.env.bootstrap_nodes([self.env.d_env.get_node(name='slave-06')]) - - cluster_settings = { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'uca_enabled': True, - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ephemeral_ceph': True - } - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - release_name=settings.OPENSTACK_RELEASE_UBUNTU_UCA, - settings=cluster_settings - ) - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - 'slave-06': ['ceph-osd'] - } - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - uca_repo = self.get_uca_repo(cluster_id) - assert_is_not_none(uca_repo, "UCA repo was not found!") - - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - logger.info("Checking packages on node {!r}".format(node['name'])) - packages = self.get_os_packages(node['ip']) - for package in packages: - self.check_package_origin(node['ip'], package, uca_repo) - - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["uca_vlan_mongo"], - enabled=False) - @log_snapshot_after_test - def uca_vlan_mongo(self): - """Deploy cluster with NeutronVlan, Ceilometer and UCA repo - - Scenario: - 1. Create cluster using UCA release, Ceph for images and objects - 2. Add 3 nodes with controller+mongo role - 3. Add 1 node with compute+cinder role - 4. Add 3 nodes with ceph-osd role - 5. Run network verification - 6. Deploy the cluster - 7. Run network verification - 8. Ensure that openstack packages were taken from UCA repository - 9. 
Run OSTF - - Duration 60m - """ - self.env.revert_snapshot("ready_with_9_slaves") - - cluster_settings = { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'], - 'uca_enabled': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ceilometer': True, - } - - self.show_step(1) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - release_name=settings.OPENSTACK_RELEASE_UBUNTU_UCA, - settings=cluster_settings - ) - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller', 'mongo'], - 'slave-02': ['controller', 'mongo'], - 'slave-03': ['controller', 'mongo'], - 'slave-04': ['compute', 'cinder'], - 'slave-05': ['ceph-osd'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['ceph-osd'] - } - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - uca_repo = self.get_uca_repo(cluster_id) - assert_is_not_none(uca_repo, "UCA repo was not found!") - - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - logger.info("Checking packages on node {!r}".format(node['name'])) - packages = self.get_os_packages(node['ip']) - for package in packages: - self.check_package_origin(node['ip'], package, uca_repo) - - self.show_step(9) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - @test(depends_on=[uca_neutron_ha], groups=['uca_shutdown_cluster']) - @log_snapshot_after_test - def uca_shutdown_cluster(self): - """Graceful shutdown of cluster deployed from UCA - - Scenario: - 1. Revert "uca_neutron_ha" snapshot - 2. Warm power off compute+cinder nodes - 3. Warm power off controller nodes - 4. Start compute+cinder nodes - 5. Start controller nodes - 6. Wait until ha services are ok - 7. Run OSTF - - Duration: 20m - """ - self.show_step(1) - self.env.revert_snapshot("uca_neutron_ha") - - cluster_id = self.fuel_web.get_last_created_cluster() - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller']) - other = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['compute']) - d_controllers = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - controllers) - d_other = self.fuel_web.get_devops_nodes_by_nailgun_nodes(other) - - self.show_step(2) - self.fuel_web.warm_shutdown_nodes(d_other) - self.show_step(3) - self.fuel_web.warm_shutdown_nodes(d_controllers) - - self.show_step(4) - self.fuel_web.warm_start_nodes(d_other) - self.show_step(5) - self.fuel_web.warm_start_nodes(d_controllers) - - self.show_step(6) - self.fuel_web.assert_ha_services_ready(cluster_id) - - self.show_step(7) - self.fuel_web.run_ostf(cluster_id) diff --git a/fuelweb_test/tests/tests_upgrade/__init__.py b/fuelweb_test/tests/tests_upgrade/__init__.py deleted file mode 100644 index 498d25c8c..000000000 --- a/fuelweb_test/tests/tests_upgrade/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# pylint: disable=line-too-long - -from fuelweb_test.tests.tests_upgrade import test_clone_env # noqa -from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_ceph_ha # noqa -from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_net_tmpl # noqa -from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_no_cluster # noqa -from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_plugin # noqa -from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_smoke # noqa -from fuelweb_test.tests.tests_upgrade import test_node_reassignment # noqa -from fuelweb_test.tests.tests_upgrade import upgrader_tool # noqa -from fuelweb_test.tests.tests_upgrade import test_os_upgrade # noqa -from fuelweb_test.tests.tests_upgrade import \ - test_data_driven_upgrade_multirack_deployment # noqa - - -__all__ = [ - 'test_clone_env', - 'test_data_driven_upgrade_ceph_ha', - 'test_data_driven_upgrade_net_tmpl', - 'test_data_driven_upgrade_no_cluster', - 'test_data_driven_upgrade_plugin', - 'test_data_driven_upgrade_smoke', - 'test_node_reassignment', - 'test_os_upgrade', - 'upgrader_tool', - 'test_data_driven_upgrade_multirack_deployment' -] diff --git a/fuelweb_test/tests/tests_upgrade/example_upgrade_scenario.yaml b/fuelweb_test/tests/tests_upgrade/example_upgrade_scenario.yaml deleted file mode 100644 index e5363e0af..000000000 --- a/fuelweb_test/tests/tests_upgrade/example_upgrade_scenario.yaml +++ /dev/null @@ -1,23 +0,0 @@ -upgrade_data: - - name: step_1 - fuel_version: "7.0" - action: backup - source_snapshot_name: prepare_upgrade_ceph_ha_before_backup - backup_name: data_ceph_ha_octane_backup_7.tgz - repos_backup_name: repo_ceph_ha_octane_backup_7.tgz - backup_snapshot_name: ceph_ha_octane_backup_7 - - name: step_2 - source_fuel_version: "7.0" - target_fuel_version: "8.0" - action: restore - backup_snapshot_name: ceph_ha_octane_backup_7 - backup_name: data_ceph_ha_octane_backup_7.tgz - repos_backup_name: repo_ceph_ha_octane_backup_7.tgz - restore_snapshot_name: ceph_ha_octane_restore_8 - - name: step_3 - fuel_version: "8.0" - action: backup - source_snapshot_name: ceph_ha_octane_restore_8 - backup_name: backup_ceph_ha.tar.gz - repos_backup_name: repos_backup_ceph_ha.tar.gz - backup_snapshot_name: upgrade_ceph_ha_backup \ No newline at end of file diff --git a/fuelweb_test/tests/tests_upgrade/octane_patcher.sh b/fuelweb_test/tests/tests_upgrade/octane_patcher.sh deleted file mode 100644 index 52a02a6ad..000000000 --- a/fuelweb_test/tests/tests_upgrade/octane_patcher.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -set -ex -PATCH_DIR=$1 -shift - -show_only_unmerged() { - xargs -tI% sh -c 'curl -s "https://review.openstack.org/changes/%/detail?O=2002" | grep -q "\"status\": \"MERGED\"" && (echo http://review.openstack.org/% MERGED > /dev/stderr) || echo %' -} - -show_only_unapplied() { - xargs -tI% sh -c 'curl -s "https://review.openstack.org/changes/%/detail?O=2002" | sed -nE "/current_revision/ {s/[ ]+?.current_revision.: .//;s/.,\$//p;q}" | xargs -tI{} sh -c "curl -s https://review.openstack.org/changes/%/revisions/{}/patch?download | 
base64 -d | patch -N --follow-symlinks --batch -p2 --silent --dry-run 2>&1 >/dev/null && echo % || (echo http://review.openstack.org/% cant be applied > /dev/stderr)"' -} - -cr_filter() { - grep -oE '[0-9]+?' -} - - - -apply_patches() { - cd $1 - shift - cr_filter | show_only_unmerged | show_only_unapplied | xargs -tI% sh -c 'curl -s "https://review.openstack.org/changes/%/detail?O=2002" | sed -nE "/current_revision/ {s/[ ]+?.current_revision.: .//;s/.,\$//p;q}" | xargs -tI{} sh -c "curl -s https://review.openstack.org/changes/%/revisions/{}/patch?download | base64 -d | patch --batch -p2 && echo http://review.openstack.org/% successfully"' -} - -test $# -ge 1 && echo $* | apply_patches ${PATCH_DIR} diff --git a/fuelweb_test/tests/tests_upgrade/test_clone_env.py b/fuelweb_test/tests/tests_upgrade/test_clone_env.py deleted file mode 100644 index 8a8e05c19..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_clone_env.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from keystoneauth1.exceptions import NotFound -from keystoneauth1.exceptions import BadRequest -from proboscis.asserts import assert_equal -from proboscis.asserts import fail -from proboscis import test -from proboscis import SkipTest - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test import logger - - -@test(groups=["clone_env_for_os_upgrade", "cluster_upgrade_extension"], - depends_on_groups=["upgrade_ceph_ha_restore"]) -class TestCloneEnv(TestBasic): - - snapshot = 'upgrade_ceph_ha_restore' - - @test(groups=["test_clone_environment"]) - @log_snapshot_after_test - def test_clone_environment(self): - """Test clone environment - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Clone cluster - 3. Check status code - 4. 
Check that clusters are equal - - """ - - def text_to_textlist(old_val, new_val): - return set([val.strip() for val in - old_val.split(',')]) == set(new_val) - - def get_field_comparer(old_type, new_type): - method_fields = {('text', 'text_list'): text_to_textlist} - return method_fields.get( - (old_type, new_type), - lambda old_val, new_val: old_val == new_val) - - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - - release_id = self.fuel_web.get_next_deployable_release_id( - cluster["release_id"]) - - data = { - "name": "new_test_cluster", - "release_id": release_id - } - body = self.fuel_web.client.clone_environment(cluster_id, data) - - assert_equal(release_id, body["release_id"]) - assert_equal(cluster["net_provider"], body["net_provider"]) - assert_equal(cluster["mode"], body["mode"]) - - cluster_attrs = self.fuel_web.client.get_cluster_attributes( - cluster_id - ) - cloned_cluster_attrs = self.fuel_web.client.get_cluster_attributes( - body["id"] - ) - - for key in cloned_cluster_attrs["editable"]: - if key == "repo_setup": - continue - for key1, value1 in cloned_cluster_attrs["editable"][key].items(): - if "value" in value1: - if "value" in cluster_attrs["editable"].get(key, {}).get( - key1, {}): - value_old = cluster_attrs["editable"][key][key1] - comparator = get_field_comparer(value_old["type"], - value1["type"]) - assert_equal( - comparator(value_old["value"], value1["value"]), - True) - elif "values" in value1: - if "values" in cluster_attrs["editable"].get(key, {}).get( - key1, {}): - assert_equal( - cluster_attrs["editable"][key][key1]["values"], - value1["values"]) - - old_cluster_net_cfg = self.fuel_web.client.get_networks(cluster_id) - cloned_cluster_net_cfg = self.fuel_web.client.get_networks(body["id"]) - - for parameter in cloned_cluster_net_cfg["networking_parameters"]: - if parameter in old_cluster_net_cfg["networking_parameters"]: - assert_equal( - old_cluster_net_cfg["networking_parameters"][parameter], - cloned_cluster_net_cfg["networking_parameters"][parameter] - ) - - for network in cloned_cluster_net_cfg["networks"]: - if network["name"] not in ["public", "management", "storage"]: - continue - for old_network in old_cluster_net_cfg["networks"]: - if network["name"] == old_network["name"] and network["name"]: - assert_equal(old_network["cidr"], network["cidr"]) - assert_equal(old_network["ip_ranges"], - network["ip_ranges"]) - assert_equal(old_network["vlan_start"], - network["vlan_start"]) - - @test(groups=["test_clone_nonexistent_cluster"]) - # TODO(astepanov) maintain names changes later - @log_snapshot_after_test - def test_clone_nonexistent_cluster(self): - """Test clone environment with nonexistent cluster id as argument - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Try to clone nonexistent environment - 3. 
Check status code - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - data = { - "name": "new_test_cluster", - "release_id": 123456 - } - try: - self.fuel_web.client.clone_environment(1234567, data) - except NotFound: - logger.debug('exceptions.NotFound received as expected') - else: - fail("Doesn't raise needed error") - - @test(groups=["test_clone_wo_name_in_body"]) - @log_snapshot_after_test - def test_clone_wo_name_in_body(self): - """Test clone without name in POST body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Try to clone environment without name in POST body - 3. Check status code - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - release_id = self.fuel_web.get_next_deployable_release_id( - cluster["release_id"]) - - data = { - "release_id": release_id - } - - try: - self.fuel_web.client.clone_environment(cluster_id, data) - except BadRequest: - logger.debug('exceptions.BadRequest received as expected') - else: - fail("Doesn't raise needed error") - - @test(groups=["test_clone_wo_release_id_in_body"]) - @log_snapshot_after_test - def test_clone_wo_release_id_in_body(self): - """Test clone without release id in POST body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Try to clone environment without release id in POST body - 3. Check status code - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - - data = { - "name": "new_test_cluster" - } - - try: - self.fuel_web.client.clone_environment(cluster_id, data) - except BadRequest: - logger.debug('exceptions.BadRequest received as expected') - else: - fail("Doesn't raise needed error") - - @test(groups=["test_clone_with_empty_body"]) - @log_snapshot_after_test - def test_clone_with_empty_body(self): - """Test clone with empty body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Try to clone environment with empty body - 3. Check status code - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - - try: - self.fuel_web.client.clone_environment(cluster_id, None) - except BadRequest: - logger.debug('exceptions.BadRequest received as expected') - else: - fail("Doesn't raise needed error") - - @test(groups=["test_clone_with_nonexistent_release_id"]) - @log_snapshot_after_test - def test_clone_with_nonexistent_release_id(self): - """Test clone with nonexistent release id in POST body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Try to clone environment with nonexistent - release id in POST body - 3. 
Check status code - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - - data = { - "name": "new_test_cluster", - "release_id": 123456 - } - - try: - self.fuel_web.client.clone_environment(cluster_id, data) - except NotFound: - logger.debug('exceptions.NotFound received as expected') - else: - fail("Doesn't raise needed error") - - @test(groups=["test_clone_with_incorrect_release_id"]) - @log_snapshot_after_test - def test_clone_with_incorrect_release_id(self): - """Test clone with incorrect release id in POST body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Try to clone environment with incorrect - release id in POST body - 3. Check status code - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - - data = { - "name": "new_test_cluster", - "release_id": "djigurda" - } - - try: - self.fuel_web.client.clone_environment(cluster_id, data) - except BadRequest: - logger.debug('exceptions.BadRequest received as expected') - else: - fail("Doesn't raise needed error") - - @test(groups=["test_double_clone_environment"]) - @log_snapshot_after_test - def test_double_clone_environment(self): - """Test double clone environment - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Clone cluster - 3. Clone cluster again - 4. Check status code - - """ - - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - - release_id = self.fuel_web.get_next_deployable_release_id( - cluster["release_id"]) - - data = { - "name": "new_test_cluster", - "release_id": release_id - } - self.fuel_web.client.clone_environment(cluster_id, data) - try: - self.fuel_web.client.clone_environment(cluster_id, data) - except BadRequest: - logger.debug('exceptions.BadRequest received as expected') - else: - fail("Doesn't raise needed error") diff --git a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_base.py b/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_base.py deleted file mode 100644 index 1f766b927..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_base.py +++ /dev/null @@ -1,468 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
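[Editorial sketch, not part of the removed file.] The negative-path tests in the removed test_clone_env.py above all repeat one pattern: issue the API call, accept a specific HTTP error (NotFound or BadRequest) as the expected outcome, and fail the test when no error is raised at all. A minimal sketch of that pattern factored into a helper follows; the helper name and the logging setup are ours, while fail() and the keystoneauth1 exceptions are the ones the removed tests already import:

    import logging

    from keystoneauth1.exceptions import NotFound
    from proboscis.asserts import fail

    logger = logging.getLogger(__name__)


    def assert_api_raises(expected_exc, func, *args, **kwargs):
        """Fail the test unless func(*args, **kwargs) raises expected_exc."""
        try:
            func(*args, **kwargs)
        except expected_exc:
            # The error is the expected outcome of the negative test.
            logger.debug('%s received as expected', expected_exc.__name__)
        else:
            fail("Doesn't raise {}".format(expected_exc.__name__))

    # Usage, mirroring test_clone_nonexistent_cluster above
    # (client and data as in the removed test):
    # assert_api_raises(NotFound, client.clone_environment, 1234567, data)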
- -from __future__ import unicode_literals - -import os -# pylint: disable=import-error -# pylint: disable=no-name-in-module -from distutils.version import LooseVersion -# pylint: enable=no-name-in-module -# pylint: enable=import-error - -from devops.error import TimeoutError, DevopsCalledProcessError -from proboscis.asserts import assert_is_not_none -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_false -from proboscis.asserts import assert_true - -from fuelweb_test import logger -from fuelweb_test.helpers.utils import YamlEditor -from fuelweb_test.settings import DEPLOYMENT_MODE -from fuelweb_test.settings import KEYSTONE_CREDS -from fuelweb_test.settings import LOGS_DIR -from fuelweb_test.settings import OCTANE_PATCHES -from fuelweb_test.settings import OCTANE_REPO_LOCATION -from fuelweb_test.settings import REPLACE_DEFAULT_REPOS -from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE -from fuelweb_test.settings import UPGRADE_FUEL_FROM -from fuelweb_test.settings import UPGRADE_BACKUP_FILES_LOCAL_DIR -from fuelweb_test.settings import UPGRADE_BACKUP_FILES_REMOTE_DIR -from fuelweb_test.settings import UPGRADE_FUEL_TO -from fuelweb_test.tests.base_test_case import TestBasic - - -class DataDrivenUpgradeBase(TestBasic): - - IGNORED_OSTF_TESTS = { - '7.0': ['Check that required services are running', - 'Instance live migration'], - '8.0': ['Check that required services are running', - 'Launch instance with file injection'], - '9.0': ['Instance live migration'], - '9.1': ['Instance live migration'] - } - - OCTANE_COMMANDS = { - 'backup': 'octane -v --debug fuel-backup --to {path}', - 'repo-backup': 'octane -v --debug fuel-repo-backup --to {path} --full', - 'restore': - 'octane -v --debug fuel-restore --from {path} ' - '--admin-password {pwd} > ~/restore_stdout.log ' - '2> ~/restore_stderr.log', - 'repo-restore': 'octane -v --debug fuel-repo-restore --from {path}', - 'update-bootstrap-centos': 'octane -v --debug update-bootstrap-centos' - } - - FUEL_MIRROR_CFG_FILE = "/usr/share/fuel-mirror/ubuntu.yaml" - - def __init__(self): - super(DataDrivenUpgradeBase, self).__init__() - self.local_dir_for_backups = UPGRADE_BACKUP_FILES_LOCAL_DIR - if not os.path.exists(self.local_dir_for_backups): - os.makedirs(self.local_dir_for_backups) - self.remote_dir_for_backups = UPGRADE_BACKUP_FILES_REMOTE_DIR - self.cluster_creds = { - 'tenant': 'upgrade', - 'user': 'upgrade', - 'password': 'upgrade' - } - self.snapshot_name = None - self.source_snapshot_name = None - self.backup_snapshot_name = None - self.restore_snapshot_name = None - self.tarball_remote_dir = None - self.backup_name = None - self.repos_backup_name = None - # pylint: disable=no-member - if hasattr(self.env, "reinstall_master_node"): - self.reinstall_master_node = self.env.reinstall_master_node - # pylint: enable=no-member - - # cluster's names database for avoiding true hardcode but allowing to - # store names in one place. 
All cluster names should migrate here later - # in separate commits - self.cluster_names = { - "ceph_ha": "ceph_ha_cluster_for_upgrade", - "smoke": "smoke_cluster_for_upgrade" - } - - @property - def backup_path(self): - return os.path.join(self.remote_dir_for_backups, self.backup_name) - - @property - def local_path(self): - return os.path.join(self.local_dir_for_backups, self.backup_name) - - @property - def repos_backup_path(self): - return os.path.join(self.remote_dir_for_backups, - self.repos_backup_name) - - @property - def fuel_version(self): - version = self.fuel_web.client.get_api_version()['release'] - return LooseVersion(version) - - @property - def repos_local_path(self): - return os.path.join(self.local_dir_for_backups, self.repos_backup_name) - - @property - def admin_remote(self): - return self.env.d_env.get_admin_remote() - - # pylint: disable=no-member - - def upload_file(self, source, destination, remote=None): - if not remote: - remote = self.admin_remote - assert_true(os.path.exists(source), - "Source file {!r} does not exists".format(source)) - logger.info("Uploading {!r} to {!r}".format(source, destination)) - remote.upload(source, destination) - assert_true(remote.exists(destination), - "Destination file {!r} does not exists after " - "uploading".format(destination)) - logger.info("File {!r} uploaded".format(destination)) - - def download_file(self, source, destination, remote=None): - if not remote: - remote = self.admin_remote - assert_true( - remote.exists(source), - "Source file {!r} on remote does not exists".format(source)) - logger.info("Downloading {!r} to {!r}".format(source, destination)) - remote.download(source, destination) - assert_true(os.path.exists(destination), - "Destination file {!r} does not exists after " - "downloading".format(destination)) - logger.info("File {!r} downloaded".format(destination)) - - def remove_remote_file(self, path, remote=None): - if not remote: - remote = self.admin_remote - remote.rm_rf(path) - - def remote_file_exists(self, path, remote=None): - if not remote: - remote = self.admin_remote - return remote.exists(path) - - # pylint: enable=no-member - - def cleanup(self): - os.remove(self.local_path) - os.remove(self.repos_local_path) - - def install_octane(self): - """ Install fuel-octane package to master node""" - conf_file = None - if OCTANE_REPO_LOCATION: - conf_file = '/etc/yum.repos.d/fuel-proposed.repo' - cmd = ("echo -e " - "'[fuel-proposed]\n" - "name=fuel-proposed\n" - "baseurl={}/\n" - "gpgcheck=0\n" - "priority=1' > {}").format( - OCTANE_REPO_LOCATION, - conf_file) - - # pylint: disable=no-member - self.admin_remote.check_call(cmd) - # pylint: enable=no-member - - logger.info("Removing previously installed fuel-octane") - # pylint: disable=no-member - self.admin_remote.check_call( - "yum remove -y fuel-octane", - raise_on_err=False) - self.admin_remote.check_call( - "rm -rf /usr/lib/python2.*/site-packages/octane", - raise_on_err=False) - if self.fuel_version >= LooseVersion("9.0"): - self.admin_remote.check_call( - "yum remove -y fuel-nailgun-extension-cluster-upgrade", - raise_on_err=False) - - logger.info("Installing fuel-octane") - self.admin_remote.check_call("yum install -y fuel-octane") - - octane_log = self.admin_remote.check_call( - "rpm -q --changelog fuel-octane").stdout_str - # pylint: enable=no-member - logger.info("Octane changes:") - logger.info(octane_log) - - if OCTANE_PATCHES: - logger.info("Patching octane with CR: {!r}".format( - OCTANE_PATCHES)) - # pylint: disable=no-member - 
self.admin_remote.upload( - os.path.join( - os.path.abspath(os.path.dirname(__file__)), - "octane_patcher.sh"), - "/tmp/octane_patcher.sh") - - self.admin_remote.check_call( - "bash /tmp/octane_patcher.sh {}".format( - OCTANE_PATCHES)) - # pylint: enable=no-member - - if OCTANE_REPO_LOCATION: - # pylint: disable=no-member - self.admin_remote.rm_rf(conf_file) - # pylint: enable=no-member - - def octane_action(self, action, path=None): - assert_true(action in self.OCTANE_COMMANDS.keys(), - "Unknown octane action '{}', aborting".format(action)) - octane_cli_args = { - 'path': path, - 'pwd': KEYSTONE_CREDS['password'] - } - admin_remote = self.env.d_env.get_admin_remote() - if 'backup' in action: - assert_false( - admin_remote.exists(path), - 'File {!r} already exists, not able to reuse'.format(path)) - elif 'restore' in action: - assert_true( - admin_remote.exists(path), - 'File {!r} does not exists - can not run restore'.format(path)) - - cmd = self.OCTANE_COMMANDS[action].format(**octane_cli_args) - - try: - admin_remote.check_call(cmd, timeout=60 * 60) - except (DevopsCalledProcessError, TimeoutError): - # snapshot generating procedure can be broken - admin_remote.download( - "/var/log/octane.log", - os.path.join(LOGS_DIR, - "octane_{}_.log".format(os.path.basename(path)))) - raise - - if 'backup' in action: - assert_true( - admin_remote.exists(path), - "File {!r} was not created after backup command!".format(path)) - - def do_backup(self, - backup_path, local_path, - repos_backup_path=None, repos_local_path=None): - """ Wrapper for backup process of upgrading procedure""" - # BOTH repos arguments should be passed at the same time - # or BOTH should not be passed - assert_equal(bool(repos_backup_path), bool(repos_local_path), - "Both repos arguments should be specified") - self.install_octane() - - cmd = "mkdir -p {}".format(self.remote_dir_for_backups) - # pylint: disable=no-member - self.admin_remote.check_call(cmd) - - self.octane_action("backup", backup_path) - logger.info("Downloading {}".format(backup_path)) - - self.admin_remote.download(backup_path, local_path) - # pylint: enable=no-member - assert_true(os.path.exists(local_path)) - - if repos_backup_path: - self.octane_action("repo-backup", repos_backup_path) - logger.info("Downloading {}".format(repos_backup_path)) - # pylint: disable=no-member - self.admin_remote.download(repos_backup_path, repos_local_path) - # pylint: enable=no-member - assert_true(os.path.exists(repos_local_path)) - - def do_restore(self, - backup_path, local_path, - repos_backup_path=None, repos_local_path=None): - """ Wrapper for restore process of upgrading procedure""" - # BOTH repos arguments should be passed at the same time - # or BOTH should not be passed - assert_equal(bool(repos_backup_path), bool(repos_local_path), - "Both repos arguments should be specified") - self.install_octane() - - cmd = "mkdir -p {}".format(self.remote_dir_for_backups) - # pylint: disable=no-member - self.admin_remote.check_call(cmd) - - logger.info("Uploading {}".format(local_path)) - - self.admin_remote.upload(local_path, backup_path) - # pylint: enable=no-member - logger.info("Applying backup from {}".format(backup_path)) - self.octane_action("restore", backup_path) - - if repos_backup_path: - logger.info("Uploading {}".format(repos_local_path)) - # pylint: disable=no-member - self.admin_remote.upload(repos_local_path, repos_backup_path) - # pylint: enable=no-member - logger.info("Applying backup from {}".format(repos_backup_path)) - self.octane_action("repo-restore", 
repos_backup_path) - - if self.fuel_version in (LooseVersion('7.0'), LooseVersion('8.0')): - logger.info( - "Update CentOS bootstrap image with restored ssh keys") - self.octane_action('update-bootstrap-centos') - - if REPLACE_DEFAULT_REPOS and REPLACE_DEFAULT_REPOS_ONLY_ONCE: - self.fuel_web.replace_default_repos() - if self.fuel_version >= LooseVersion('8.0'): - self.fuel_web.change_default_network_settings() - - discover_n_nodes = [node for node in self.fuel_web.client.list_nodes() - if self.fuel_web.is_node_discovered(node)] - - if discover_n_nodes: - logger.info("Rebooting bootstrapped nodes") - discover_d_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - discover_n_nodes) - self.fuel_web.cold_restart_nodes(discover_d_nodes) - - def revert_source(self): - assert_is_not_none(self.source_snapshot_name, - "'source_snapshot_name' variable is not defined!") - assert_true( - self.env.revert_snapshot(self.source_snapshot_name), - "The test can not use given environment - snapshot " - "{!r} does not exists".format(self.source_snapshot_name)) - - def revert_backup(self): - assert_is_not_none(self.backup_snapshot_name, - "'backup_snapshot_name' variable is not defined!") - assert_true( - self.env.revert_snapshot(self.backup_snapshot_name), - "The test can not use given environment - snapshot " - "{!r} does not exists".format(self.backup_snapshot_name)) - - def revert_restore(self): - assert_is_not_none(self.snapshot_name, - "'snapshot_name' variable is not defined!") - assert_true( - self.env.revert_snapshot(self.snapshot_name), - "The test can not use given environment - snapshot " - "{!r} does not exists".format(self.snapshot_name)) - - def deploy_cluster(self, cluster_settings): - slaves_count = len(cluster_settings['nodes']) - slaves = self.env.d_env.nodes().slaves[:slaves_count] - for chunk in [slaves[x:x + 5] for x in range(0, slaves_count, 5)]: - self.env.bootstrap_nodes(chunk, skip_timesync=True) - self.env.sync_time() - cluster_id = self.fuel_web.create_cluster( - name=cluster_settings['name'], - mode=DEPLOYMENT_MODE, - settings=cluster_settings['settings'] - ) - if cluster_settings.get('plugin'): - plugin_name = cluster_settings['plugin']['name'] - assert_true( - self.fuel_web.check_plugin_exists(cluster_id, plugin_name)) - self.fuel_web.update_plugin_data( - cluster_id, plugin_name, cluster_settings['plugin']['data']) - - self.fuel_web.update_nodes(cluster_id, cluster_settings['nodes']) - self.fuel_web.verify_network(cluster_id) - - # Code for debugging on hosts with low IO - # for chunk in [slaves[x:x+5] for x in range(0, slaves_count, 5)]: - # ids = [self.fuel_web.get_nailgun_node_by_devops_node(x)['id'] - # for x in chunk] - # self.fuel_web.client.provision_nodes(cluster_id, ids) - # wait(lambda: all( - # [self.fuel_web.get_nailgun_node_by_devops_node(node)['status' - # ] == 'provisioned' for node in chunk]), - # timeout=30 * 60, - # interval=60) - - self.fuel_web.deploy_cluster_wait(cluster_id) - self.fuel_web.verify_network(cluster_id) - - @staticmethod - def verify_bootstrap_on_node(remote, os_type): - os_type = os_type.lower() - if os_type not in ['ubuntu', 'centos']: - raise Exception("Only Ubuntu and CentOS are supported, " - "you have chosen {0}".format(os_type)) - - logger.info("Verify bootstrap on slave {0}".format(remote.host)) - - cmd = 'cat /etc/*release' - output = remote.check_call(cmd).stdout_str.lower() - assert_true(os_type in output, - "Slave {0} doesn't use {1} image for bootstrap " - "after {1} images were enabled, /etc/release " - "content: 
{2}".format(remote.host, os_type, output)) - - def check_cobbler_node_exists(self, node_id): - """Check node with following node_id is present in - the cobbler node list - :param node_id: fuel node id - """ - logger.debug("Check that cluster contains node with ID:{0} ". - format(node_id)) - admin_remote = self.env.d_env.get_admin_remote() - - cmd = 'bash -c "cobbler system list" | grep ' \ - '-w "node-{0}"'.format(node_id) - if self.fuel_version <= LooseVersion('8.0'): - cmd = "dockerctl shell cobbler {}".format(cmd) - admin_remote.check_call(cmd) - - def check_ostf(self, cluster_id, test_sets=None, timeout=30 * 60, - ignore_known_issues=False, additional_ignored_issues=None): - """Run OSTF tests with the ignoring some test result - """ - if additional_ignored_issues: - ignr_tests = additional_ignored_issues - else: - ignr_tests = [] - - if ignore_known_issues: - mrg_set = set() - for key, val in self.IGNORED_OSTF_TESTS.items(): - if ( - LooseVersion(UPGRADE_FUEL_FROM) <= - LooseVersion(key) <= - LooseVersion(UPGRADE_FUEL_TO) - ): - mrg_set.update(val) - mrg_set.update(ignr_tests) - ignr_tests = list(mrg_set) - - self.fuel_web.run_ostf(cluster_id, test_sets=test_sets, - should_fail=len(ignr_tests), - failed_test_name=ignr_tests, timeout=timeout) - - def add_proposed_to_fuel_mirror_config(self): - with YamlEditor(self.FUEL_MIRROR_CFG_FILE, - ip=self.env.get_admin_node_ip()) as editor: - proposed_desc = { - str("name"): "mos-proposed", - "uri": editor.content['mos_baseurl'], - "suite": "mos$mos_version-proposed", - "section": "main restricted", - "type": "deb", - "priority": 1050 - } - editor.content["groups"]["mos"].append(proposed_desc) - editor.content["repos"].append(proposed_desc) diff --git a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_ceph_ha.py b/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_ceph_ha.py deleted file mode 100644 index 3cfdee10c..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_ceph_ha.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from proboscis import test -from proboscis.asserts import assert_true, assert_not_equal -import yaml - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers import os_actions -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - LooseVersion - - -@test -class UpgradeCephHA(DataDrivenUpgradeBase): - def __init__(self): - super(UpgradeCephHA, self).__init__() - self.source_snapshot_name = "prepare_upgrade_ceph_ha_before_backup" - self.backup_snapshot_name = "upgrade_ceph_ha_backup" - self.snapshot_name = "upgrade_ceph_ha_restore" - self.backup_name = "backup_ceph_ha.tar.gz" - self.repos_backup_name = "repos_backup_ceph_ha.tar.gz" - assert_not_equal( - settings.KEYSTONE_CREDS['password'], 'admin', - "Admin password was not changed, aborting execution") - self.workload_description_file = os.path.join( - self.local_dir_for_backups, "ceph_ha_instances_data.yaml") - if settings.HARDWARE["slave_node_memory"] < 4096: - logger.warning("Less than 4gb RAM can be harmful in " - "live-migration scenario! Please increase it if you" - "want to upgrade this cloud later using " - "'export SLAVE_NODE_MEMORY='") - - @test(groups=['prepare_upgrade_ceph_ha_before_backup'], - depends_on=[SetupEnvironment.prepare_release]) - @log_snapshot_after_test - def prepare_upgrade_ceph_ha_before_backup(self): - """Prepare HA, ceph for all cluster using previous version of Fuel. - Nailgun password should be changed via KEYSTONE_PASSWORD env variable - - Scenario: - 1. Create cluster with NeutronVLAN and ceph for all (replica factor 3) - 2. Add 3 node with controller role - 3. Add 3 node with compute role - 4. Add 3 node with ceph osd role - 5. Verify networks - 6. Deploy cluster - 7. Spawn instance on each compute - 8. 
Write workload definition to storage file - - Duration: TODO - Snapshot: prepare_upgrade_ceph_ha_before_backup - """ - - self.check_run(self.source_snapshot_name) - self.env.revert_snapshot("ready", skip_timesync=True) - - admin_ip = self.env.get_admin_node_ip() - # pylint: disable=redefined-variable-type - if self.fuel_version <= LooseVersion("8.0"): - dns_ntp_arg = admin_ip - else: - dns_ntp_arg = [admin_ip] - # pylint: enable=redefined-variable-type - cluster_settings = { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'], - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ephemeral_ceph': True, - 'osd_pool_size': '3', - 'ntp_list': dns_ntp_arg, - 'dns_list': dns_ntp_arg - } - cluster_settings.update(self.cluster_creds) - - self.show_step(1) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.deploy_cluster( - {'name': self.cluster_names["ceph_ha"], - 'settings': cluster_settings, - 'nodes': - {'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute'], - 'slave-05': ['compute'], - 'slave-06': ['compute'], - 'slave-07': ['ceph-osd'], - 'slave-08': ['ceph-osd'], - 'slave-09': ['ceph-osd']} - }) - - cluster_id = self.fuel_web.get_last_created_cluster() - os_conn = os_actions.OpenStackActions( - self.fuel_web.get_public_vip(cluster_id), - user=self.cluster_creds['user'], - passwd=self.cluster_creds['password'], - tenant=self.cluster_creds['tenant']) - - self.show_step(7) - vmdata = os_conn.boot_parameterized_vms(attach_volume=True, - boot_vm_from_volume=True, - enable_floating_ips=True, - on_each_compute=True) - self.show_step(8) - with open(self.workload_description_file, "w") as file_obj: - yaml.dump(vmdata, file_obj, - default_flow_style=False, default_style='"') - - self.env.make_snapshot(self.source_snapshot_name, is_make=True) - - @test(groups=['upgrade_ceph_ha_backup'], - depends_on_groups=['prepare_upgrade_ceph_ha_before_backup']) - @log_snapshot_after_test - def upgrade_ceph_ha_backup(self): - """Create upgrade backup file for ceph HA cluster - - Scenario: - 1. Revert "prepare_upgrade_ceph_ha_before_backup" snapshot - 2. Install fuel-octane package - 3. Create backup file using 'octane fuel-backup' - 4. Download the backup to the host - - Snapshot: upgrade_ceph_ha_backup - """ - self.check_run(self.backup_snapshot_name) - self.show_step(1) - self.env.revert_snapshot("prepare_upgrade_ceph_ha_before_backup", - skip_timesync=True) - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.do_backup(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - - self.env.make_snapshot(self.backup_snapshot_name, is_make=True) - - @test(groups=['upgrade_ceph_ha_tests', 'upgrade_ceph_ha_restore']) - @log_snapshot_after_test - def upgrade_ceph_ha_restore(self): - """Reinstall Fuel and restore data with Vlan+Ceph+HA cluster - - Scenario: - 1. Revert "upgrade_ceph_ha_backup" snapshot - 2. Reinstall Fuel master using iso given in ISO_PATH - 3. Install fuel-octane package - 4. Upload the backup back to reinstalled Fuel maser node - 5. Restore master node using 'octane fuel-restore' - 6. Verify networks for restored cluster - 7. 
Run OSTF for restored cluster - - Snapshot: upgrade_ceph_ha_restore - Duration: TODO - """ - self.check_run(self.snapshot_name) - - assert_true( - os.path.exists(self.local_path), - "Data backup file was not found at {!r}".format(self.local_path)) - assert_true( - os.path.exists(self.repos_local_path), - "Repo backup file was not found at {!r}".format( - self.repos_local_path)) - - intermediate_snapshot = 'ceph_ha_before_restore' - if not self.env.d_env.has_snapshot(intermediate_snapshot): - self.show_step(1) - self.revert_backup() - self.show_step(2) - self.reinstall_master_node() - self.env.make_snapshot(intermediate_snapshot) - else: - self.env.d_env.revert(intermediate_snapshot) - self.env.resume_environment() - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.do_restore(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - - self.show_step(6) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - # Live migration test could fail - # https://bugs.launchpad.net/fuel/+bug/1471172 - # https://bugs.launchpad.net/fuel/+bug/1604749 - self.check_ostf(cluster_id, ignore_known_issues=True) - - self.env.make_snapshot(self.snapshot_name, is_make=True) - self.cleanup() - - @test(groups=['upgrade_ceph_ha_tests', 'upgrade_ceph_ha_reboot_ctrl'], - depends_on_groups=['upgrade_ceph_ha_restore']) - @log_snapshot_after_test - def upgrade_ceph_ha_reboot_ctrl(self): - """Ensure that controller receives correct boot order from cobbler - - Scenario: - 1. Revert "upgrade_ceph_ha_restore" snapshot. - 2. Warm restart of a controller. - 3. Wait until services become ready. - 4. Run OSTF. - - Duration: 20m - """ - self.show_step(1) - self.env.revert_snapshot(self.snapshot_name) - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=cluster_id, - roles=['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - self.fuel_web.warm_restart_nodes([d_ctrls[0]]) - self.show_step(3) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.assert_os_services_ready(cluster_id) - self.show_step(4) - self.check_ostf(cluster_id, ignore_known_issues=True) - - @test(groups=['upgrade_ceph_ha_tests', 'upgrade_ceph_ha_replace_node'], - depends_on_groups=['upgrade_ceph_ha_restore']) - @log_snapshot_after_test - def upgrade_ceph_ha_replace_node(self): - """Replace 1 compute on ceph node in existing cluster after upgrade - - Scenario: - 1. Revert "upgrade_ceph_ha_restore" snapshot. - 2. Mark 1 compute node for removing - 3. Deploy changes - 4. Run OSTF - 5. Add 1 ceph node - 6. Verify networks - 7. Deploy cluster - 8. 
Run OSTF - - """ - self.show_step(1) - self.env.revert_snapshot(self.snapshot_name) - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-06': ['compute'] - }, - pending_addition=False, - pending_deletion=True - ) - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(4) - self.check_ostf(cluster_id, ignore_known_issues=True) - self.show_step(5) - self.fuel_web.update_nodes(cluster_id, {'slave-06': ['ceph-osd']}) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - # LP 1562736 get_devops_node_by_nailgun_node is not working - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(8) - self.check_ostf(cluster_id, ignore_known_issues=True) diff --git a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_multirack_deployment.py b/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_multirack_deployment.py deleted file mode 100644 index 80347b4ae..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_multirack_deployment.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from devops.helpers.helpers import wait -from proboscis import asserts -from proboscis import SkipTest -from proboscis import test -from proboscis.asserts import assert_true -import yaml - -from fuelweb_test.helpers.checkers import check_ping -from fuelweb_test.helpers.decorators import check_fuel_statistics -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import multiple_networks_hacks -from fuelweb_test.helpers import utils -from fuelweb_test.settings import DEPLOYMENT_MODE_HA -from fuelweb_test.settings import MULTIPLE_NETWORKS -from fuelweb_test.settings import NEUTRON_SEGMENT -from fuelweb_test.settings import NODEGROUPS -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test import logger -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase - - -@test -class TestMultiRackDeployment(DataDrivenUpgradeBase): - """TestMultiRackDeployment""" # TODO documentation - - def __init__(self): - super(TestMultiRackDeployment, self).__init__() - self.backup_name = "backup_multirack.tar.gz" - self.repos_backup_name = "repos_backup_multirack.tar.gz" - self.source_snapshot_name = "prepare_upgrade_multirack_before_backup" - self.backup_snapshot_name = "upgrade_multirack_backup" - self.snapshot_name = "upgrade_multirack_restore" - self.netgroup_description_file = os.path.join( - self.local_dir_for_backups, "multirack_netgroup_data.yaml") - - def restore_firewall_rules(self): - # NOTE: this code works if fuel-qa version is newer than stable/7.0 - admin_devops_node = self.env.d_env.nodes().admin - admin_networks = [iface.network.name - for iface in admin_devops_node.interfaces] - for i, network_name in enumerate(admin_networks): - if 'admin' in network_name and 
'admin' != network_name: - iface_name = 'enp0s' + str(i + 3) - admin_net_obj = self.env.d_env.get_network(name=network_name) - admin_network = admin_net_obj.ip.network - admin_netmask = admin_net_obj.ip.netmask - logger.info('Configure firewall rules for {}/{}' - .format(admin_network, admin_netmask)) - multiple_networks_hacks.configure_second_admin_firewall( - self.ssh_manager.admin_ip, - admin_network, - admin_netmask, - iface_name, - self.env.get_admin_node_ip()) - logger.info('The configuration completed successfully') - - self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, - cmd="cobbler sync") - - @staticmethod - def is_update_dnsmasq_running(tasks): - for task in tasks: - if task['name'] == "update_dnsmasq" and \ - task["status"] == "running": - return True - return False - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["prepare_upgrade_multirack_before_backup"]) - @log_snapshot_after_test - @check_fuel_statistics - def prepare_upgrade_multirack_before_backup(self): - """Deploy HA environment with NeutronVXLAN and 2 nodegroups - - Scenario: - 1. Revert snapshot with ready master node - 2. Bootstrap slaves from default nodegroup - 3. Create cluster with Neutron VXLAN and custom nodegroups - 4. Remove 2nd custom nodegroup which is added automatically - 5. Bootstrap slave nodes from custom nodegroup - 6. Add 3 controller nodes from default nodegroup - 7. Add 2 compute nodes from custom nodegroup - 8. Deploy cluster - 9. Run network verification - 10. Run health checks (OSTF) - - Duration 110m - Snapshot: prepare_upgrade_multirack_before_backup - - """ - - if not MULTIPLE_NETWORKS: - raise SkipTest() - - self.show_step(1) - self.env.revert_snapshot("ready") - - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3]) - - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name="TestMultiRackDeployment", - mode=DEPLOYMENT_MODE_HA, - settings={ - "net_provider": 'neutron', - "net_segment_type": NEUTRON_SEGMENT['tun'], - 'tenant': 'haVxlan', - 'user': 'haVxlan', - 'password': 'haVxlan' - } - ) - - self.show_step(4) - netconf_all_groups = self.fuel_web.client.get_networks(cluster_id) - with open(self.netgroup_description_file, "w") as file_obj: - yaml.dump(netconf_all_groups, file_obj, - default_flow_style=False, default_style='"') - - custom_group2 = self.fuel_web.get_nodegroup( - cluster_id, name=NODEGROUPS[2]['name']) - wait(lambda: not self.is_update_dnsmasq_running( - self.fuel_web.client.get_tasks()), timeout=60, - timeout_msg="Timeout exceeded while waiting for task " - "'update_dnsmasq' is finished!") - self.fuel_web.client.delete_nodegroup(custom_group2['id']) - - self.show_step(5) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5]) - - self.show_step(6) - self.show_step(7) - nodegroup_default = NODEGROUPS[0]['name'] - nodegroup_custom1 = NODEGROUPS[1]['name'] - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': [['controller'], nodegroup_default], - 'slave-02': [['controller'], nodegroup_default], - 'slave-03': [['controller'], nodegroup_default], - 'slave-04': [['compute', 'cinder'], nodegroup_custom1], - 'slave-05': [['compute', 'cinder'], nodegroup_custom1], - } - ) - - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - self.show_step(10) - self.check_ostf(cluster_id=cluster_id) - - self.env.make_snapshot(self.source_snapshot_name, - is_make=True) - - @test(groups=["upgrade_multirack_backup"], - 
depends_on_groups=["prepare_upgrade_multirack_before_backup"]) - @log_snapshot_after_test - def upgrade_multirack_backup(self): - """Create upgrade backup files for multi-rack cluster - - Scenario: - 1. Revert "prepare_upgrade_multirack_before_backup" snapshot - 2. Install fuel-octane package - 3. Create backups for upgrade procedure - 4. Download the backup to the host - - Snapshot: upgrade_multirack_backup - """ - - self.check_run(self.backup_snapshot_name) - self.show_step(1) - self.env.revert_snapshot(self.source_snapshot_name, - skip_timesync=True) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.do_backup(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - - self.env.make_snapshot(self.backup_snapshot_name, is_make=True) - - @test(groups=["upgrade_multirack_test", "upgrade_multirack_restore"]) - @log_snapshot_after_test - def upgrade_multirack_restore(self): - """Restore Fuel master - multi-rack - - Scenario: - 1. Revert "upgrade_multirack_backup" snapshot - 2. Reinstall Fuel master using iso given in ISO_PATH - 3. Install fuel-octane package - 4. Upload the backup back to reinstalled Fuel maser node - 5. Restore master node using 'octane fuel-restore' - 6. Restore firewall rules for other nodegroup - 7. Verify networks - 8. Run OSTF - - Snapshot: upgrade_multirack_restore - """ - - self.check_run(self.snapshot_name) - assert_true(os.path.exists(self.repos_local_path)) - assert_true(os.path.exists(self.local_path)) - - self.show_step(1) - self.revert_backup() - self.show_step(2) - self.reinstall_master_node() - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.do_restore(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - - self.show_step(6) - self.restore_firewall_rules() - self.show_step(7) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.verify_network(cluster_id) - self.show_step(8) - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.env.make_snapshot(self.snapshot_name, is_make=True) - - @test(depends_on_groups=["upgrade_multirack_restore"], - groups=["upgrade_multirack_test", "reset_deploy_multirack"]) - @log_snapshot_after_test - def reset_deploy_multirack(self): - """Reset the existing cluster and redeploy - multi-rack - - Scenario: - 1. Revert "upgrade_multirack_restore" snapshot - 2. Reset the existing cluster - 3. Deploy cluster - 4. Verify networks - 5. Run OSTF - - Snapshot: reset_deploy_multirack - """ - - self.show_step(1) - self.env.revert_snapshot(self.snapshot_name) - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.stop_reset_env_wait(cluster_id) - - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.env.make_snapshot("reset_deploy_multirack") - - @test(depends_on_groups=["upgrade_multirack_restore"], - groups=["upgrade_multirack_test", - "add_custom_nodegroup_after_master_upgrade"]) - @log_snapshot_after_test - def add_custom_nodegroup_after_master_upgrade(self): - """Add new nodegroup to existing operational environment after - Fuel Master upgrade - - Scenario: - 1. Revert "upgrade_multirack_restore" snapshot - 2. 
Create new nodegroup for the environment and configure - it's networks - 3. Bootstrap slave node from custom-2 nodegroup - 4. Add node from new nodegroup to the environment with compute role - 5. Run network verification - 6. Deploy changes - 7. Run network verification - 8. Run OSTF - 9. Check that nodes from 'default' nodegroup can reach nodes - from new nodegroup via management and storage networks - - Duration 50m - Snapshot add_custom_nodegroup_after_master_upgrade - """ - - self.show_step(1) - self.env.revert_snapshot(self.snapshot_name) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.assert_nodes_in_ready_state(cluster_id) - asserts.assert_true(not any(ng['name'] == NODEGROUPS[2]['name'] for ng - in self.fuel_web.client.get_nodegroups()), - 'Custom nodegroup {0} already ' - 'exists!'.format(NODEGROUPS[2]['name'])) - - self.show_step(2) - new_nodegroup = self.fuel_web.client.create_nodegroup( - cluster_id, NODEGROUPS[2]['name']) - logger.debug('Updating custom nodegroup ID in network configuration..') - network_config_new = self.fuel_web.client.get_networks(cluster_id) - with open(self.netgroup_description_file, "r") as file_obj: - netconf_all_groups = yaml.load(file_obj) - - asserts.assert_true(netconf_all_groups is not None, - 'Network configuration for nodegroups is empty!') - - for network in netconf_all_groups['networks']: - if network['group_id'] is not None and \ - not any(network['group_id'] == ng['id'] - for ng in self.fuel_web.client.get_nodegroups()): - network['group_id'] = new_nodegroup['id'] - for new_network in network_config_new['networks']: - if new_network['name'] == network['name'] and \ - new_network['group_id'] == network['group_id']: - network['id'] = new_network['id'] - - self.fuel_web.client.update_network( - cluster_id, - netconf_all_groups['networking_parameters'], - netconf_all_groups['networks']) - - self.show_step(3) - self.env.bootstrap_nodes([self.env.d_env.nodes().slaves[6]]) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - {'slave-07': [['compute'], new_nodegroup['name']]}, - True, False - ) - - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id) - - self.show_step(9) - primary_ctrl = self.fuel_web.get_nailgun_node_by_devops_node( - self.fuel_web.get_nailgun_primary_node( - slave=self.env.d_env.nodes().slaves[0])) - - with self.fuel_web.get_ssh_for_node('slave-07') as remote: - new_node_networks = utils.get_net_settings(remote) - - for interface in ('br-storage', 'br-mgmt'): - if interface in new_node_networks: - logger.info("Checking new node is accessible from primary " - "controller via {0} interface.".format(interface)) - for ip in new_node_networks[interface]['ip_addresses']: - address = ip.split('/')[0] - result = check_ping(primary_ctrl['ip'], - address, - timeout=3) - asserts.assert_true(result, - "New node isn't accessible from " - "primary controller via {0} interface" - ": {1}.".format(interface, result)) - - self.env.make_snapshot("add_custom_nodegroup_after_master_upgrade") diff --git a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_net_tmpl.py b/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_net_tmpl.py deleted file mode 100644 index c7494b392..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_net_tmpl.py +++ /dev/null @@ -1,363 +0,0 @@ -# 
Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from copy import deepcopy -import os - -from devops.helpers.helpers import wait -from proboscis import test -from proboscis.asserts import assert_not_equal, assert_true - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import get_network_template -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - LooseVersion - - -@test -class TestUpgradeNetworkTemplates(TestNetworkTemplatesBase, - DataDrivenUpgradeBase): - """Test upgrade of master node with cluster deployed with net template.""" - - def __init__(self): - super(self.__class__, self).__init__() - self.backup_name = "backup_net_tmpl.tar.gz" - self.repos_backup_name = "repos_backup_net_tmpl.tar.gz" - self.source_snapshot_name = "upgrade_net_tmpl_backup" - self.backup_snapshot_name = self.source_snapshot_name - self.snapshot_name = "upgrade_net_tmpl_restore" - assert_not_equal( - settings.KEYSTONE_CREDS['password'], 'admin', - "Admin password was not changed, aborting execution") - - @test(depends_on=[SetupEnvironment.prepare_slaves_9], - groups=["upgrade_net_tmpl_backup"]) - @log_snapshot_after_test - def upgrade_net_tmpl_backup(self): - """Deploy HA environment with Ceph, Neutron and network template - - Scenario: - 1. Revert snapshot with 9 slaves - 2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE - 3. Add 3 controller - 4. Add 3 ceph osd - 5. Add 2 compute - 6. Upload 'upgrades' network template - 7. Create custom network groups basing on template endpoints - assignments - 8. Run network verification - 9. Deploy cluster - 10. Run network verification - 11. Run health checks (OSTF) - 12. Check L3 network configuration on slaves - 13. Check that services are listening on their networks only - 14. Install fuel-octane package - 15. 
Create backups for upgrade procedure - - Duration 180m - Snapshot upgrade_net_tmpl_backup - """ - self.check_run(self.source_snapshot_name) - - intermediate_snapshot = "prepare_upgrade_tmpl_before_backup" - if not self.env.d_env.has_snapshot(intermediate_snapshot): - self.show_step(1) - self.env.revert_snapshot("ready_with_9_slaves") - self.show_step(2) - cluster_settings = { - 'volumes_ceph': True, 'images_ceph': True, - 'volumes_lvm': False, 'ephemeral_ceph': True, - 'objects_ceph': True, - 'net_provider': 'neutron', - 'net_segment_type': - settings.NEUTRON_SEGMENT['vlan']} - cluster_settings.update(self.cluster_creds) - - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - settings=cluster_settings) - - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.fuel_web.update_nodes( - cluster_id, - {'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['ceph-osd'], - 'slave-05': ['ceph-osd'], - 'slave-06': ['ceph-osd'], - 'slave-07': ['compute'], - 'slave-08': ['compute']}, - update_interfaces=False) - - self.show_step(6) - network_template = get_network_template("upgrades") - self.fuel_web.client.upload_network_template( - cluster_id=cluster_id, network_template=network_template) - self.show_step(7) - # pylint: disable=redefined-variable-type - if settings.UPGRADE_FUEL_FROM == "7.0": - network = '10.200.0.0/16' - else: - network = {'default': '10.200.0.0/16'} - # pylint: enable=redefined-variable-type - networks = self.generate_networks_for_template( - network_template, network, '24') - existing_networks = self.fuel_web.client.get_network_groups() - networks = self.create_custom_networks(networks, existing_networks) - - logger.debug('Networks: {0}'.format( - self.fuel_web.client.get_network_groups())) - - self.show_step(8) - self.fuel_web.verify_network(cluster_id) - - self.show_step(9) - self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60) - - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - # Live migration test could fail - # https://bugs.launchpad.net/fuel/+bug/1471172 - # https://bugs.launchpad.net/fuel/+bug/1604749 - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.show_step(12) - self.check_ipconfig_for_template(cluster_id, network_template, - networks) - - self.show_step(13) - self.check_services_networks(cluster_id, network_template) - - self.env.make_snapshot(intermediate_snapshot) - - # revert_snapshot will do nothing if there is no snapshot - self.env.revert_snapshot(intermediate_snapshot) - - self.show_step(13) - self.show_step(14) - self.do_backup(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - self.env.make_snapshot(self.source_snapshot_name, is_make=True) - - @test(groups=["upgrade_net_tmpl_restore"]) - @log_snapshot_after_test - def upgrade_net_tmpl_restore(self): - """Restore Fuel master - network templates - - Scenario: - 1. Revert "upgrade_net_tmpl_backup" snapshot - 2. Reinstall Fuel master using iso given in ISO_PATH - 3. Install fuel-octane package - 4. Upload the backup back to reinstalled Fuel maser node - 5. Restore master node using 'octane fuel-restore' - 6. Check that network template is still available - 7. Verify networks - 8. 
Run OSTF - - Snapshot: upgrade_net_tmpl_restore - """ - - self.check_run(self.snapshot_name) - assert_true(os.path.exists(self.repos_local_path)) - assert_true(os.path.exists(self.local_path)) - - self.show_step(1) - self.revert_backup() - self.show_step(2) - self.reinstall_master_node() - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.do_restore(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - self.show_step(6) - cluster_id = self.fuel_web.get_last_created_cluster() - # get_network_template will raise en exception if there is no template - template = self.fuel_web.client.get_network_template(cluster_id) - if LooseVersion(settings.UPGRADE_FUEL_FROM) == LooseVersion("7.0"): - # replace network mapping from eth* schema to enp0s* schema for all - # deployed nodes - nic_map = template['adv_net_template']['default']['nic_mapping'] - default_mapping = nic_map['default'] - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - template['adv_net_template']['default']['nic_mapping'][ - node['hostname']] = deepcopy(default_mapping) - new_template = get_network_template("upgrades") - template['adv_net_template']['default']['nic_mapping'][ - 'default'] = new_template['adv_net_template']['default'][ - 'nic_mapping']['default'] - self.fuel_web.client.upload_network_template(cluster_id, template) - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - self.show_step(8) - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.env.make_snapshot("upgrade_net_tmpl_restore", is_make=True) - - @test(depends_on_groups=["upgrade_net_tmpl_restore"], - groups=["reset_deploy_net_tmpl", "upgrade_net_tmpl_tests"]) - @log_snapshot_after_test - def reset_deploy_net_tmpl(self): - """Reset the existing cluster and redeploy - network templates - - Scenario: - 1. Revert "upgrade_net_tmpl_restore" snapshot - 2. Reset the existing cluster - 3. Deploy cluster - 4. Verify networks - 5. 
Run OSTF - - Snapshot: reset_cluster_net_tmpl - """ - - self.show_step(1) - self.env.revert_snapshot("upgrade_net_tmpl_restore") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.stop_reset_env_wait(cluster_id) - - # After reset nodes will use new interface naming scheme which - # conflicts with nailgun data (it still contains eth-named - # interfaces and there is no way to fix it) - # LP : 1553210 - if LooseVersion(settings.UPGRADE_FUEL_FROM) == LooseVersion("7.0"): - template = self.fuel_web.client.get_network_template(cluster_id) - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - del template['adv_net_template']['default']['nic_mapping'][ - node['hostname']] - self.fuel_web.delete_node(node['id']) - self.fuel_web.client.upload_network_template(cluster_id, template) - slaves = self.env.d_env.nodes().slaves[:7] - wait(lambda: all(self.env.nailgun_nodes(slaves)), timeout=10 * 60) - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - wait(lambda: self.fuel_web.is_node_discovered(node), - timeout=60) - - self.show_step(3) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.env.make_snapshot("reset_deploy_net_tmpl") - - @test(depends_on_groups=["upgrade_net_tmpl_restore"], - groups=["replace_controller_net_tmpl", "upgrade_net_tmpl_tests"]) - @log_snapshot_after_test - def replace_controller_net_tmpl(self): - """Replace controller and redeploy - network templates - - Scenario: - 1. Revert "upgrade_net_tmpl_restore" snapshot - 2. Remove the existing controller - 3. Add new controller - 4. Verify networks - 5. Deploy cluster - 6. Verify networks - 7. Run OSTF - - Snapshot: replace_controller_net_tmpl - """ - - self.show_step(1) - self.env.revert_snapshot("upgrade_net_tmpl_restore") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'] - }, - pending_addition=False, - pending_deletion=True - ) - - self.show_step(4) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-09': ['controller'] - }, - update_interfaces=False - ) - - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - - self.show_step(7) - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.env.make_snapshot("replace_controller_net_tmpl", is_make=True) - - @test(depends_on_groups=["replace_controller_net_tmpl"], - groups=["restart_node_net_tmpl", "upgrade_net_tmpl_tests"]) - @log_snapshot_after_test - def restart_node_net_tmpl(self): - """Reboot node after controller replacement - network templates - - Scenario: - 1. Revert "replace_controller_net_tmpl" snapshot - 2. Reboot node - 3. Wait until OS and HA services are ready - 4. Verify networks - 5. 
@test(depends_on_groups=["replace_controller_net_tmpl"], - groups=["restart_node_net_tmpl", "upgrade_net_tmpl_tests"]) - @log_snapshot_after_test - def restart_node_net_tmpl(self): - """Reboot node after controller replacement - network templates - - Scenario: - 1. Revert "replace_controller_net_tmpl" snapshot - 2. Reboot node - 3. Wait until OS and HA services are ready - 4. Verify networks - 5. Run OSTF - - Snapshot: restart_node_net_tmpl - """ - - self.show_step(1) - self.env.revert_snapshot("replace_controller_net_tmpl") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.cold_restart_nodes( - self.env.d_env.get_nodes(name__in=['slave-03'])) - - self.show_step(3) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.assert_os_services_ready(cluster_id) - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.env.make_snapshot("restart_node_net_tmpl") diff --git a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_no_cluster.py b/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_no_cluster.py deleted file mode 100644 index 6daef1e93..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_no_cluster.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis import test -from proboscis.asserts import assert_true - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase - - -@test -class UpgradeNoCluster(DataDrivenUpgradeBase): - def __init__(self): - super(self.__class__, self).__init__() - self.backup_name = "backup_no_cluster.tar.gz" - self.repos_backup_name = "repos_backup_no_cluster.tar.gz" - self.source_snapshot_name = "upgrade_no_cluster_backup" - self.snapshot_name = "upgrade_no_cluster_restore" - - @test(groups=['upgrade_no_cluster_backup'], - depends_on=[SetupEnvironment.prepare_release]) - @log_snapshot_after_test - def upgrade_no_cluster_backup(self): - """Prepare Fuel master node without cluster - - Scenario: - 1. Create backup file using 'octane fuel-backup' - 2. Download the backup to the host - - Duration 5m - """ - self.check_run("upgrade_no_cluster_backup") - self.env.revert_snapshot("ready", skip_timesync=True) - - self.do_backup(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - self.env.make_snapshot("upgrade_no_cluster_backup", - is_make=True)
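The do_backup()/do_restore() calls used throughout these tests wrap an octane round trip; a hedged sketch of that flow, assuming an SSH wrapper with check_call(), download() and upload() methods (hypothetical names, and assuming octane's fuel-backup/fuel-restore take --to/--from as in the upgrade runbook):

    def do_backup(remote, backup_path, local_path):
        # create the archive on the master node, then fetch it to the host
        remote.check_call("octane fuel-backup --to {0}".format(backup_path))
        remote.download(backup_path, local_path)

    def do_restore(remote, backup_path, local_path):
        # push the archive back to the reinstalled master and restore from it
        remote.upload(local_path, backup_path)
        remote.check_call("octane fuel-restore --from {0}".format(backup_path))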
@test(groups=['upgrade_no_cluster_tests', 'upgrade_no_cluster_restore']) - @log_snapshot_after_test - def upgrade_no_cluster_restore(self): - """Reinstall Fuel and restore data with detach-db plugin and without - cluster - - Scenario: - 1. Revert "upgrade_no_cluster_backup" snapshot - 2. Reinstall Fuel master using iso given in ISO_PATH - 3. Install fuel-octane package - 4. Upload the backup back to reinstalled Fuel master node - 5. Restore master node using 'octane fuel-restore' - 6. Ensure that master node was restored - - Duration: 60m - Snapshot: upgrade_no_cluster_restore - - """ - self.check_run(self.snapshot_name) - assert_true(os.path.exists(self.local_path), - "Can't find backup file at {!r}".format(self.local_path)) - assert_true( - os.path.exists(self.repos_local_path), - "Can't find backup file at {!r}".format(self.repos_local_path)) - intermediate_snapshot = 'no_cluster_before_restore' - if not self.env.d_env.has_snapshot(intermediate_snapshot): - self.show_step(1) - assert_true( - self.env.revert_snapshot(self.source_snapshot_name), - "The test can not use given environment - snapshot " - "{!r} does not exist".format(self.source_snapshot_name)) - self.show_step(2) - self.reinstall_master_node() - self.env.make_snapshot(intermediate_snapshot) - else: - self.env.d_env.revert(intermediate_snapshot) - self.env.resume_environment() - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.do_restore(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - self.show_step(6) - self.fuel_web.change_default_network_settings() - self.fuel_web.client.get_releases() - # TODO(vkhlyunev): add additional checks for validation of node - self.env.make_snapshot(self.snapshot_name, is_make=True) - self.cleanup() - - @test(groups=['upgrade_no_cluster_tests', 'upgrade_no_cluster_deploy'], - depends_on_groups=['upgrade_no_cluster_restore']) - @log_snapshot_after_test - def upgrade_no_cluster_deploy(self): - """Deploy fresh cluster using restored empty Fuel - - Scenario: - 1. Revert "upgrade_no_cluster_restore" snapshot - 2. Bootstrap 2 additional nodes - 3. Create cluster, add 1 controller and 1 compute nodes - 4. Verify networks - 5. Deploy cluster - 6. Verify networks - 7. Run OSTF - """ - - self.show_step(1) - self.env.revert_snapshot(self.snapshot_name) - self.show_step(2) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3]) - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.upgrade_no_cluster_deploy.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - } - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'] - } - ) - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - self.show_step(5) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.check_ostf(cluster_id) - self.env.make_snapshot("upgrade_no_cluster_deploy")
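upgrade_no_cluster_deploy_old_cluster below picks the old release by filtering the release list; the selection reduces to this sketch (pure Python, with releases as returned by a get_releases()-style call):

    def pick_release_id(releases, version_substring):
        # first deployable release whose version mentions the source Fuel version
        return [r['id'] for r in releases
                if r['is_deployable'] and version_substring in r['version']][0]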
@test(groups=['upgrade_no_cluster_tests', - 'upgrade_no_cluster_deploy_old_cluster'], - depends_on_groups=['upgrade_no_cluster_restore']) - @log_snapshot_after_test - def upgrade_no_cluster_deploy_old_cluster(self): - """Deploy old cluster using upgraded Fuel. - - Scenario: - 1. Revert 'upgrade_no_cluster_restore' snapshot - 2. Create new cluster with old release and default parameters - 3. Add 1 node with controller role - 4. Add 1 node with compute+cinder roles - 5. Verify network - 6. Deploy changes - 7. Run OSTF - - Snapshot: upgrade_no_cluster_new_deployment - Duration: TODO - """ - self.show_step(1, initialize=True) - self.env.revert_snapshot(self.snapshot_name, skip_timesync=True) - - self.show_step(2) - releases = self.fuel_web.client.get_releases() - release_id = [ - release['id'] for release in releases if - release['is_deployable'] and - settings.UPGRADE_FUEL_FROM in release['version']][0] - cluster_id = self.fuel_web.create_cluster( - name=self.upgrade_no_cluster_deploy_old_cluster.__name__, - mode=settings.DEPLOYMENT_MODE, - release_id=release_id, - settings={ - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - } - ) - self.show_step(3) - self.show_step(4) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:2]) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'] - } - ) - self.show_step(5) - self.fuel_web.verify_network(cluster_id) - self.show_step(6) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(7) - self.check_ostf(cluster_id, ignore_known_issues=True) diff --git a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_plugin.py b/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_plugin.py deleted file mode 100644 index 3304c02be..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_plugin.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from proboscis import test -from proboscis.asserts import assert_true, assert_not_equal, assert_is_not_none - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - LooseVersion - - -@test -class UpgradePlugin(DataDrivenUpgradeBase): - def __init__(self): - super(self.__class__, self).__init__() - self.source_snapshot_name = "upgrade_plugin_backup" - self.snapshot_name = "upgrade_plugin_restore" - self.backup_name = "backup_plugin.tar.gz" - self.repos_backup_name = "repos_backup_plugin.tar.gz" - - if LooseVersion(settings.UPGRADE_FUEL_FROM) < LooseVersion("9.0"): - self.plugin_url = settings.EXAMPLE_V3_PLUGIN_REMOTE_URL - self.plugin_name = "fuel_plugin_example_v3" - self.plugin_custom_role = "fuel_plugin_example_v3" - else: - self.plugin_url = settings.EXAMPLE_V4_PLUGIN_REMOTE_URL - self.plugin_name = "fuel_plugin_example_v4" - self.plugin_custom_role = "fuel_plugin_example_v4"
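The v3/v4 plugin switch in __init__ above leans on distutils' LooseVersion for a tolerant version comparison; a self-contained sketch of the same gate:

    from distutils.version import LooseVersion

    def plugin_variant(upgrade_from):
        if LooseVersion(upgrade_from) < LooseVersion("9.0"):
            return "fuel_plugin_example_v3"
        return "fuel_plugin_example_v4"

    assert plugin_variant("8.0") == "fuel_plugin_example_v3"
    assert plugin_variant("9.0") == "fuel_plugin_example_v4"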
@test(groups=['upgrade_plugin_backup'], - depends_on=[SetupEnvironment.prepare_release]) - @log_snapshot_after_test - def upgrade_plugin_backup(self): - """Prepare fuel+example plugin with cluster - Using: HA, ceph for all - - Scenario: - 1. Install fuel_plugin_example_v3 plugin on master node - 2. Create cluster with NeutronTUN network provider - 3. Enable plugin for created cluster - 4. Add 1 node with controller role - 5. Add 1 node with fuel_plugin_example_v3 role - 6. Add 3 nodes with compute+ceph roles - 7. Verify networks - 8. Deploy cluster - 9. Install fuel-octane package - 10. Create backup file using 'octane fuel-backup' - 11. Download the backup to the host - - Duration: TODO - Snapshot: upgrade_plugin_backup - """ - - assert_is_not_none(self.plugin_url, - "EXAMPLE_V[34]_PLUGIN_REMOTE_URL is not defined!") - example_plugin_remote_name = os.path.join( - "/var", - os.path.basename(self.plugin_url)) - - self.check_run(self.source_snapshot_name) - - self.show_step(1) - self.env.revert_snapshot("ready", skip_timesync=True) - - # using curl to predict file name and avoid '*.rpm'-like patterns - admin_remote = self.env.d_env.get_admin_remote() - admin_remote.check_call( - "curl -s {url} > {location}".format( - url=self.plugin_url, - location=example_plugin_remote_name)) - admin_remote.check_call( - "fuel plugins --install {location} ".format( - location=example_plugin_remote_name)) - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.show_step(6) - self.show_step(7) - cluster_settings = { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['tun'], - 'volumes_lvm': False, - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ephemeral_ceph': True, - } - cluster_settings.update(self.cluster_creds) - - self.show_step(8) - self.deploy_cluster({ - 'name': self.upgrade_plugin_backup.__name__, - 'settings': cluster_settings, - 'plugin': - {'name': self.plugin_name, - 'data': {'metadata/enabled': True}}, - 'nodes': - {'slave-01': ['controller'], - 'slave-02': [self.plugin_custom_role], - 'slave-03': ['compute', 'ceph-osd'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd']} - }) - self.show_step(9) - self.show_step(10) - self.show_step(11) - self.do_backup(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - self.env.make_snapshot(self.source_snapshot_name, is_make=True)
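The curl-then-install step in upgrade_plugin_backup above exists only to make the uploaded file name predictable; a minimal sketch, assuming a remote wrapper exposing check_call() (hypothetical, not the fuelweb_test SSH API):

    import os

    def install_plugin(remote, plugin_url):
        location = os.path.join("/var", os.path.basename(plugin_url))
        # curl to a fixed path instead of relying on '*.rpm'-style globs
        remote.check_call("curl -s {0} > {1}".format(plugin_url, location))
        remote.check_call("fuel plugins --install {0}".format(location))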
@test(groups=['upgrade_plugin_tests', 'upgrade_plugin_restore']) - @log_snapshot_after_test - def upgrade_plugin_restore(self): - """Reinstall Fuel and restore data with cluster with example plugin - - Scenario: - 1. Revert "upgrade_plugin_backup" snapshot - 2. Reinstall Fuel master using iso given in ISO_PATH - 3. Install fuel-octane package - 4. Upload the backup back to reinstalled Fuel master node - 5. Restore master node using 'octane fuel-restore' - 6. Ensure that the plugin was restored - 7. Verify networks for restored cluster - 8. Run OSTF for restored cluster - - Snapshot: upgrade_plugin_restore - """ - self.check_run(self.snapshot_name) - assert_true(os.path.exists(self.repos_local_path)) - assert_true(os.path.exists(self.local_path)) - - intermediate_snapshot = 'plugin_before_restore' - if not self.env.d_env.has_snapshot(intermediate_snapshot): - self.show_step(1) - assert_true( - self.env.revert_snapshot(self.source_snapshot_name), - "The test can not use given environment - snapshot " - "{!r} does not exist".format(self.source_snapshot_name)) - self.show_step(2) - self.reinstall_master_node() - self.env.make_snapshot(intermediate_snapshot) - else: - self.env.d_env.revert(intermediate_snapshot) - self.env.resume_environment() - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.do_restore(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - - cluster_id = self.fuel_web.get_last_created_cluster() - self.show_step(6) - attr = self.fuel_web.client.get_cluster_attributes(cluster_id) - assert_true(self.plugin_name in attr['editable'], - "Can't find plugin data in cluster attributes!") - admin_remote = self.env.d_env.get_admin_remote() - stdout = admin_remote.check_call( - "find /var/www/nailgun/plugins/ " - "-name fuel_plugin_example_v*")['stdout'] - assert_not_equal(len(stdout), 0, "Cannot find the plugin directory") - plugin_dir = stdout[0].strip() - - assert_true( - admin_remote.exists(os.path.join(plugin_dir, "metadata.yaml")), - "Plugin files were not found!") - - self.show_step(7) - self.fuel_web.verify_network(cluster_id) - self.show_step(8) - # Live migration test could fail - # https://bugs.launchpad.net/fuel/+bug/1471172 - # https://bugs.launchpad.net/fuel/+bug/1604749 - self.check_ostf(cluster_id, ignore_known_issues=True) - - self.env.make_snapshot(self.snapshot_name, is_make=True) - self.cleanup() - - @test(groups=['upgrade_plugin_tests', 'upgrade_plugin_scale'], - depends_on_groups=['upgrade_plugin_restore']) - @log_snapshot_after_test - def upgrade_plugin_scale(self): - """Add 1 node with plugin custom role to existing cluster - - Scenario: - 1. Revert "upgrade_plugin_restore" snapshot - 2. Add 1 fuel_plugin_example_v3 node - 3. Verify networks - 4. Deploy cluster - 5. Run OSTF - - Duration: 60m - """ - - self.show_step(1) - self.env.revert_snapshot(self.snapshot_name) - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - slave_name = "slave-0{}".format( - len(self.fuel_web.client.list_nodes()) + 1) - self.env.bootstrap_nodes([self.env.d_env.get_node(name=slave_name)]) - self.fuel_web.update_nodes(cluster_id, - {slave_name: [self.plugin_custom_role]}) - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(5) - self.check_ostf(cluster_id, ignore_known_issues=True) diff --git a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_smoke.py b/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_smoke.py deleted file mode 100644 index 3a7a8e8a4..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_data_driven_upgrade_smoke.py +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from devops.helpers.helpers import wait -from proboscis import test -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_not_equal -from proboscis.asserts import assert_true - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase - - -@test -class UpgradeSmoke(DataDrivenUpgradeBase): - - def __init__(self): - super(self.__class__, self).__init__() - self.backup_name = "backup_smoke.tar.gz" - self.repos_backup_name = "repos_backup_smoke.tar.gz" - - self.source_snapshot_name = "prepare_upgrade_smoke_before_backup" - self.backup_snapshot_name = "upgrade_smoke_backup" - self.snapshot_name = "upgrade_smoke_restore" - - assert_not_equal( - settings.KEYSTONE_CREDS['password'], 'admin', - "Admin password was not changed, aborting execution") - - @test(groups=['prepare_upgrade_smoke_before_backup'], - depends_on=[SetupEnvironment.prepare_release]) - @log_snapshot_after_test - def prepare_upgrade_smoke_before_backup(self): - """Prepare non-HA+cinder cluster using previous version of Fuel - Nailgun password should be changed via KEYSTONE_PASSWORD env variable - - Scenario: - 1. Create cluster with default configuration - 2. Add 1 node with controller role - 3. Add 1 node with compute+cinder roles - 4. Verify networks - 5. Deploy cluster - - Snapshot: prepare_upgrade_smoke_before_backup - """ - self.check_run(self.source_snapshot_name) - self.env.revert_snapshot("ready", skip_timesync=True) - - cluster_settings = { - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - } - cluster_settings.update(self.cluster_creds) - - self.show_step(1) - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.deploy_cluster( - {'name': self.cluster_names["smoke"], - 'settings': cluster_settings, - 'nodes': {'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder']} - } - ) - - self.env.make_snapshot(self.source_snapshot_name, is_make=True) - - @test(groups=['upgrade_smoke_backup'], - depends_on_groups=['prepare_upgrade_smoke_before_backup']) - @log_snapshot_after_test - def upgrade_smoke_backup(self): - """Create upgrade backup file for non-HA cluster - - Scenario: - 1. Revert "prepare_upgrade_smoke_before_backup" snapshot - 2. Install fuel-octane package - 3. Create backup file using 'octane fuel-backup' - 4. 
Download the backup to the host - - Snapshot: upgrade_smoke_backup - """ - self.check_run(self.backup_snapshot_name) - self.show_step(1) - self.revert_source() - - self.show_step(2) - self.show_step(3) - self.show_step(4) - self.do_backup(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - self.env.make_snapshot(self.backup_snapshot_name, is_make=True) - - @test(groups=['upgrade_smoke_tests', 'upgrade_smoke_restore']) - @log_snapshot_after_test - def upgrade_smoke_restore(self): - """Reinstall Fuel and restore non-HA cluster using fuel-octane. - - Scenario: - 1. Revert "upgrade_smoke_backup" snapshot - 2. Reinstall Fuel master using iso given in ISO_PATH - 3. Install fuel-octane package - 4. Upload the backup back to reinstalled Fuel master node - 5. Restore master node using 'octane fuel-restore' - 6. Check that nailgun is available - 7. Check cobbler configs for all discovered nodes - 8. Check ubuntu bootstrap is available - 9. Verify networks - 10. Run OSTF - - Snapshot: upgrade_smoke_restore - Duration: TODO - """ - - self.check_run(self.snapshot_name) - assert_true(os.path.exists(self.repos_local_path)) - assert_true(os.path.exists(self.local_path)) - - intermediate_snapshot = 'upgrade_smoke_before_restore' - if not self.env.d_env.has_snapshot(intermediate_snapshot): - self.show_step(1) - self.revert_backup() - self.show_step(2) - self.reinstall_master_node() - self.env.make_snapshot(intermediate_snapshot) - else: - self.show_step(1) - self.show_step(2) - self.env.d_env.revert(intermediate_snapshot) - self.env.resume_environment() - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.do_restore(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - # Check nailgun api is available - self.show_step(6) - self.fuel_web.change_default_network_settings() - - cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(7) - for node in self.fuel_web.client.list_cluster_nodes(cluster_id): - self.check_cobbler_node_exists(node['id']) - - # Check non-default parameters of the cluster - creds = self.fuel_web.get_cluster_credentials(cluster_id) - assert_equal(sorted(creds.values()), - sorted(self.cluster_creds.values())) - - self.show_step(8) - slave_03 = self.env.d_env.get_node(name="slave-03") - self.env.bootstrap_nodes([slave_03]) - with self.fuel_web.get_ssh_for_node(slave_03.name) as slave_remote: - self.verify_bootstrap_on_node(slave_remote, "ubuntu") - - self.show_step(9) - self.fuel_web.verify_network(cluster_id) - self.show_step(10) - # Live migration test could fail - # https://bugs.launchpad.net/fuel/+bug/1471172 - # https://bugs.launchpad.net/fuel/+bug/1604749 - self.check_ostf(cluster_id, ignore_known_issues=True) - - self.env.make_snapshot("upgrade_smoke_restore", is_make=True) - self.cleanup() - - @test(groups=['upgrade_smoke_tests', 'upgrade_smoke_scale'], - depends_on_groups=['upgrade_smoke_restore']) - @log_snapshot_after_test - def upgrade_smoke_scale(self): - """Scale already existing cluster using upgraded Fuel. - - Scenario: - 1. Revert 'upgrade_smoke_restore' snapshot - 2. Add to existing cluster 3 nodes with controller role - 3. Verify network - 4. Deploy changes - 5. Run OSTF - 6. Remove from the cluster 1 node with controller role - 7. Deploy changes - 8. Wait until nodes are discovered - 9. Verify that bootstrapped nodes are using ubuntu bootstrap - 10. Verify network - 11. 
Run OSTF - - Snapshot: upgrade_smoke_scale - Duration: TODO - """ - self.check_run("upgrade_smoke_scale") - self.show_step(1) - self.revert_restore() - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:6]) - self.fuel_web.update_nodes( - cluster_id, - {'slave-04': ['controller'], - 'slave-05': ['controller'], - 'slave-06': ['controller']}) - self.show_step(3) - self.fuel_web.verify_network(cluster_id) - self.show_step(4) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(5) - self.check_ostf(cluster_id, ignore_known_issues=True) - - cluster_id = self.fuel_web.get_last_created_cluster() - self.show_step(6) - nodes_to_remove = {'slave-06': ['controller']} - - nailgun_nodes = self.fuel_web.update_nodes( - cluster_id, nodes_to_remove, False, True) - - pending_nodes = [x for x in nailgun_nodes if x["pending_deletion"]] - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False) - self.show_step(8) - self.show_step(9) - for node in pending_nodes: - self.fuel_web.wait_node_is_discovered(node) - with self.fuel_web.get_ssh_for_node( - self.fuel_web.get_devops_node_by_nailgun_node( - node).name) as slave_remote: - self.verify_bootstrap_on_node(slave_remote, "ubuntu") - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - self.check_ostf(cluster_id, ignore_known_issues=True) - self.env.make_snapshot("upgrade_smoke_scale", is_make=True) - - @test(groups=['upgrade_smoke_tests', 'upgrade_smoke_reset_deploy'], - depends_on_groups=['upgrade_smoke_restore']) - @log_snapshot_after_test - def upgrade_smoke_reset_deploy(self): - """Reset existing cluster and redeploy - - Scenario: - 1. Revert "upgrade_smoke_restore". - 2. Reset cluster. - 3. Delete nodes from nailgun. - 4. Wait until nodes are discovered. - 5. Re-add nodes back to cluster. - 6. Verify networks. - 7. Deploy cluster. - 8. Run OSTF. - - Duration: TODO - """ - self.show_step(1) - self.revert_restore() - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.stop_reset_env_wait(cluster_id) - - # After reset nodes will use new interface naming scheme which - # conflicts with nailgun data (it still contains eth-named - # interfaces and there is no way to fix it) - # LP : 1553210 - self.show_step(3) - for node in self.fuel_web.client.list_cluster_nodes( - cluster_id=cluster_id): - self.fuel_web.delete_node(node['id']) - - self.show_step(4) - - nodes = self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id) - for node in nodes: - self.fuel_web.wait_node_is_discovered(node, timeout=10 * 60) - - self.show_step(5) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'] - } - ) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(8) - self.check_ostf(cluster_id, ignore_known_issues=True) - - @test(groups=['upgrade_smoke_tests', 'upgrade_smoke_new_deployment'], - depends_on_groups=['upgrade_smoke_restore']) - @log_snapshot_after_test - def upgrade_smoke_new_deployment(self): - """Deploy new release cluster using upgraded Fuel. - - Scenario: - 1. Revert 'upgrade_smoke_restore' snapshot - 2. Delete existing cluster - 3. Create new cluster with default parameters - 4. Add 1 node with controller role - 5. Add 1 node with compute+cinder roles - 6. Verify network - 7. Deploy changes - 8. 
Run OSTF - - Snapshot: upgrade_smoke_new_deployment - Duration: TODO - """ - self.show_step(1) - self.revert_restore() - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes( - self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id) - ) - self.fuel_web.client.delete_cluster(cluster_id) - wait(lambda: not any([cluster['id'] == cluster_id for cluster in - self.fuel_web.client.list_clusters()]), - timeout=10 * 60, - timeout_msg='Failed to delete cluster id={}'.format(cluster_id)) - self.env.bootstrap_nodes(devops_nodes) - - self.show_step(3) - cluster_id = self.fuel_web.create_cluster( - name=self.upgrade_smoke_new_deployment.__name__, - mode=settings.DEPLOYMENT_MODE, - settings={ - 'net_provider': settings.NEUTRON, - 'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] - } - ) - self.show_step(4) - self.show_step(5) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute', 'cinder'] - } - ) - self.show_step(6) - self.fuel_web.verify_network(cluster_id) - self.show_step(7) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(8) - self.check_ostf(cluster_id) - self.env.make_snapshot("upgrade_smoke_new_deployment") - - @test(depends_on_groups=["upgrade_smoke_scale"], - groups=["upgrade_smoke_tests", "upgrade_smoke_restart_node"]) - @log_snapshot_after_test - def upgrade_smoke_restart_node(self): - """Reboot primary controller after upgrading Fuel - - Scenario: - 1. Revert "upgrade_smoke_scale" snapshot - 2. Reboot node - 3. Wait until OS and HA services are ready - 4. Verify networks - 5. Run OSTF - - Snapshot: upgrade_smoke_restart_node - """ - - self.show_step(1) - self.env.revert_snapshot("upgrade_smoke_scale") - - self.show_step(2) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.cold_restart_nodes( - self.env.d_env.get_nodes(name__in=['slave-01'])) - - self.show_step(3) - self.fuel_web.assert_ha_services_ready(cluster_id) - self.fuel_web.assert_os_services_ready(cluster_id) - self.show_step(4) - self.fuel_web.verify_network(cluster_id) - - self.show_step(5) - self.check_ostf(cluster_id=cluster_id, - test_sets=['smoke', 'sanity', 'ha'], - ignore_known_issues=True) - self.env.make_snapshot("upgrade_smoke_restart_node") diff --git a/fuelweb_test/tests/tests_upgrade/test_node_reassignment.py b/fuelweb_test/tests/tests_upgrade/test_node_reassignment.py deleted file mode 100644 index 85aa39f54..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_node_reassignment.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
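The reassignment tests in the next file all drive the same expected-error idiom: call the API, pass when the expected exception arrives, and fail loudly when nothing is raised. A minimal reusable sketch of that idiom, assuming proboscis is installed:

    from contextlib import contextmanager

    from proboscis.asserts import fail

    @contextmanager
    def assert_raises(exc_cls, message):
        """Fail unless the wrapped block raises exc_cls."""
        try:
            yield
        except exc_cls:
            pass  # the expected error class arrived - test passes
        else:
            fail(message)

    # usage, with a hypothetical client:
    # with assert_raises(NotFound, "reassigning to a missing cluster must 404"):
    #     client.reassign_node(123456, {"nodes_ids": [1]})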
- -from keystoneauth1.exceptions import NotFound -from keystoneauth1.exceptions import BadRequest -from proboscis.asserts import assert_equal -from proboscis.asserts import fail -from proboscis import test -from proboscis import SkipTest - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.base_test_case import TestBasic - - -@test(groups=["reassign_node_for_os_upgrade", "os_upgrade", - "cluster_upgrade_extension"], - depends_on_groups=["upgrade_ceph_ha_restore"]) -class TestReassignNode(TestBasic): - - snapshot = 'upgrade_ceph_ha_restore' - - @test(groups=["reassign_node_to_cloned_environment"]) - @log_snapshot_after_test - def reassign_node_to_cloned_environment(self): - """Test reassign node - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Clone cluster - 3. Reassign node - 4. Verify node settings - 5. Wait node successful provision - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - release_id = self.fuel_web.get_next_deployable_release_id( - cluster["release_id"] - ) - - data = { - "name": "new_test_cluster", - "release_id": release_id - } - - cloned_cluster = self.fuel_web.client.clone_environment( - cluster_id, data) - - controller_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0] - controller_ifaces = self.fuel_web.client.get_node_interfaces( - controller_node["id"]) - controller_disks = self.fuel_web.client.get_node_disks( - controller_node["id"]) - - data = { - "nodes_ids": [controller_node["id"]] - } - - task = self.fuel_web.client.reassign_node(cloned_cluster["id"], data) - - new_controller = self.fuel_web.client.list_cluster_nodes( - cloned_cluster["id"])[0] - new_controller_ifaces = self.fuel_web.client.get_node_interfaces( - new_controller["id"]) - new_controller_disks = self.fuel_web.client.get_node_disks( - new_controller["id"]) - - assert_equal(["controller"], - new_controller["pending_roles"]) - assert_equal(controller_node["id"], new_controller["id"]) - assert_equal(controller_node["hostname"], new_controller["hostname"]) - for new_iface in new_controller_ifaces: - for iface in controller_ifaces: - if new_iface["name"] == iface["name"]: - assert_equal( - set(net["name"] for net in iface["assigned_networks"]), - set(net["name"] for net in new_iface[ - "assigned_networks"]) - ) - - assert_equal(len(controller_disks), len(new_controller_disks)) - for new_disk in new_controller_disks: - for disk in controller_disks: - if set(x for x in disk["extra"]) == set( - x for x in new_disk["extra"]): - assert_equal(disk["size"], new_disk["size"]) - assert_equal( - sorted([(volume["name"], volume["size"]) - for volume in disk["volumes"] - if volume["size"]]), - sorted([(volume["name"], volume["size"]) - for volume in new_disk["volumes"] - if volume["size"]]) - ) - self.fuel_web.assert_task_success(task) - - @test(groups=["reassign_node_to_nonexistent_cluster"]) - @log_snapshot_after_test - def reassign_node_to_nonexistent_cluster(self): - """Test reassign node to nonexistent cluster - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Reassign node to nonexistent cluster - 3. 
Check status code: 404 - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - - controller_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id, ['controller'])[0] - - data = { - "nodes_ids": [controller_node["id"]] - } - - try: - self.fuel_web.client.reassign_node(123456, data) - except NotFound: - logger.debug('Got NotFound error as expected') - else: - fail("Doesn't raise HTTP 404 error " - "while reassigning " - "the node with id {0} " - "to non-existing " - "cluster 123456".format(controller_node["id"])) - - @test(groups=["reassign_node_with_empty_body"]) - @log_snapshot_after_test - def reassign_node_with_empty_body(self): - """Test reassign node with empty body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Clone cluster - 3. Reassign node with empty POST body - 4. Check status code: 400 - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - release_id = self.fuel_web.get_next_deployable_release_id( - cluster["release_id"] - ) - - data = { - "name": "new_test_cluster", - "release_id": release_id - } - - cloned_cluster = self.fuel_web.client.clone_environment( - cluster_id, data) - - try: - self.fuel_web.client.reassign_node(cloned_cluster["id"], None) - except BadRequest: - logger.debug('Got BadRequest error as expected') - else: - fail("Doesn't raise HTTP 400 error on request " - "to reassign a node with an empty body") - - @test(groups=["reassign_node_with_incorrect_node"]) - @log_snapshot_after_test - def reassign_node_with_incorrect_node(self): - """Test reassign node with incorrect node in POST body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Clone cluster - 3. Reassign node with incorrect node in POST body - 4. Check status code: 400 - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - release_id = self.fuel_web.get_next_deployable_release_id( - cluster["release_id"] - ) - - data = { - "name": "new_test_cluster", - "release_id": release_id - } - - cloned_cluster = self.fuel_web.client.clone_environment( - cluster_id, data) - - data = { - "nodes_ids": ["white_rabbit"] - } - - try: - self.fuel_web.client.reassign_node(cloned_cluster["id"], data) - except BadRequest: - logger.debug('Got BadRequest error as expected') - else: - fail("Doesn't raise HTTP 400 error on request " - "to reassign a node with an incorrect node_id")
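For reference, the happy-path call sequence these negative tests bend out of shape is just clone-then-reassign; a hedged sketch with a hypothetical REST wrapper (payload keys as used in the tests above):

    def clone_and_reassign(client, cluster_id, release_id, node_id):
        seed = client.clone_environment(
            cluster_id, {"name": "new_test_cluster", "release_id": release_id})
        return client.reassign_node(seed["id"], {"nodes_ids": [node_id]})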
@test(groups=["reassign_nonexistent_node_to_cloned_environment"]) - @log_snapshot_after_test - def reassign_nonexistent_node_to_cloned_environment(self): - """Test reassign node with nonexistent node in POST body - - Scenario: - 1. Revert snapshot "upgrade_ceph_ha_restore" - 2. Clone cluster - 3. Reassign node with nonexistent node in POST body - 4. Check status code: 404 - - """ - if not self.env.d_env.has_snapshot(self.snapshot): - raise SkipTest('Snapshot {} not found'.format(self.snapshot)) - self.env.revert_snapshot(self.snapshot) - - cluster_id = self.fuel_web.get_last_created_cluster() - cluster = self.fuel_web.client.get_cluster(cluster_id) - release_id = self.fuel_web.get_next_deployable_release_id( - cluster["release_id"] - ) - - data = { - "name": "new_test_cluster", - "release_id": release_id - } - - cloned_cluster = self.fuel_web.client.clone_environment( - cluster_id, data) - - data = { - "nodes_ids": [123456] - } - - try: - self.fuel_web.client.reassign_node(cloned_cluster["id"], data) - except NotFound: - logger.debug('Got NotFound error as expected') - else: - fail("Doesn't raise HTTP 404 error on request " - "to reassign a nonexistent node to the cloned cluster") diff --git a/fuelweb_test/tests/tests_upgrade/test_os_upgrade.py b/fuelweb_test/tests/tests_upgrade/test_os_upgrade.py deleted file mode 100644 index 080715132..000000000 --- a/fuelweb_test/tests/tests_upgrade/test_os_upgrade.py +++ /dev/null @@ -1,422 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import MAKE_SNAPSHOT -from fuelweb_test.tests.tests_upgrade.upgrade_base import OSUpgradeBase - - -@test(groups=["os_upgrade"]) -class TestOSupgrade(OSUpgradeBase): - def __init__(self): - super(TestOSupgrade, self).__init__() - self.old_cluster_name = self.cluster_names["ceph_ha"] - - @test(depends_on_groups=['upgrade_ceph_ha_restore'], - groups=["os_upgrade_env"]) - @log_snapshot_after_test - def os_upgrade_env(self): - """Octane clone target environment - - Scenario: - 1. Revert snapshot upgrade_ceph_ha_restore - 2. Run fuel2 release clone - 3. Run "octane upgrade-env " - 4. Ensure that new cluster was created with correct release - - """ - self.check_release_requirements() - self.check_run('os_upgrade_env') - self.env.revert_snapshot("upgrade_ceph_ha_restore") - - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - - self.upgrade_mcollective_agents() - - self.install_octane() - - release_id = self.upgrade_release(use_net_template=False) - - logger.info( - 'Releases available for deploy:\n' - '{}'.format( - ''.join( - map( - lambda release: '\t{:<4}: {}\n'.format( - release["id"], release['name']), - self.fuel_web.client.get_deployable_releases() - ) - ) - ) - ) - logger.info('RELEASE ID for env upgrade: {}'.format(release_id)) - - self.upgrade_env_code(release_id=release_id) - - self.env.make_snapshot("os_upgrade_env")
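os_upgrade_env above treats each CLI invocation's stdout as the integer ID it prints; the pattern in isolation, with ssh() standing in for a check_call-style runner (hypothetical):

    def clone_release_and_env(ssh, orig_cluster_id, base_release_id):
        new_release_id = int(ssh(
            "fuel2 release clone {0} {1} -f value -c id".format(
                orig_cluster_id, base_release_id)))
        seed_cluster_id = int(ssh(
            "octane upgrade-env {0} {1}".format(orig_cluster_id, new_release_id)))
        return new_release_id, seed_cluster_id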
@test(depends_on=[os_upgrade_env], groups=["upgrade_first_cic"]) - @log_snapshot_after_test - def upgrade_first_cic(self): - """Upgrade first controller - - Scenario: - 1. Revert snapshot os_upgrade_env - 2. Select cluster for upgrade and upgraded cluster - 3. Select controller for upgrade - 4. Run "octane upgrade-node --isolated " - 5. Check tasks status after upgrade run completion - 6. Run minimal OSTF sanity check (user list) on target cluster - - """ - self.check_release_requirements() - self.check_run('upgrade_first_cic') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("os_upgrade_env") - self.install_octane() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.upgrade_first_controller_code(seed_cluster_id) - - self.env.make_snapshot("upgrade_first_cic") - - @test(depends_on=[upgrade_first_cic], - groups=["upgrade_db"]) - @log_snapshot_after_test - def upgrade_db(self): - """Move and upgrade mysql db from target cluster to seed cluster - - Scenario: - 1. Revert snapshot upgrade_first_cic - 2. Select cluster for upgrade and upgraded cluster - 3. Select controller for db upgrade - 4. Run "octane upgrade-db " - 5. Check upgrade status - - """ - self.check_release_requirements() - self.check_run('upgrade_db') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_first_cic") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - self.install_octane() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.upgrade_db_code(seed_cluster_id) - - self.env.make_snapshot("upgrade_db") - - @test(depends_on=[upgrade_db], - groups=["upgrade_ceph"]) - @log_snapshot_after_test - def upgrade_ceph(self): - """Upgrade ceph - - Scenario: - 1. Revert snapshot upgrade_db - 2. Select cluster for upgrade and upgraded cluster - 3. Run octane upgrade-ceph - 4. Check CEPH health on seed env - """ - - self.check_release_requirements() - self.check_run('upgrade_ceph') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_db") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - self.install_octane() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.upgrade_ceph_code(seed_cluster_id) - - self.env.make_snapshot("upgrade_ceph")
Run minimal OSTF sanity check (user list) on target cluster - - """ - - self.check_release_requirements() - self.check_run('upgrade_controllers') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_ceph") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - self.install_octane() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.upgrade_control_plane_code(seed_cluster_id) - - self.pre_upgrade_computes(orig_cluster_id=self.orig_cluster_id) - - # upgrade controllers part - - self.upgrade_controllers_code(seed_cluster_id) - - self.env.make_snapshot("upgrade_controllers") - - @test(depends_on=[upgrade_controllers], groups=["upgrade_ceph_osd"]) - @log_snapshot_after_test - def upgrade_ceph_osd(self): - """Upgrade ceph osd - - Scenario: - 1. Revert snapshot upgrade_controllers - 2. Select cluster for upgrade and upgraded cluster - 3. Run octane upgrade-osd - 4. Check CEPH health on seed env - 5. Run network verification on target cluster - 6. Run minimal OSTF sanity check (user list) on target cluster - """ - - self.check_release_requirements() - self.check_run('upgrade_ceph_osd') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_controllers") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - self.install_octane() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.upgrade_ceph_osd_code(seed_cluster_id) - - self.env.make_snapshot("upgrade_ceph_osd") - - @test(depends_on=[upgrade_ceph_osd], - groups=["upgrade_old_nodes"]) - @log_snapshot_after_test - def upgrade_old_nodes(self): - """Upgrade all non controller nodes - no live migration - - Scenario: - 1. Revert snapshot upgrade_ceph_osd - 2. Select cluster for upgrade and upgraded cluster - 3. Collect nodes for upgrade - 4. Run octane upgrade-node --no-live-migration $SEED_ID - 5. Run network verification on target cluster - 6. Run minimal OSTF sanity check - """ - self.check_release_requirements() - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_ceph_osd") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - self.install_octane() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(3) - - old_nodes = self.fuel_web.client.list_cluster_nodes( - self.orig_cluster_id) - - self.show_step(4) - - self.upgrade_nodes( - seed_cluster_id=seed_cluster_id, - nodes_str=" ".join([str(node["id"]) for node in old_nodes]), - live_migration=False - ) - - self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True) - - self.env.make_snapshot("upgrade_old_nodes")
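upgrade_old_nodes above upgrades every remaining node in a single octane call; the batching reduces to this sketch (ssh() is a hypothetical runner, and the flag mirrors the scenario text):

    def upgrade_nodes(ssh, seed_cluster_id, node_ids, live_migration=True):
        flag = "" if live_migration else " --no-live-migration"
        ssh("octane upgrade-node{0} {1} {2}".format(
            flag, seed_cluster_id, " ".join(str(i) for i in node_ids)))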
@test(depends_on=[upgrade_old_nodes], - groups=['cleanup_no_live', 'upgrade_cloud_no_live_migration']) - @log_snapshot_after_test - def octane_cleanup(self): - """Clean-up octane - - Scenario: - 1. Revert snapshot upgrade_old_nodes - 2. Select upgraded cluster - 3. Cleanup upgraded env - 4. Run network verification on target cluster - 5. Run OSTF check - 6. Drop orig cluster - """ - self.check_release_requirements() - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_old_nodes") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(3) - self.clean_up(seed_cluster_id=seed_cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(seed_cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(seed_cluster_id) - - self.show_step(6) - self.fuel_web.delete_env_wait(self.orig_cluster_id) - - @test(depends_on=[upgrade_ceph_osd], - groups=["upgrade_nodes_live_migration"]) - @log_snapshot_after_test - def upgrade_nodes_live_migration(self): - """Upgrade all non controller nodes with live migration - - Scenario: - 1. Revert snapshot upgrade_ceph_osd - 2. Select cluster for upgrade and upgraded cluster - 3. Collect nodes for upgrade - 4. Upgrade each osd node using octane upgrade-node $SEED_ID - 5. Upgrade each rest node using octane upgrade-node $SEED_ID - 6. Run network verification on target cluster - 7. Run minimal OSTF sanity check - """ - - self.check_release_requirements() - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_ceph_osd") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - self.install_octane() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(3) - osd_old_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - self.orig_cluster_id, roles=['ceph-osd']) - - self.show_step(4) - for node in osd_old_nodes: - logger.info("Upgrading node {!s}, role {!s}".format( - node['id'], node['roles'])) - - self.upgrade_nodes( - seed_cluster_id=seed_cluster_id, - nodes_str=node['id'], - live_migration=True - ) - - self.show_step(5) - old_nodes = self.fuel_web.client.list_cluster_nodes( - self.orig_cluster_id) - for node in old_nodes: - logger.info("Upgrading node {!s}, role {!s}".format( - node['id'], node['roles'])) - - self.upgrade_nodes( - seed_cluster_id=seed_cluster_id, - nodes_str=node['id'], - live_migration=True - ) - - self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True) - - self.env.make_snapshot("upgrade_nodes_live_migration")
@test(depends_on=[upgrade_nodes_live_migration], - groups=['cleanup_live', 'upgrade_cloud_live_migration']) - @log_snapshot_after_test - def octane_cleanup_live(self): - """Clean-up octane - - Scenario: - 1. Revert snapshot upgrade_nodes_live_migration - 2. Select upgraded cluster - 3. Cleanup upgraded env - 4. Run network verification on target cluster - 5. Run OSTF check - 6. Drop orig cluster - """ - self.check_release_requirements() - - self.show_step(1, initialize=True) - self.env.revert_snapshot("upgrade_nodes_live_migration") - if MAKE_SNAPSHOT: - # some paranoid time sync sequence - self.env.sync_time(["admin"]) - self.env.sync_time() - - self.show_step(2) - seed_cluster_id = self.fuel_web.get_last_created_cluster() - - self.show_step(3) - self.clean_up(seed_cluster_id=seed_cluster_id) - - self.show_step(4) - self.fuel_web.verify_network(seed_cluster_id) - - self.show_step(5) - self.fuel_web.run_ostf(seed_cluster_id) - - self.show_step(6) - self.fuel_web.delete_env_wait(self.orig_cluster_id) diff --git a/fuelweb_test/tests/tests_upgrade/upgrade_base.py b/fuelweb_test/tests/tests_upgrade/upgrade_base.py deleted file mode 100644 index b25dc4319..000000000 --- a/fuelweb_test/tests/tests_upgrade/upgrade_base.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import unicode_literals - -# pylint: disable=import-error -# pylint: disable=no-name-in-module -from distutils.version import LooseVersion -# pylint: enable=no-name-in-module -# pylint: enable=import-error -import re - -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_not_equal -from proboscis.asserts import assert_true -from proboscis import SkipTest -import requests -import six - -from fuelweb_test import logger -from fuelweb_test.helpers.utils import YamlEditor -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU -from fuelweb_test.settings import UPGRADE_FUEL_FROM -from fuelweb_test.settings import UPGRADE_FUEL_TO -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase - - -class OSUpgradeBase(DataDrivenUpgradeBase): - def __init__(self): - self.__old_cluster_name = None - super(OSUpgradeBase, self).__init__() - - @property - def old_cluster_name(self): - return self.__old_cluster_name - - @old_cluster_name.setter - def old_cluster_name(self, new_name): - if not isinstance(new_name, (six.string_types, six.text_type)): - logger.error('old_cluster_name === {!r}'.format(new_name)) - raise TypeError('{!r} is not a string'.format(new_name)) - self.__old_cluster_name = new_name - - @staticmethod - def check_release_requirements(): - if OPENSTACK_RELEASE_UBUNTU not in OPENSTACK_RELEASE: - raise SkipTest('{0} not in {1}'.format( - OPENSTACK_RELEASE_UBUNTU, OPENSTACK_RELEASE)) - - def minimal_check(self, seed_cluster_id, nwk_check=False): - if nwk_check: - self.show_step(self.next_step) - self.fuel_web.verify_network(seed_cluster_id) - - self.show_step(self.next_step) - self.fuel_web.run_single_ostf_test( - cluster_id=seed_cluster_id, test_sets=['sanity'], - test_name=('fuel_health.tests.sanity.test_sanity_identity' - '.SanityIdentityTest.test_list_users'))
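check_ceph_health below tolerates exactly one known warning; the decision logic on the raw 'ceph health' output is worth seeing on its own (a pure-Python sketch):

    def ceph_health_acceptable(health_text):
        if "HEALTH_OK" in health_text:
            return True
        # tolerate only the PG-count warning tracked as LP#1464656
        return ("HEALTH_WARN" in health_text
                and "too many PGs per OSD" in health_text)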
def check_ceph_health(self, ip): - ceph_health = self.ssh_manager.check_call( - ip=ip, command="ceph health").stdout_str - - # There is an issue with PG calculation - LP#1464656 - try: - assert_true("HEALTH_OK" in ceph_health, - "Ceph health is not ok! Inspect output below:\n" - "{!r}".format(ceph_health)) - except AssertionError: - logger.warning("Ceph health is not ok! Trying to check LP#1464656") - if "HEALTH_WARN" in ceph_health and "too many PGs per OSD" in \ - ceph_health: - logger.info("Known issue in ceph - see LP#1464656 for details") - else: - raise - - @property - def orig_cluster_id(self): - """Get cluster id for old_cluster_name - - :rtype: int - """ - if self.old_cluster_name is None: - raise RuntimeError('old_cluster_name is not set') - return self.fuel_web.client.get_cluster_id(self.old_cluster_name) - - def prepare_liberty_mirror(self): - """Create local mirror with Liberty packages""" - - self.add_proposed_to_fuel_mirror_config() - admin_remote = self.env.d_env.get_admin_remote() - admin_remote.check_call( - "cp {cfg}{{,.backup}}".format(cfg=self.FUEL_MIRROR_CFG_FILE)) - - with YamlEditor(self.FUEL_MIRROR_CFG_FILE, - ip=self.env.get_admin_node_ip()) as editor: - editor.content["mos_baseurl"] = ( - editor.content["mos_baseurl"].replace("$mos_version", "8.0")) - editor.content["fuel_release_match"]["version"] = "liberty-8.0" - for repo in editor.content["groups"]["mos"]: - repo["suite"] = repo["suite"].replace("$mos_version", "8.0") - repo["uri"] = repo["uri"].replace("$mos_version", "8.0") - for repo in editor.content["groups"]["ubuntu"]: - if repo.get("main"): - repo["name"] = "ubuntu-0" - elif repo["suite"] == "trusty-updates": - repo["name"] = "ubuntu-1" - elif repo["suite"] == "trusty-security": - repo["name"] = "ubuntu-2" - - cmds = [ - "fuel-mirror create -P ubuntu -G mos > mirror-mos.log 2>&1", - "fuel-mirror create -P ubuntu -G ubuntu > mirror-ubuntu.log 2>&1", - "fuel-mirror apply --default -P ubuntu -G mos", - "fuel-mirror apply --default -P ubuntu -G ubuntu", - "mv {cfg}{{,.liberty.yaml}}".format(cfg=self.FUEL_MIRROR_CFG_FILE), - "mv {cfg}.backup {cfg}".format(cfg=self.FUEL_MIRROR_CFG_FILE)] - for cmd in cmds: - admin_remote.check_call(cmd) - - def upgrade_mcollective_agents(self): - """Upgrade mcollective agent on nodes according to upgrade runbook; - these actions WILL NOT be automated by octane - """ - # Fetch latest available package - astute_deb_location = "http://mirror.fuel-infra.org/mos-repos/" \ - "ubuntu/snapshots/9.0-latest/pool/main/a/astute" - repo_content = requests.get(astute_deb_location).text - mco_package = re.findall(r'>(nailgun-mcagents_.*all\.deb)', - repo_content)[-1] - - # Update package on each node; use curl for predictable file name - nodes = self.fuel_web.client.list_cluster_nodes(self.orig_cluster_id) - for node in nodes: - d_node = self.fuel_web.get_devops_node_by_nailgun_node(node) - remote = self.fuel_web.get_ssh_for_node(node_name=d_node.name) - remote.check_call("curl {repo}/{pkg} > {pkg}".format( - repo=astute_deb_location, - pkg=mco_package)) - with remote.sudo(): - remote.check_call("dpkg -i {pkg}".format(pkg=mco_package)) - remote.check_call("service mcollective restart") - - def upgrade_release(self, use_net_template=False): - self.show_step(self.next_step) - - if not use_net_template: - return int( - self.ssh_manager.check_call( - ip=self.env.get_admin_node_ip(), - command='fuel2 release clone {0} {1} ' - '-f value -c id'.format( - self.orig_cluster_id, - self.fuel_web.client.get_release_id() - ), - error_info='RELEASE_ID clone failed' - ).stdout_str - ) - else: - raise NotImplementedError( - 'Upgrade with network templates is not supported now')
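upgrade_mcollective_agents above scrapes a flat repository index and takes the last match as the newest package; the same approach in isolation (the URL is illustrative, and the [-1] relies on the index listing versions oldest-first, as the test does):

    import re

    import requests

    def latest_package(repo_url, pattern):
        index = requests.get(repo_url).text  # .text so the regex sees str
        return re.findall(pattern, index)[-1]

    # pkg = latest_package(astute_deb_location,
    #                      r'>(nailgun-mcagents_.*all\.deb)')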
def upgrade_env_code(self, release_id): - self.show_step(self.next_step) - seed_id = int( - self.ssh_manager.check_call( - ip=self.env.get_admin_node_ip(), - command="octane upgrade-env {0} {1}".format( - self.orig_cluster_id, - release_id - ), - error_info="'upgrade-env' command failed, " - "inspect logs for details" - ).stdout_str) - - new_cluster_id = int(self.fuel_web.get_last_created_cluster()) - - assert_not_equal( - self.orig_cluster_id, seed_id, - "Cluster IDs are the same: old={} and new={}".format( - self.orig_cluster_id, seed_id)) - - assert_equal( - seed_id, - new_cluster_id, - "Cluster ID was changed, but it's not the last:" - " abnormal activity or a configuration error is present!\n" - "\tSEED ID: {}\n" - "\tLAST ID: {}".format(seed_id, new_cluster_id) - ) - - cluster_release_id = int( - self.fuel_web.get_cluster_release_id(seed_id) - ) - - assert_equal( - cluster_release_id, - release_id, - "Release ID {} is not equal to the expected {}".format( - cluster_release_id, - release_id - ) - ) - - def upgrade_first_controller_code(self, seed_cluster_id): - self.show_step(self.next_step) - controller = self.fuel_web.get_devops_node_by_nailgun_node( - self.fuel_web.get_nailgun_cluster_nodes_by_roles( - self.orig_cluster_id, ["controller"])[0]) - primary = self.fuel_web.get_nailgun_node_by_devops_node( - self.fuel_web.get_nailgun_primary_node(controller) - ) - - self.show_step(self.next_step) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane upgrade-node --isolated " - "{0} {1}".format(seed_cluster_id, primary["id"]), - error_info="octane upgrade-node failed") - - self.show_step(self.next_step) - tasks_started_by_octane = [ - task for task in self.fuel_web.client.get_tasks() - if task['cluster'] == seed_cluster_id] - - for task in tasks_started_by_octane: - self.fuel_web.assert_task_success(task) - - self.show_step(self.next_step) - self.minimal_check(seed_cluster_id=seed_cluster_id) - - def upgrade_db_code(self, seed_cluster_id): - self.show_step(self.next_step) - - seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - seed_cluster_id, ["controller"])[0] - - self.show_step(self.next_step) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane upgrade-db {0} {1}".format( - self.orig_cluster_id, seed_cluster_id), - error_info="octane upgrade-db failed") - - self.show_step(self.next_step) - - crm_status = self.ssh_manager.check_call( - ip=seed_controller["ip"], command="crm resource status").stdout - - while crm_status: - current = crm_status.pop(0) - if "vip" in current: - assert_true("Started" in current) - elif "master_p" in current: - next_element = crm_status.pop(0) - assert_true("Masters: [ node-" in next_element) - elif any(x in current for x in ["ntp", "mysql", "dns"]): - next_element = crm_status.pop(0) - assert_true("Started" in next_element) - elif any(x in current for x in ["nova", "cinder", "keystone", - "heat", "neutron", "glance"]): - next_element = crm_status.pop(0) - assert_true("Stopped" in next_element) - - def upgrade_ceph_code(self, seed_cluster_id): - seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - seed_cluster_id, ["controller"])[0] - - self.show_step(self.next_step) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane upgrade-ceph {0} {1}".format( - self.orig_cluster_id, seed_cluster_id), - error_info="octane upgrade-ceph failed") - - self.show_step(self.next_step) - self.check_ceph_health(seed_controller['ip']) - - def upgrade_control_plane_code(self, seed_cluster_id): - self.show_step(self.next_step) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane upgrade-control {0} {1}".format( - self.orig_cluster_id, seed_cluster_id), - error_info="octane upgrade-control failed") - - self.show_step(self.next_step) - controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - seed_cluster_id, ["controller"]) - - old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - self.orig_cluster_id, ["controller"]) - - old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - self.orig_cluster_id, ["compute"]) - - def collect_management_ips(node_list): - result = [] - for item in node_list: - for data in item["network_data"]: - if data["name"] == "management": - result.append(data["ip"].split("/")[0]) - return result - - ping_ips = collect_management_ips(controllers + old_computes) - ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id)) - - non_ping_ips = collect_management_ips(old_controllers) - - ping_cmd = "ping -W 1 -i 1 -s 56 -c 1 -w 10 {host}" - - for node in controllers + old_computes: - self.ssh_manager.check_call( - ip=node["ip"], command="ip -s -s neigh flush all") - - for ip in ping_ips: - self.ssh_manager.check_call( - ip=node["ip"], - command=ping_cmd.format(host=ip), - error_info="Cannot ping {0} from {1}; " - "need to check network " - "connectivity".format(ip, node["ip"])) - - for ip in non_ping_ips: - self.ssh_manager.check_call( - ip=node["ip"], - command=ping_cmd.format(host=ip), - error_info="Patch ports from old controllers weren't " - "removed", - expected=[1, 2]) # No reply, Other errors - - crm = self.ssh_manager.check_call( - ip=controllers[0]["ip"], - command="crm resource status").stdout - - while crm: - current = crm.pop(0) - if "vip" in current: - assert_true("Started" in current) - elif "master_p" in current: - next_element = crm.pop(0) - assert_true("Masters: [ node-" in next_element) - elif any(x in current for x in ["ntp", "mysql", "dns", - "nova", "cinder", "keystone", - "heat", "neutron", "glance"]): - next_element = crm.pop(0) - assert_true("Started" in next_element) - - def upgrade_controllers_code(self, seed_cluster_id): - self.show_step(self.next_step) - old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - self.orig_cluster_id, ["controller"]) - - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane upgrade-node {0} {1}".format( - seed_cluster_id, - " ".join([str(ctrl["id"]) for ctrl in old_controllers])), - error_info="octane upgrade-node failed") - - self.show_step(self.next_step) - tasks_started_by_octane = [ - task for task in self.fuel_web.client.get_tasks() - if task['cluster'] == seed_cluster_id] - - for task in tasks_started_by_octane: - self.fuel_web.assert_task_success(task) - - self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True) - - def upgrade_ceph_osd_code(self, seed_cluster_id): - seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - seed_cluster_id, ["controller"] - )[0] - - self.show_step(self.next_step) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane upgrade-osd {0} {1}".format( - self.orig_cluster_id, - seed_cluster_id), - error_info="octane upgrade-osd failed" - ) - - self.show_step(self.next_step) - self.check_ceph_health(seed_controller['ip']) - - self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True) - - def pre_upgrade_computes(self, orig_cluster_id): - self.show_step(self.next_step) - - # Fuel-octane can run pre-upgrade only starting 
from version 9.0 and - # we are upgrading packages only if version difference is >1 step - if LooseVersion(UPGRADE_FUEL_TO) >= LooseVersion('9.0') and \ - LooseVersion(UPGRADE_FUEL_FROM) < LooseVersion('8.0'): - self.prepare_liberty_mirror() - - computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - orig_cluster_id, ["compute"] - ) - - liberty_releases = [ - release['id'] for release - in self.fuel_web.client.get_releases() - if 'Liberty on Ubuntu'.lower() in release['name'].lower() - ] - - prev_rel_id = liberty_releases.pop() - - logger.info('Liberty release id is: {}'.format(prev_rel_id)) - - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane preupgrade-compute {0} {1}".format( - prev_rel_id, - " ".join([str(comp["id"]) for comp in computes])), - error_info="octane upgrade-node failed") - - def upgrade_nodes(self, seed_cluster_id, nodes_str, live_migration=False): - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command=( - "octane upgrade-node {migration} {seed_cluster_id} " - "{nodes!s}".format( - migration='' if live_migration else '--no-live-migration', - seed_cluster_id=seed_cluster_id, - nodes=nodes_str)), - error_info="octane upgrade-node failed") - - def clean_up(self, seed_cluster_id): - self.show_step(self.next_step) - self.ssh_manager.check_call( - ip=self.ssh_manager.admin_ip, - command="octane cleanup {0}".format(seed_cluster_id), - error_info="octane cleanup cmd failed") diff --git a/fuelweb_test/tests/tests_upgrade/upgrader_tool.py b/fuelweb_test/tests/tests_upgrade/upgrader_tool.py deleted file mode 100644 index 797f15af9..000000000 --- a/fuelweb_test/tests/tests_upgrade/upgrader_tool.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
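
Taken together, the octane helpers removed above encode a fixed upgrade order: clone the release, move the environment, upgrade one isolated controller, then the database, Ceph monitors, control plane, remaining controllers, OSDs, and finally clean up. A minimal sketch of that sequence, assuming a hypothetical run_on_admin() SSH wrapper in place of ssh_manager.check_call (the admin IP and cluster/node ids are illustrative, not from the source):

    import subprocess

    ADMIN_IP = "10.109.0.2"  # hypothetical Fuel admin node address

    def run_on_admin(cmd):
        # stand-in for ssh_manager.check_call(ip=admin_ip, command=cmd)
        subprocess.check_call(["ssh", "root@" + ADMIN_IP, cmd])

    def octane_upgrade(orig_id, seed_id, release_id, controller_ids):
        # same command order as the upgrade_*_code helpers above
        run_on_admin("octane upgrade-env {} {}".format(orig_id, release_id))
        # first controller is upgraded in isolation
        run_on_admin("octane upgrade-node --isolated {} {}".format(
            seed_id, controller_ids[0]))
        run_on_admin("octane upgrade-db {} {}".format(orig_id, seed_id))
        run_on_admin("octane upgrade-ceph {} {}".format(orig_id, seed_id))
        run_on_admin("octane upgrade-control {} {}".format(orig_id, seed_id))
        run_on_admin("octane upgrade-node {} {}".format(
            seed_id, " ".join(str(i) for i in controller_ids[1:])))
        run_on_admin("octane upgrade-osd {} {}".format(orig_id, seed_id))
        run_on_admin("octane cleanup {}".format(seed_id))
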
- -from __future__ import unicode_literals - -# pylint: disable=import-error -# pylint: disable=no-name-in-module -from distutils.version import LooseVersion -# pylint: enable=no-name-in-module -# pylint: enable=import-error -import os - -from devops.helpers.templates import yaml_template_load -from proboscis import test, SkipTest -from proboscis.asserts import assert_true, assert_equal, fail - -from fuelweb_test import settings, logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \ - DataDrivenUpgradeBase - - -@test -class UpgradeCustom(DataDrivenUpgradeBase): - def __init__(self): - super(UpgradeCustom, self).__init__() - data = yaml_template_load( - settings.UPGRADE_TEST_TEMPLATE)['upgrade_data'] - self.upgrade_data = data - - logger.debug("Got the following data from the upgrade template:") - for step in data: - logger.debug("\n".join( - ["{}:{}".format(key, value) for key, value in step.items()])) - - def _get_current_step(self): - step_name = settings.UPGRADE_CUSTOM_STEP_NAME - target_field = {'backup': 'backup_snapshot_name', - 'restore': 'restore_snapshot_name'} - for item in self.upgrade_data: - if not step_name == item['name']: - continue - if self.env.d_env.has_snapshot( - item[target_field[item['action']]]): - raise SkipTest( - "Step {!r} already executed".format(step_name)) - else: - return item - fail("Cannot find step {!r} in config file {!r}".format( - step_name, settings.UPGRADE_TEST_TEMPLATE)) - - @test(groups=['upgrade_custom_backup']) - @log_snapshot_after_test - def upgrade_custom_backup(self): - """Create the backup for the given env - - Scenario: - 1. Install fuel-octane package - 2. Create backup file using 'octane fuel-backup' - 3. Download the backup to the host - """ - current_step = self._get_current_step() - logger.info("Current step: {}".format(current_step['name'])) - assert_equal( - current_step['action'], 'backup', - "Steps order is incorrect! {!r} should be 'backup'".format( - current_step['action'])) - - self.source_snapshot_name = current_step["source_snapshot_name"] - self.backup_snapshot_name = current_step["backup_snapshot_name"] - - self.backup_name = current_step["backup_name"] - self.repos_backup_name = current_step["repos_backup_name"] - - self.revert_source() - assert_equal( - LooseVersion(current_step['fuel_version']), - self.fuel_version, - "Wrong fuel version in current step; " - "should be {!r}, actual {!r}".format( - LooseVersion(current_step['fuel_version']), - self.fuel_version)) - # clean up existing files to avoid "No space left" - self.env.d_env.get_admin_remote().check_call( - "rm -rf {}".format(self.remote_dir_for_backups)) - self.do_backup(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - self.env.make_snapshot(self.backup_snapshot_name, is_make=True) - - @test(groups=['upgrade_custom_restore']) - @log_snapshot_after_test - def upgrade_custom_restore(self): - """Reinstall Fuel and restore the data. - - Scenario: - 1. Revert the snapshot - 2. Reinstall Fuel master using the ISO given in ISO_PATH - 3. Install fuel-octane package - 4. Upload the backup back to the reinstalled Fuel master node - 5. Restore master node using 'octane fuel-restore' - - """ - current_step = self._get_current_step() - assert_equal( - current_step['action'], 'restore', - "Steps order is incorrect! 
{!r} should be 'restore'".format( - current_step['action'])) - self.backup_snapshot_name = current_step["backup_snapshot_name"] - self.restore_snapshot_name = current_step["restore_snapshot_name"] - - self.backup_name = current_step["backup_name"] - self.repos_backup_name = current_step["repos_backup_name"] - - self.show_step(1) - self.revert_backup() - - assert_equal( - LooseVersion(current_step['source_fuel_version']), - self.fuel_version, - "Wrong fuel version in current step; " - "should be {!r}, actual {!r}".format( - LooseVersion(current_step['source_fuel_version']), - self.fuel_version)) - - assert_true(os.path.exists(self.repos_local_path)) - assert_true(os.path.exists(self.local_path)) - - self.show_step(2) - post_reinstall_snapshot = "post_reinstall_" + self.backup_snapshot_name - if not self.env.d_env.has_snapshot(post_reinstall_snapshot): - self.reinstall_master_node() - self.env.make_snapshot(post_reinstall_snapshot) - else: - self.env.d_env.revert(post_reinstall_snapshot) - self.env.d_env.resume() - self.show_step(3) - self.show_step(4) - self.show_step(5) - self.do_restore(self.backup_path, self.local_path, - self.repos_backup_path, self.repos_local_path) - assert_equal( - LooseVersion(current_step['target_fuel_version']), - self.fuel_version, - "Wrong fuel version in current step; " - "should be {!r}, actual {!r}".format( - LooseVersion(current_step['target_fuel_version']), - self.fuel_version)) - - self.env.make_snapshot(self.restore_snapshot_name, is_make=True) - self.cleanup() - - @test(groups=['upgrade_custom_tarball'], enabled=False) - @log_snapshot_after_test - def upgrade_custom_tarball(self): - """Upgrade master node via tarball""" - # TODO(vkhlyunev): revive this test when 6.0-8.0 will be implemented - self.check_run(self.backup_snapshot_name) - assert_true(self.source_snapshot_name) - self.env.revert_snapshot(self.source_snapshot_name) - - tarball_name = os.path.basename(settings.TARBALL_PATH) - self.upload_file(settings.TARBALL_PATH, self.tarball_remote_dir) - _, ext = os.path.splitext(tarball_name) - cmd = "cd {} && ".format(self.tarball_remote_dir) - cmd += "tar -xpvf" if ext.endswith("tar") else "lrzuntar" - - # pylint: disable=no-member - self.admin_remote.check_call(cmd) - # pylint: enable=no-member - cmd = "sh {} --no-rollback --password {}".format( - os.path.join(self.tarball_remote_dir, "upgrade.sh"), - settings.KEYSTONE_CREDS['password']) - - # pylint: disable=no-member - self.admin_remote.check_call(cmd, timeout=60 * 60) - # pylint: enable=no-member - - self.env.make_snapshot(self.restore_snapshot_name, is_make=True) diff --git a/gates_tests/__init__.py b/gates_tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gates_tests/devops_templates/default.yaml b/gates_tests/devops_templates/default.yaml deleted file mode 100644 index 5b3d27d59..000000000 --- a/gates_tests/devops_templates/default.yaml +++ /dev/null @@ -1,235 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - -template: - devops_settings: - env_name: !os_env ENV_NAME - - address_pools: - # Network pools used by the environment - - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - - # Reserved IP addresses. - # - 'l2_network_device' will be used as an IP address for creating - # libvirt network with assigned IP on it, if specified. 
- # - 'gateway' should be used in fuel-qa framework to get the gateway - # of the address pool. Can be different from libvirt IP address - # in case if a network already have another gateway. - # - all other IP addresses specified in the template can be used - # for different purposes in different test cases. - ip_reserved: - - # Address of the gateway in the address pool fuelweb_admin-pool01. - # If string - than is used 'as is', as a real IP address - # If numeric - than assumed as a relative address - # from the start of the address pool. - gateway: +1 - - # Libvirt bridge will get this IP address - l2_network_device: +1 - - # IP ranges, reserved in the address pool. - # - 'default' IP range stands for 'default' nodegroup in Fuel. - # - any other IP range names such as 'floating' can be specified to - # provide a special IP ranges to configure cluster settings. - # For special test cases, several IP ranges can be configured here - # for different networks. Names of these ranges should be - # agreed for template and test cases. - ip_ranges: - - # Relative or real IP addresses - default: [+2, -2] # admin IP range for 'default' nodegroup name - - public-pool01: - net: *pool_default - params: - vlan_start: 0 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, +127] # public IP range for 'default' nodegroup name - floating: [+128, -2] - - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - - management-pool01: - net: *pool_default - params: - vlan_start: 102 - - private-pool01: - net: *pool_default - params: - vlan_start: 960 - vlan_end: 1000 - - groups: - - name: default - driver: - name: devops.driver.libvirt - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - - management: - address_pool: management-pool01 - dhcp: false - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - # Slave nodes - - - name: slave-01 - role: fuel_slave - params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - # List of node interfaces - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: storage - interface_model: *interface_model - - label: eth3 - l2_network_device: management - interface_model: *interface_model - - label: eth4 - l2_network_device: private - interface_model: *interface_model - - # How Nailgun/OpenStack networks should assigned for interfaces - network_config: - eth0: - networks: - - fuelweb_admin # Nailgun/OpenStack network name - eth1: - networks: - - public - eth2: - networks: - - storage - eth3: - networks: - - management - eth4: - networks: - - private - - - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-01-slave-node-params - diff --git a/gates_tests/devops_templates/ironic_template.yaml b/gates_tests/devops_templates/ironic_template.yaml deleted file mode 100644 index 8fafcea21..000000000 --- a/gates_tests/devops_templates/ironic_template.yaml +++ /dev/null @@ -1,208 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - slave-interfaces: &slave-interfaces - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: storage - interface_model: *interface_model - - label: eth3 - l2_network_device: management - interface_model: *interface_model - - label: eth4 - l2_network_device: private - interface_model: *interface_model - - label: eth5 - l2_network_device: ironic - interface_model: *interface_model - - slave-network_config: &slave-network_config - eth0: - networks: - - fuelweb_admin - eth1: - networks: - - public - eth2: - networks: - - storage - eth3: - networks: - - management - eth4: - networks: - - private - eth5: - networks: - - baremetal - - slave-node-params: &slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *slave-interfaces - network_config: *slave-network_config - -template: - devops_settings: - env_name: !os_env ENV_NAME - address_pools: - # Network pools used by the environment - fuelweb_admin: - net: *pool_default - public: - net: *pool_default - storage: - net: *pool_default - management: - net: *pool_default - ironic: - net: *pool_default - private: - net: *pool_default - - groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin - public: public - storage: storage - management: management - private: private - ironic: ironic - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin - dhcp: false - forward: - mode: nat - - public: - address_pool: public - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage - dhcp: false - - management: - address_pool: management - dhcp: false - - private: - address_pool: private - dhcp: false - - ironic: - address_pool: ironic - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *slave-node-params - - name: slave-02 - role: fuel_slave - params: *slave-node-params - - name: slave-03 - role: fuel_slave - params: *slave-node-params - - name: slave-04 - role: fuel_slave - params: *slave-node-params - - name: slave-05 - role: fuel_slave - params: *slave-node-params - - name: slave-06 - role: fuel_slave - params: *slave-node-params - # slaves 7-9 is not used by fuel-qa but can be used in manual tests - - name: slave-07 - role: fuel_slave - params: *slave-node-params - - name: slave-08 - role: fuel_slave - params: *slave-node-params - - name: slave-09 - role: fuel_slave - params: *slave-node-params - - name: ironic-slave-01 - role: ironic - params: &ironic-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - interfaces: - - l2_network_device: ironic - label: eth0 - interface_model: *interface_model diff --git a/gates_tests/helpers/__init__.py b/gates_tests/helpers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gates_tests/helpers/exceptions.py b/gates_tests/helpers/exceptions.py deleted file mode 100644 index 2ecb982d3..000000000 --- a/gates_tests/helpers/exceptions.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
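
Throughout these devops templates, values come from the environment via the `!os_env VAR, default` YAML tag. That tag is resolved by fuel-devops' own template loader; the sketch below only illustrates the semantics with a plain PyYAML constructor, and is not the real implementation:

    import os
    import yaml

    def os_env_constructor(loader, node):
        # "!os_env NAME, default" -> os.environ.get("NAME", "default")
        parts = [p.strip() for p in loader.construct_scalar(node).split(',', 1)]
        name = parts[0]
        default = parts[1] if len(parts) > 1 else None
        return os.environ.get(name, default)

    yaml.SafeLoader.add_constructor('!os_env', os_env_constructor)

    doc = yaml.safe_load("pool: !os_env POOL_DEFAULT, 10.109.0.0/16:24")
    print(doc["pool"])  # '10.109.0.0/16:24' unless POOL_DEFAULT is set
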
- - -class ConfigurationException(Exception): - pass - - -class PackageVersionError(Exception): - def __init__(self, package, version): - self.package = package - self.version = version - super(PackageVersionError, self).__init__() - - def __repr__(self): - return 'Package {0} has wrong version {1}'.format( - self.package, self.version) - - -class FuelQATestException(Exception): - def __init__(self, message): - self.message = message - super(FuelQATestException, self).__init__() - - def __str__(self): - return self.message - - -class FuelQAVariableNotSet(FuelQATestException): - def __init__(self, variable_name, expected_value): - self.variable_name = variable_name - self.expected_value = expected_value - super(FuelQAVariableNotSet, self).__init__( - "Variable {0} was not set in value {1}".format( - self.variable_name, self.expected_value)) - - def __str__(self): - return "Variable {0} was not set in value {1}".format( - self.variable_name, self.expected_value) diff --git a/gates_tests/helpers/fuel_library_modules_mapping.yaml b/gates_tests/helpers/fuel_library_modules_mapping.yaml deleted file mode 100644 index 8c591ba14..000000000 --- a/gates_tests/helpers/fuel_library_modules_mapping.yaml +++ /dev/null @@ -1,199 +0,0 @@ -bvt_2: - devops_settings_template: devops_templates/default.yaml - modules: - - apache - - apache_api_proxy - - apache_mpm - - api-proxy - - api_proxy.conf - - apt - - astute - - atop - - atop_retention - - auth_file - - credentials_file - - ceph - - ceph_nova_compute - - cluster - - cluster_haproxy - - cluster-haproxy - - cluster_vrouter - - cluster-vrouter - - cobbler - - compute - - concat - - controller - - corosync - - database - - datacat - - deployment_groups - - dnsmasq - - dnsmasq.conf - - docker - - fuel - - fuel_pkgs - - glance - - globals - - globals_yaml - - haproxy - - hiera - - horizon - - hosts - - inifile - - install_ssh_keys - - keystone - - l23network - - logging - - mon - - mcollective - - memcached - - monit - - nailgun - - neutron - - netconfig - - nova - - openrc.erb - - openssl - - openstack - - openstack-controller - - openstack_controller - - openstack-haproxy - - openstack_haproxy - - openstack-network - - openstack_network - - openstacklib - - pacemaker - - pacemaker_wrappers - - package_pins - - postgresql - - puppet_pull - - puppet-pull.sh - - rabbitmq - - roles/controller - - roles/compute - - roles/enable_compute - - roles/tasks - - rsync - - rsyslog - - spec - - ssh - - ssl - - staging - - stdlib - - sysctl - - sysfs - - test_compute - - test_controller - - tftp - - tools - - tweaks - - vcsrepo - - virtual_ips - - xinetd - - wait_for_backend - - wait_for_keystone_backends - - wait_for_glance_backends - - -deploy_ha_controller_neutron_example: - devops_settings_template: devops_templates/default.yaml - modules: - - plugins - -deploy_heat_ha: - devops_settings_template: devops_templates/default.yaml - modules: - - aodh - - ceilometer - - ceilometer_ha - - heat - - heat_ha - - mongo - - mongodb - - roles/mongo - -ha_neutron: - devops_settings_template: devops_templates/default.yaml - modules: - - cinder - - openstack-cinder - - roles/cinder - - roles/enable_cinder - - openstack_cinder - -reduced_footprint: - devops_settings_template: devops_templates/default.yaml - modules: - - generate_vms - - vm_libvirt - -ha_neutron_firewall: - devops_settings_template: devops_templates/default.yaml - modules: - - firewall - -ha_neutron_mysql_termination: - devops_settings_template: devops_templates/default.yaml - modules: - - galera - - mysql - - 
mysql.access.cnf - - mysql_access - - mysql_grant - - mysql_user_access - -ironic_deploy_ceph: - devops_settings_template: devops_templates/ironic_template.yaml - modules: - - ceph - - ironic - - ironic-conductor - - roles/ironic-conductor - - roles/ironic_conductor - -deploy_murano_ha_with_tun: - devops_settings_template: devops_templates/default.yaml - modules: - - murano - -deploy_ha_dns_ntp: - devops_settings_template: devops_templates/default.yaml - modules: - - dns - - ntp - - resolvconf - - resolv.conf - - resolv.dnsmasq.conf - -deploy_sahara_ha_tun: - devops_settings_template: devops_templates/default.yaml - modules: - - sahara - - sahara_templates - -neutron_vlan_ha: - devops_settings_template: devops_templates/default.yaml - modules: - - swift - - proxy - -positive_cic_maintenance_mode: - devops_settings_template: devops_templates/default.yaml - modules: - - umm - -deploy_ha_cgroup: - devops_settings_template: devops_templates/default.yaml - modules: - - cgroups - -basic_env_for_hugepages: - devops_settings_template: devops_templates/default.yaml - modules: - - allocated_hugepages - -test_logrotate: - devops_settings_template: devops_templates/default.yaml - modules: - - apache2.logrotate - - apache2.prerotate diff --git a/gates_tests/helpers/openstack_puppet_projects_mapping.yaml b/gates_tests/helpers/openstack_puppet_projects_mapping.yaml deleted file mode 100644 index 2acab529d..000000000 --- a/gates_tests/helpers/openstack_puppet_projects_mapping.yaml +++ /dev/null @@ -1,40 +0,0 @@ -bvt_2: - devops_settings_template: devops_templates/default.yaml - projects: - - openstack/puppet-ceph - - openstack/puppet-glance - - openstack/puppet-horizon - - openstack/puppet-keystone - - openstack/puppet-keystone - - openstack/puppet-neutron - - openstack/puppet-nova - - openstack/puppet-openstacklib - - openstack/puppet-oslo - -neutron_vlan_ha: - devops_settings_template: devops_templates/default.yaml - projects: - - openstack/puppet-cinder - - openstack/puppet-swift - -deploy_sahara_ha_tun: - devops_settings_template: devops_templates/default.yaml - projects: - - openstack/puppet-sahara - -deploy_murano_ha_with_tun: - devops_settings_template: devops_templates/default.yaml - projects: - - openstack/puppet-murano - -ironic_deploy_ceph: - devops_settings_template: devops_templates/ironic_template.yaml - projects: - - openstack/puppet-ironic - -deploy_heat_ha: - devops_settings_template: devops_templates/default.yaml - projects: - - openstack/puppet-aodh - - openstack/puppet-ceilometer - - openstack/puppet-heat diff --git a/gates_tests/helpers/utils.py b/gates_tests/helpers/utils.py deleted file mode 100644 index ca69cf340..000000000 --- a/gates_tests/helpers/utils.py +++ /dev/null @@ -1,531 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
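
Both mapping files above are consumed by the gate helpers that follow. For the fuel-library mapping, the test group whose covered `modules` list overlaps most with the review's changed modules wins, with bvt_2 as the fallback. A condensed, self-contained sketch of that selection (mapping contents abbreviated; the helper name is hypothetical):

    import yaml

    MAPPING = yaml.safe_load("""
    bvt_2:
      devops_settings_template: devops_templates/default.yaml
      modules: [ceph, cluster, nova, neutron]
    ha_neutron_firewall:
      devops_settings_template: devops_templates/default.yaml
      modules: [firewall]
    """)

    def best_test_group(changed_modules, default="bvt_2"):
        # pick the group whose covered modules overlap most with the review
        best, best_overlap = default, 0
        for group, data in MAPPING.items():
            overlap = len(set(data["modules"]) & set(changed_modules))
            if overlap > best_overlap:
                best, best_overlap = group, overlap
        return best

    print(best_test_group({"firewall"}))  # -> ha_neutron_firewall
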
- -import itertools -import os -import yaml - -from proboscis import register -from proboscis.asserts import assert_equal -from devops.helpers import helpers - -from fuelweb_test.helpers.fuel_actions import BaseActions -from fuelweb_test.helpers.gerrit.gerrit_info_provider import \ - FuelLibraryModulesProvider -from fuelweb_test.helpers.ssh_manager import SSHManager -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.utils import YamlEditor -from gates_tests.helpers import exceptions - - -def replace_rpm_package(package): - """Replaced rpm package.rpm on master node with package.rpm - from review - """ - ssh = SSHManager() - logger.info("Patching {}".format(package)) - if not settings.UPDATE_FUEL: - raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True') - try: - # Upload package - target_path = '/var/www/nailgun/{}/'.format(package) - ssh.upload_to_remote( - ip=ssh.admin_ip, - source=settings.UPDATE_FUEL_PATH.rstrip('/'), - target=target_path) - - package_name = package - package_ext = '*.noarch.rpm' - pkg_path = os.path.join(target_path, - '{}{}'.format(package_name, package_ext)) - full_package_name = get_full_filename(wildcard_name=pkg_path) - logger.debug('Package name is {0}'.format(full_package_name)) - full_package_path = os.path.join(os.path.dirname(pkg_path), - full_package_name) - - # Update package on master node - if not does_new_pkg_equal_to_installed_pkg( - installed_package=package_name, - new_package=full_package_path): - update_rpm(path=full_package_path) - - except Exception: - logger.error("Could not upload package") - raise - - -def update_ostf(): - logger.info("Uploading new package from {0}".format( - settings.UPDATE_FUEL_PATH)) - ssh = SSHManager() - pack_path = '/var/www/nailgun/fuel-ostf/' - full_pack_path = os.path.join(pack_path, 'fuel-ostf*.noarch.rpm') - ssh.upload_to_remote( - ssh.admin_ip, - source=settings.UPDATE_FUEL_PATH.rstrip('/'), target=pack_path) - - # Check old fuel-ostf package - cmd = "rpm -q fuel-ostf" - - old_package = ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)['stdout_str'] - logger.info( - 'Current package version of ' - 'fuel-ostf: {0}'.format(old_package)) - - cmd = "rpm -qp {0}".format(full_pack_path) - new_package = ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)['stdout_str'] - logger.info('Package from review {0}'.format(new_package)) - - if old_package == new_package: - logger.info('Package {0} is installed'.format(new_package)) - return - - cmd = "service ostf stop" - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - cmd = "service ostf status" - helpers.wait(lambda: "dead" in ssh.execute_on_remote( - ssh.admin_ip, cmd=cmd, - raise_on_assert=False, - assert_ec_equal=[3])['stdout_str'], timeout=60) - logger.info("OSTF status: inactive") - cmd = "rpm -e fuel-ostf" - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path) - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - cmd = "rpm -q fuel-ostf" - installed_package = ssh.execute_on_remote( - ssh.admin_ip, cmd=cmd)['stdout_str'] - - assert_equal( - installed_package, new_package, - "The new package {0} was not installed. 
Actual {1}".format( - new_package, installed_package)) - cmd = "service ostf start" - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd) - cmd = "service ostf status" - helpers.wait( - lambda: "running" in - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)['stdout_str'], - timeout=60) - cmd = "curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8777" - helpers.wait( - lambda: "401" in ssh.execute_on_remote( - ssh.admin_ip, cmd=cmd, raise_on_assert=False)['stdout_str'], - timeout=60) - logger.info("OSTF status: RUNNING") - - -def get_oswl_services_names(): - cmd = "systemctl list-units| grep oswl_ | awk '{print $1}'" - result = SSHManager().execute_on_remote( - SSHManager().admin_ip, cmd)['stdout_str'].strip() - logger.info('list of statistic services {0}'.format( - result.split('\n'))) - return result.split('\n') - - -def replace_fuel_nailgun_rpm(): - """ - Replace fuel_nailgun*.rpm from review - """ - logger.info("Patching fuel-nailgun") - ssh = SSHManager() - if not settings.UPDATE_FUEL: - raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True') - pack_path = '/var/www/nailgun/fuel-nailgun/' - - full_pack_path = os.path.join(pack_path, - 'fuel-nailgun*.noarch.rpm') - logger.info('Package path {0}'.format(full_pack_path)) - ssh.upload_to_remote( - ip=ssh.admin_ip, - source=settings.UPDATE_FUEL_PATH.rstrip('/'), target=pack_path) - - # Check old fuel-nailgun package - cmd = "rpm -q fuel-nailgun" - - old_package = ssh.execute_on_remote( - ip=ssh.admin_ip, cmd=cmd)['stdout_str'] - logger.info( - 'Current package version of ' - 'fuel-nailgun: {0}'.format(old_package)) - - cmd = "rpm -qp {0}".format(full_pack_path) - new_package = ssh.execute_on_remote( - ip=ssh.admin_ip, cmd=cmd)['stdout_str'] - logger.info("Updating package {0} with {1}".format( - old_package, new_package)) - - if old_package == new_package: - logger.debug('Looks like package from review ' - 'was installed during setups of master node') - return - - # stop services - service_list = ['assassind', 'receiverd', 'nailgun', 'statsenderd'] - for service in service_list: - ssh.execute_on_remote( - ip=ssh.admin_ip, cmd='systemctl stop {0}'.format(service)) - logger.info('statistic services {0}'.format(get_oswl_services_names())) - # stop statistic services - for service in get_oswl_services_names(): - ssh.execute_on_remote( - ip=ssh.admin_ip, cmd='systemctl stop {0}'.format(service)) - - # Drop nailgun db manage.py dropdb - cmd = 'manage.py dropdb' - ssh.execute_on_remote(ssh.admin_ip, cmd) - - # Delete package - logger.info("Delete package {0}".format(old_package)) - cmd = "rpm -e fuel-nailgun" - ssh.execute_on_remote(ssh.admin_ip, cmd) - - logger.info("Install package {0}".format(new_package)) - - cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path) - - ssh.execute_on_remote(ssh.admin_ip, cmd) - - cmd = "rpm -q fuel-nailgun" - installed_package = ssh.execute_on_remote(ssh.admin_ip, cmd)['stdout_str'] - - assert_equal(installed_package, new_package, - "The new package {0} was not installed".format(new_package)) - - cmd = ('puppet apply --debug ' - '/etc/puppet/modules/fuel/examples/nailgun.pp') - ssh.execute_on_remote(ssh.admin_ip, cmd) - cmd_sync = 'fuel release --sync-deployment-tasks --dir /etc/puppet/' - ssh.execute_on_remote(ssh.admin_ip, cmd=cmd_sync) - - -def update_rpm(path, rpm_cmd='/bin/rpm -Uvh --force'): - cmd = '{rpm_cmd} {rpm_path}'\ - .format(rpm_cmd=rpm_cmd, rpm_path=path) - logger.info("Updating rpm '{0}'".format(path)) - try: - SSHManager().execute(SSHManager().admin_ip, cmd) - logger.info("Rpm '{0}' has been 
updated successfully " - .format(path)) - except Exception as ex: - logger.error("Could not update rpm '{0}' in the '{1}'" - .format(path, ex)) - raise - - -def restart_service(service_name, timeout=30): - restart_cmd = 'service {} restart'.format(service_name) - get_status_cmd = 'service {} status'.format(service_name) - logger.info("Restarting service '{0}'".format(service_name)) - try: - SSHManager().execute_on_remote(SSHManager().admin_ip, - restart_cmd) - helpers.wait( - lambda: 'running' in - SSHManager().execute_on_remote(SSHManager().admin_ip, - get_status_cmd)['stdout_str'], - timeout=timeout) - logger.info("Service '{0}' has been restarted successfully " - .format(service_name)) - except Exception as ex: - logger.error("Could not restart '{0}' service " - "in the '{1}'" - .format(service_name, ex)) - raise - - -def does_new_pkg_equal_to_installed_pkg(installed_package, - new_package): - rpm_query_cmd = '/bin/rpm -q' - current_version_cmd = '{rpm} {package}'\ - .format(rpm=rpm_query_cmd, package=installed_package) - urlfile_version_cmd = '{rpm} --package {package}'\ - .format(rpm=rpm_query_cmd, package=new_package) - - logger.info("Comparing installed package version against " - "the package version to be installed") - - current_version = SSHManager().execute_on_remote( - ip=SSHManager().admin_ip, cmd=current_version_cmd)['stdout_str'] - - new_version = SSHManager().execute_on_remote( - ip=SSHManager().admin_ip, cmd=urlfile_version_cmd)['stdout_str'] - - logger.info("Installed package version: {}".format(current_version)) - logger.info("Package version to be installed: {}".format(new_version)) - - return current_version == new_version - - -def get_full_filename(wildcard_name): - cmd = 'ls {}'.format(wildcard_name) - - logger.info("Getting full file name for: {}".format(wildcard_name)) - - full_pkg_name = SSHManager().execute_on_remote( - ip=SSHManager().admin_ip, - cmd=cmd)['stdout_str'] - return full_pkg_name - - -def get_sha_sum(file_path): - logger.debug('Get md5 fo file {0}'.format(file_path)) - md5_sum = SSHManager().execute_on_remote( - SSHManager().admin_ip, cmd='md5sum {0}'.format( - file_path))['stdout_str'].strip() - logger.info('MD5 is {0}'.format(md5_sum)) - return md5_sum - - -def fuel_library_modules_mapping(modules): - """ - find fuel-qa system test which have maximum coverage for edited - puppet modules and register that group with "review_in_fuel_library" name - modules - dictionary of puppet modules edited in review - Example: modules = {'horizon':'fuel-library/deployment/Puppetfile'} - """ - - # open yaml with covered modules - with open( - "gates_tests/helpers/fuel_library_modules_mapping.yaml", "r") as f: - mapping = yaml.load(f) - - if modules and isinstance(modules, dict): - all_modules = set(list(itertools.chain.from_iterable( - [mapping[test_group]['modules'] for test_group in mapping]))) - - logger.debug( - "List of puppet modules covered by system_tests {}".format( - all_modules)) - logger.info( - "List of modules edited in review {}".format(modules.keys())) - - # checking that module from review covered by system_test - for module in modules.keys(): - if module not in all_modules: - logger.warning( - "{}:{} module not exist or not covered by system_test" - .format(module, modules[module])) - - # find test group which has better coverage of modules from review - system_test = "bvt_2" - max_intersection = 0 - if not ("ceph" in modules and - {"roles/cinder.pp", "cinder", "openstack-cinder"} & - set(modules)): - for test in mapping: - test_intersection = 
len( - set(mapping[test]['modules']).intersection(set(modules))) - if test_intersection > max_intersection: - max_intersection = test_intersection - system_test = test - - devops_template = mapping[system_test]['devops_settings_template'] - - import gates_tests - - path_to_template = os.path.join( - os.path.dirname(os.path.abspath(gates_tests.__file__)), - devops_template) - - logger.debug("devops template is {}".format(path_to_template)) - - os.environ['DEVOPS_SETTINGS_TEMPLATE'] = path_to_template - - # To completely check the ceph module we can't mix ceph and cinder together - else: - logger.warning( - "We cannot check cinder and ceph together {}" - .format(modules)) - system_test = "bvt_2" - - else: - logger.warning("There are no modules changed in the review, " - "so just run the default system test") - system_test = "bvt_2" - logger.info( - "Puppet modules from review {}" - " will be checked by the following system test: {}".format( - modules, system_test)) - - register(groups=['review_in_fuel_library'], - depends_on_groups=[system_test]) - - -def openstack_puppet_project_mapping(project): - """ - find the fuel-qa system test which has maximum coverage for the edited - openstack/puppet-project and register that group with the - "review_in_openstack_puppet_project" name - project - puppet project edited in review - Example: project = "openstack/puppet-openstacklib" - """ - - # open yaml with covered projects - with open( - "gates_tests/helpers/openstack_puppet_projects_mapping.yaml", - "r") as f: - mapping = yaml.load(f) - - all_projects = set(list(itertools.chain.from_iterable( - [mapping[test_group]['projects'] for test_group in mapping]))) - logger.debug( - "List of openstack/puppet-projects " - "covered by system_tests {}".format( - all_projects)) - logger.info( - "Edited project in review - '{}'".format(project)) - - # checking that the project from review is covered by a system_test - if project not in all_projects: - logger.warning( - "{} project does not exist or is not covered by system_test" - .format(project)) - - # find the test group which covers the project edited in review - system_test = "bvt_2" - for test in mapping: - if project in mapping[test]['projects']: - system_test = test - break - - devops_template = mapping[system_test]['devops_settings_template'] - - import gates_tests - - path_to_template = os.path.join( - os.path.dirname(os.path.abspath(gates_tests.__file__)), - devops_template) - - logger.debug("devops template is {}".format(path_to_template)) - - os.environ['DEVOPS_SETTINGS_TEMPLATE'] = path_to_template - logger.info( - "Edited project in review - '{}'" - " will be checked by the following system test: {}".format( - project, system_test)) - - register(groups=['review_in_openstack_puppet_project'], - depends_on_groups=[system_test]) - - -def map_test_review_in_fuel_library(**kwargs): - groups = kwargs.get('run_groups', []) - old_groups = kwargs.get('groups', None) - groups.extend(old_groups or []) - if 'review_in_fuel_library' in groups: - if settings.GERRIT_CHANGE_ID and settings.GERRIT_PATCHSET_NUMBER: - mp = FuelLibraryModulesProvider.from_environment_vars() - modules = mp.get_changed_modules() - else: - modules = dict() - fuel_library_modules_mapping(modules) - - -def map_test_review_in_openstack_puppet_projects(**kwargs): - groups = kwargs.get('run_groups', []) - old_groups = kwargs.get('groups', None) - groups.extend(old_groups or []) - if 'review_in_openstack_puppet_project' in groups: - if settings.GERRIT_PROJECT: - project = settings.GERRIT_PROJECT - else: - project = str() - 
openstack_puppet_project_mapping(project) - - -def check_package_version_injected_in_bootstraps( - package, - cluster_id=None, - ironic=None): - - ssh = SSHManager() - try: - pack_path = '/var/www/nailgun/{}/'.format(package) - ssh.upload_to_remote( - ip=ssh.admin_ip, - source=settings.UPDATE_FUEL_PATH.rstrip('/'), - target=pack_path) - except Exception: - logger.exception("Could not upload package") - raise - - # Step 1 - unpack active bootstrap - logger.info("unpack active bootstrap") - - if ironic: - bootstrap = "/var/www/nailgun/bootstrap/ironic/{}".format(cluster_id) - else: - bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap" - bootstrap_var = "/var/root.squashfs" - - cmd = "unsquashfs -d {} {}/root.squashfs".format( - bootstrap_var, bootstrap) - ssh.execute_on_remote( - ip=ssh.admin_ip, - cmd=cmd) - - # Step 2 - check package version - logger.info( - "check package {} version injected in ubuntu bootstrap".format( - package)) - - cmd = "ls {}|grep {} |grep deb |cut -f 2 -d '_'".format( - pack_path, package) - - package_from_review = ssh.execute_on_remote( - ip=ssh.admin_ip, - cmd=cmd)['stdout_str'] - - logger.info("package from review is {}".format(package_from_review)) - - awk_pattern = "awk '{print $2}'" - cmd = "chroot {}/ /bin/bash -c \"dpkg -s {}\"|grep Version|{}".format( - bootstrap_var, package, awk_pattern) - installed_package = ssh.execute_on_remote( - ip=ssh.admin_ip, - cmd=cmd)['stdout_str'] - logger.info("injected package is {}".format(installed_package)) - - assert_equal(installed_package, package_from_review, - "The new package {0} wasn't injected in bootstrap".format( - package_from_review)) - - # Step 3 - remove unpacked bootstrap - cmd = "rm -rf {}".format(bootstrap_var) - ssh.execute_on_remote( - ip=ssh.admin_ip, - cmd=cmd) - - -def update_bootstrap_cli_yaml(): - actions = BaseActions() - path = "/etc/fuel-bootstrap-cli/fuel_bootstrap_cli.yaml" - astute_yaml_path = "/etc/fuel/astute.yaml" - - with YamlEditor(astute_yaml_path, ip=actions.admin_ip) as editor: - repos = editor.content["BOOTSTRAP"]["repos"] - - repos.append({ - 'name': 'auxiliary', - 'priority': "1200", - 'section': 'main restricted', - 'suite': 'auxiliary', - 'type': 'deb', - 'uri': 'http://127.0.0.1:8080/ubuntu/auxiliary/'}) - - with YamlEditor(path, ip=actions.admin_ip) as editor: - editor.content['repos'] = repos diff --git a/gates_tests/tests/__init__.py b/gates_tests/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gates_tests/tests/test_nailgun_agent.py b/gates_tests/tests/test_nailgun_agent.py deleted file mode 100644 index 5942f4f82..000000000 --- a/gates_tests/tests/test_nailgun_agent.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
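
check_package_version_injected_in_bootstraps() above reduces to unpacking the active bootstrap squashfs and comparing the package version recorded inside it against the package uploaded from review. A rough sketch of that check, using dpkg-query instead of the dpkg -s | grep pipeline above (must run as root; the helper name and return-boolean design are assumptions, not from the source):

    import subprocess

    def injected_version_matches(
            package, review_version,
            bootstrap="/var/www/nailgun/bootstraps/active_bootstrap",
            workdir="/var/root.squashfs"):
        # unpack the active bootstrap image
        subprocess.check_call(
            ["unsquashfs", "-d", workdir,
             "{}/root.squashfs".format(bootstrap)])
        try:
            # read the version dpkg recorded inside the image
            out = subprocess.check_output(
                ["chroot", workdir,
                 "dpkg-query", "-W", "-f=${Version}", package])
            return out.decode().strip() == review_version
        finally:
            # remove the unpacked bootstrap
            subprocess.check_call(["rm", "-rf", workdir])
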
- -from proboscis import test - -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.tests.base_test_case import TestBasic - -from gates_tests.helpers.utils import \ - check_package_version_injected_in_bootstraps -from gates_tests.helpers.utils import update_bootstrap_cli_yaml - - -@test(groups=["review_nailgun_agent"]) -class NailgunAgentGate(TestBasic): - """Using in CI-gates - Update nailgun-agent on master node, deploy one node environment""" - - @test(depends_on_groups=['prepare_release'], - groups=["review_nailgun_agent_one_node"]) - @log_snapshot_after_test - def gate_patch_nailgun_agent(self): - """ Revert snapshot, update nailgun-agent, deploy one node - - Scenario: - 1. Revert snapshot "ready" - 2. Update fuel_bootstrap_cli.yaml - 3. Rebuild bootstrap - 4. Bootstrap 1 slave - 5. Verify nailgun-agent version in ubuntu bootstrap image - 6. Create environment via FUEL CLI - 7. Assign controller role - 8. Deploy - - """ - if not settings.UPDATE_FUEL: - raise Exception("{} variable doesn't exist" - .format(settings.UPDATE_FUEL)) - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready") - - self.show_step(2) - update_bootstrap_cli_yaml() - - self.show_step(3) - if settings.UPDATE_FUEL: - self.env.admin_actions.upload_packages( - local_packages_dir=settings.UPDATE_FUEL_PATH, - centos_repo_path=None, - ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU, - clean_target=True) - - uuid, bootstrap_location = \ - self.env.fuel_bootstrap_actions.build_bootstrap_image() - self.env.fuel_bootstrap_actions. \ - import_bootstrap_image(bootstrap_location) - self.env.fuel_bootstrap_actions. \ - activate_bootstrap_image(uuid) - - self.show_step(4) - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:1]) - - self.show_step(5) - check_package_version_injected_in_bootstraps("nailgun-agent") - - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - - self.show_step(6) - cmd = ('fuel env create --name={0} --release={1} --nst=tun ' - '--json'.format(self.__class__.__name__, release_id)) - env_result = self.ssh_manager.execute_on_remote( - ip=self.ssh_manager.admin_ip, - cmd=cmd, jsonify=True)['stdout_json'] - cluster_id = env_result['id'] - - self.show_step(7) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - } - ) - - cluster_id = self.fuel_web.get_last_created_cluster() - self.show_step(8) - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.env.make_snapshot("review_nailgun_agent_one_node") diff --git a/gates_tests/tests/test_review_fuel_web.py b/gates_tests/tests/test_review_fuel_web.py deleted file mode 100644 index 12ea58059..000000000 --- a/gates_tests/tests/test_review_fuel_web.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
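
Several of these gate tests create the cluster through the Fuel CLI and parse its JSON output instead of calling Nailgun directly. A minimal sketch of that invocation, with subprocess standing in for ssh_manager.execute_on_remote on the admin node (the release id in the usage line is illustrative):

    import json
    import subprocess

    def create_env(name, release_id, nst="tun"):
        # same CLI invocation as the tests above, parsed as JSON
        cmd = ("fuel env create --name={0} --release={1} "
               "--nst={2} --json".format(name, release_id, nst))
        out = subprocess.check_output(cmd, shell=True)
        return json.loads(out)["id"]

    # cluster_id = create_env("GateFuelWeb", 2)
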
- -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import UPDATE_FUEL -from fuelweb_test.tests.base_test_case import TestBasic -from fuelweb_test.tests.base_test_case import SetupEnvironment -from gates_tests.helpers import exceptions -from gates_tests.helpers.utils import replace_fuel_nailgun_rpm - - -@test(groups=['review_fuel_web']) -class GateFuelWeb(TestBasic): - """Using in fuel-web CI-gates - Update fuel-web packages during installation - of master node, deploy environment""" - - @test(depends_on=[SetupEnvironment.setup_master], - groups=["review_fuel_web_deploy"]) - @log_snapshot_after_test - def gate_fuel_web(self): - """ - Scenario: - 1. Revert snapshot "empty" - 2. Apply changes into nailgun - 3. Get release id - 4. Update networks - 5. Bootstrap 3 nodes - 6. Create cluster - 7. Add 1 controller nodes - 8. Add 1 compute node - 9. Add 1 cinder node - 10. Run network verify - 11. Deploy environment - 12. Run network verify - 13. Run OSTF - """ - if not UPDATE_FUEL: - raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True') - self.show_step(1) - self.env.revert_snapshot("empty") - self.show_step(2) - replace_fuel_nailgun_rpm() - self.show_step(3) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - self.show_step(4) - self.fuel_web.change_default_network_settings() - self.show_step(5) - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:3]) - self.show_step(6) - cmd = ('fuel env create --name={0} --release={1} --nst=tun ' - '--json'.format(self.__class__.__name__, release_id)) - env_result = self.ssh_manager.execute_on_remote( - self.ssh_manager.admin_ip, cmd=cmd, jsonify=True)['stdout_json'] - cluster_id = env_result['id'] - - self.show_step(7) - self.show_step(8) - self.show_step(9) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'], - } - ) - self.show_step(10) - self.fuel_web.verify_network(cluster_id) - self.show_step(11) - self.fuel_web.deploy_cluster_wait(cluster_id) - self.show_step(12) - self.fuel_web.verify_network(cluster_id) - self.show_step(13) - # run only smoke according to sanity and ha ran in deploy_wait() - self.fuel_web.run_ostf(cluster_id=cluster_id, - test_sets=['smoke']) diff --git a/gates_tests/tests/test_review_in_astute.py b/gates_tests/tests/test_review_in_astute.py deleted file mode 100644 index 19590f326..000000000 --- a/gates_tests/tests/test_review_in_astute.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test import settings -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from gates_tests.helpers import utils - - -@test(groups=['review_astute']) -class GateAstute(TestBasic): - """Using in Astute CI-gates - Update Astute, create cluster, provision and deploy via CLI""" - - @test(depends_on=[SetupEnvironment.prepare_slaves_3], - groups=['review_astute_patched']) - @log_snapshot_after_test - def gate_patch_astute(self): - """ Revert 'ready_with_3_slaves' snapshot, - download package with astute changes, install package, - start deployment, check for regression - - Scenario: - 1. Revert environment ready_with_3_slaves - 2. Upload package - 3. Update Astute rpm package from review - 4. Update network configuration - 5. Create env - 6. Update nodes with roles: controller, compute, cinder - 7. Deploy cluster - 8. Start ostf tests to check that changes do not reproduce regression - """ - if not settings.UPDATE_FUEL: - raise Exception('UPDATE_FUEL variable is not set. ' - 'UPDATE_FUEL value is {}' - .format(settings.UPDATE_FUEL)) - - astute_service = 'astute' - package_name = 'rubygem-astute' - package_ext = '*.noarch.rpm' - target_path = '/var/www/nailgun/astute/' - - self.show_step(1) - self.env.revert_snapshot('ready_with_3_slaves') - - self.show_step(2) - self.ssh_manager.upload_to_remote( - self.ssh_manager.admin_ip, - source=settings.UPDATE_FUEL_PATH.rstrip('/'), - target=target_path) - - self.show_step(3) - pkg_path = os.path.join(target_path, - '{}{}'.format(package_name, package_ext)) - logger.debug('Package path is {0}'.format(pkg_path)) - full_package_name = utils.get_full_filename(wildcard_name=pkg_path) - logger.debug('Package name is {0}'.format(full_package_name)) - full_package_path = os.path.join(os.path.dirname(pkg_path), - full_package_name) - if not utils.does_new_pkg_equal_to_installed_pkg( - installed_package=package_name, - new_package=full_package_path): - utils.update_rpm(path=full_package_path) - utils.restart_service(service_name=astute_service, - timeout=10) - - self.show_step(4) - self.fuel_web.change_default_network_settings() - - self.show_step(5) - release_id = self.fuel_web.get_releases_list_for_os( - release_name=OPENSTACK_RELEASE)[0] - cmd = ('fuel env create --name={0} --release={1} ' - '--nst=tun --json'.format(self.__class__.__name__, - release_id)) - env_result = self.ssh_manager.execute_on_remote( - self.ssh_manager.admin_ip, - cmd=cmd, jsonify=True)['stdout_json'] - - self.show_step(6) - cluster_id = env_result['id'] - logger.debug('cluster id is {0}'.format(cluster_id)) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['compute'], - 'slave-03': ['cinder'] - } - ) - - self.show_step(7) - cluster_id = self.fuel_web.get_last_created_cluster() - self.fuel_web.deploy_cluster_wait(cluster_id) - - self.show_step(8) - self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke']) diff --git a/gates_tests/tests/test_review_in_fuel_agent.py b/gates_tests/tests/test_review_in_fuel_agent.py deleted file mode 100644 index b490e8ac6..000000000 --- a/gates_tests/tests/test_review_in_fuel_agent.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2015 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from proboscis import test - -from fuelweb_test.helpers.checkers import verify_bootstrap_on_node -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers import ironic_actions -from fuelweb_test import settings -from fuelweb_test.tests.test_ironic_base import TestIronicDeploy -from gates_tests.helpers import exceptions -from gates_tests.helpers.utils import ( - check_package_version_injected_in_bootstraps) -from gates_tests.helpers.utils import replace_rpm_package -from gates_tests.helpers.utils import update_bootstrap_cli_yaml - - -@test(groups=["review_fuel_agent"]) -class Gate(TestIronicDeploy): - """Used in fuel-agent CI gates. - - Update fuel-agent on the master node, bootstrap from review, - build environment images and provision one node. - """ - @test(depends_on_groups=['prepare_release'], - groups=["review_fuel_agent_ironic_deploy"]) - @log_snapshot_after_test - def gate_patch_fuel_agent(self): - """Revert snapshot, update fuel-agent, bootstrap from review - and provision one node. - - Scenario: - 1. Revert snapshot "ready" - 2. Update fuel-agent, fuel-bootstrap-cli on master node - 3. Update fuel_bootstrap_cli.yaml - 4. Rebuild bootstrap - 5. Verify fuel-agent version in ubuntu bootstrap image - 6. Bootstrap 2 slaves - 7. Verify Ubuntu bootstrap on slaves - 8. Add 1 node with controller role - 9. Add 1 node with ironic role - 10. Deploy the cluster - 11. Verify fuel-agent version in ironic-bootstrap - 12. Upload image to glance - 13. Enroll Ironic nodes - 14. Boot nova instance - 15. Check Nova instance status - - Snapshot review_fuel_agent_ironic_deploy - """ - if not settings.UPDATE_FUEL: - raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'true') - - self.show_step(1, initialize=True) - self.env.revert_snapshot("ready") - - self.show_step(2) - replace_rpm_package('fuel-agent') - replace_rpm_package('fuel-bootstrap-cli') - - self.show_step(3) - update_bootstrap_cli_yaml() - - self.show_step(4) - if settings.UPDATE_FUEL: - self.env.admin_actions.upload_packages( - local_packages_dir=settings.UPDATE_FUEL_PATH, - centos_repo_path=None, - ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU, - clean_target=True) - uuid, bootstrap_location = ( - self.env.fuel_bootstrap_actions.build_bootstrap_image()) - (self.env.fuel_bootstrap_actions. - import_bootstrap_image(bootstrap_location)) - (self.env.fuel_bootstrap_actions.
- activate_bootstrap_image(uuid)) - - self.show_step(5) - check_package_version_injected_in_bootstraps("fuel-agent") - - self.show_step(6) - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:2]) - - self.show_step(7) - for node in self.env.d_env.nodes().slaves[:2]: - _ip = self.fuel_web.get_nailgun_node_by_devops_node(node)['ip'] - verify_bootstrap_on_node(_ip, os_type="ubuntu", uuid=uuid) - - data = { - "net_provider": 'neutron', - "net_segment_type": settings.NEUTRON_SEGMENT['vlan'], - "ironic": True} - - nodes = { - 'slave-01': ['controller'], - 'slave-02': ['ironic']} - - self.show_step(8) - self.show_step(9) - self.show_step(10) - - cluster_id = self._deploy_ironic_cluster(settings=data, nodes=nodes) - - ironic_conn = ironic_actions.IronicActions( - self.fuel_web.get_public_vip(cluster_id)) - - self.show_step(11) - check_package_version_injected_in_bootstraps("fuel-agent", - cluster_id=cluster_id, - ironic=True) - - self.show_step(12) - self.show_step(13) - self._create_os_resources(ironic_conn) - self.show_step(14) - self._boot_nova_instances(ironic_conn) - - self.show_step(15) - ironic_conn.wait_for_vms(ironic_conn) - ironic_conn.verify_vms_connection(ironic_conn) - - self.env.make_snapshot("review_fuel_agent_ironic_deploy") diff --git a/gates_tests/tests/test_review_in_fuel_client.py b/gates_tests/tests/test_review_in_fuel_client.py deleted file mode 100644 index 9c28b008e..000000000 --- a/gates_tests/tests/test_review_in_fuel_client.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from __future__ import unicode_literals - -import os -import traceback - -from devops.helpers.helpers import wait -from proboscis import asserts -from proboscis import test - -from fuelweb_test import logger -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test.helpers.utils import get_package_version -from fuelweb_test.settings import OPENSTACK_RELEASE -from fuelweb_test.settings import UPDATE_FUEL -from fuelweb_test.settings import UPDATE_FUEL_PATH -from fuelweb_test.tests import test_cli_base -from fuelweb_test.tests.base_test_case import SetupEnvironment -from gates_tests.helpers import exceptions - - -@test(groups=["review_fuel"]) -class CreateDeployEnvironmentCli(test_cli_base.CommandLine): - """ - Check CRUD operations on a cluster via the fuel CLI tool.
- Executed for each review in openstack/python-fuelclient - """ - @staticmethod - def upload_package(remote, target_path, package_name): - logger.info('Copy changes') - try: - remote.upload(UPDATE_FUEL_PATH.rstrip('/'), target_path) - except OSError: - logger.debug(traceback.format_exc()) - raise exceptions.ConfigurationException( - 'Cannot find {0}, ' - 'please check exported variables'.format(UPDATE_FUEL_PATH)) - cmd = "ls -all {0} | grep {1}".format(target_path, package_name) - remote.check_call( - cmd, - error_info='Cannot upload changes to master node.') - - @staticmethod - def replace_package(remote, package_name, package_path): - cmd = "ls -all {0} | grep noarch.rpm | awk '{{print $9}}' ".format( - package_path) - result = remote.check_call(cmd, error_info='Failed package replace.') - package_from_review = result.stdout_str - income_version = get_package_version( - remote, os.path.join(package_path, package_from_review), - income=True) - logger.info('Version of package from review {0}'.format( - income_version)) - - installed_rpm = get_package_version( - remote, package_name) - logger.info('Version of installed package {0}'.format(installed_rpm)) - - if installed_rpm != income_version: - logger.info('Trying to install package {0}'.format( - package_from_review)) - - cmd = 'rpm -Uvh --oldpackage {0}{1}*.noarch.rpm'.format( - package_path, package_name) - install_result = remote.execute(cmd) - logger.debug('Install package result {0}'.format(install_result)) - installed_rpm = get_package_version( - remote, package_name) - - asserts.assert_equal( - installed_rpm, income_version, - 'Installation of package {0} from review ' - 'failed. Currently installed ' - 'package is {1}'.format(income_version, installed_rpm)) - - @test(depends_on=[SetupEnvironment.prepare_slaves_1], - groups=["review_fuel_client"]) - @log_snapshot_after_test - def review_fuel_cli_one_node_deploy(self): - """ Revert snapshot, apply changes from review and deploy - a cluster with a controller node only over the CLI. - - Scenario: - 1. Revert snapshot 'ready_with_1_slaves' - 2. Apply changes from review - 3. Bootstrap 1 node - 4. Show releases list - 5. Create cluster over cli - 6. Update networks - 7. Update SSL settings - 8. List environments - 9. Add and provision 1 node with controller role - 10. Delete cluster - - Duration 20m - """ - if not UPDATE_FUEL: - raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'true') - self.show_step(1, initialize=True) - self.env.revert_snapshot('ready_with_1_slaves') - target_path = '/var/www/nailgun/python-fuelclient/' - package_name = 'python-fuelclient' - with self.env.d_env.get_admin_remote() as remote: - self.show_step(2) - self.upload_package(remote, target_path, package_name) - self.replace_package(remote, package_name=package_name, - package_path=target_path) - - self.show_step(3) - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:1]) - - node_id = [self.fuel_web.get_nailgun_node_by_devops_node( - self.env.d_env.nodes().slaves[0])['id']] - - with self.env.d_env.get_admin_remote() as remote: - # get releases list - self.show_step(4) - list_release_cmd = 'fuel release --json' - list_release_res = remote.check_call(list_release_cmd).stdout_json - active_releases = [ - release for release - in list_release_res if release['is_deployable'] and - OPENSTACK_RELEASE.lower() in release['name'].lower()] - - active_release_id = [release['id'] for release in active_releases] - asserts.assert_true( - active_release_id, 'Cannot find a deployable release. 
' - 'Current release data {0}'.format(list_release_res)) - - logger.info('Available for deploy: \n{!s}'.format( - '\n'.join( - ['\tID: {id}\n' - '\tSTATE: {state}\n' - '\tNAME: {name}\n' - '\tVERSION: {version}\n' - '\tOS: {operating_system}\n'.format(**release) for - release in active_releases])) - ) - - # Create an environment - self.show_step(5) - cmd = ('fuel env create --name={0} --release={1} ' - '--nst=tun --json'.format(self.__class__.__name__, - active_release_id[0])) - - env_result = remote.check_call(cmd).stdout_json - cluster_id = env_result['id'] - cluster_name = env_result['name'] - - # Update network parameters - self.show_step(6) - self.update_cli_network_configuration(cluster_id) - - # Update SSL configuration - self.show_step(7) - self.update_ssl_configuration(cluster_id) - - self.show_step(8) - cmd = 'fuel env --json' - env_list_res = remote.check_call(cmd).stdout_json - asserts.assert_true( - cluster_id in [cluster['id'] for cluster in env_list_res], - 'Cannot find the created environment' - ' id in the fuel environment list.') - asserts.assert_true( - cluster_name in [cluster['name'] for cluster in env_list_res], - 'Cannot find cluster name in fuel env command output') - - # Add and provision a controller node - self.show_step(9) - logger.info("Add to the cluster and start provisioning " - "a controller node [{0}]".format(node_id[0])) - cmd = ('fuel --env-id={0} node set --node {1} --role=controller' - .format(cluster_id, node_id[0])) - remote.execute(cmd) - self.update_node_interfaces(node_id[0]) - cmd = ('fuel --env-id={0} node --provision --node={1} --json' - .format(cluster_id, node_id[0])) - task = remote.check_call(cmd).stdout_json - self.assert_cli_task_success(task, timeout=30 * 60) - - self.show_step(10) - remote.check_call( - 'fuel --env {0} env delete --force'.format(cluster_id)) - - wait(lambda: - remote.execute("fuel env | awk '{print $1}'" - " | tail -n 1 | grep '^.$'") - ['exit_code'] == 1, timeout=60 * 10, - timeout_msg='cluster {0} was not deleted'.format(cluster_id)) - - self.env.make_snapshot("review_fuel_cli_one_node_deploy") diff --git a/gates_tests/tests/test_review_in_ostf.py b/gates_tests/tests/test_review_in_ostf.py deleted file mode 100644 index 89dbae6a2..000000000 --- a/gates_tests/tests/test_review_in_ostf.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -from proboscis import SkipTest -from proboscis import test - -from fuelweb_test.helpers.decorators import log_snapshot_after_test -from fuelweb_test import settings -from fuelweb_test.tests.base_test_case import SetupEnvironment -from fuelweb_test.tests.base_test_case import TestBasic -from gates_tests.helpers import exceptions -from gates_tests.helpers.utils import update_ostf - - -@test(groups=["gate_ostf"]) -class GateOstf(TestBasic): - """Update fuel-ostf and - check how it works on a pre-deployed cluster. - Executed for each review in openstack/fuel-ostf""" - - # TODO: remove the skipped tests below and the run_ostf skip-args calls - # TODO: after bugs 1611712 and 1611713 are fixed - tests_to_skip = \ - ['Advanced stack actions: suspend, resume and check', - 'Typical stack actions: create, delete, show details, etc.', - 'Update stack actions: inplace, replace and update whole template', - 'Check stack rollback', - 'Instance live migration'] - - @test(depends_on=[SetupEnvironment.prepare_release], - groups=["gate_ostf_ceph_ha"]) - @log_snapshot_after_test - def gate_ostf_ceph_ha(self): - """Deploy ceph with cinder in HA mode - - Scenario: - 1. Create cluster - 2. Add 3 nodes with controller roles - 3. Add 3 nodes with compute and ceph OSD - 4. Deploy the cluster - 5. Run OSTF - - Duration 90m - Snapshot gate_ostf_ceph_ha - - """ - self.check_run('gate_ostf_ceph_ha') - - self.env.revert_snapshot("ready") - self.env.bootstrap_nodes( - self.env.d_env.nodes().slaves[:6]) - csettings = {} - csettings.update( - { - 'volumes_ceph': True, - 'images_ceph': True, - 'objects_ceph': True, - 'ephemeral_ceph': True, - 'volumes_lvm': False, - 'osd_pool_size': "3", - 'tenant': 'ceph1', - 'user': 'ceph1', - 'password': 'ceph1' - } - ) - cluster_id = self.fuel_web.create_cluster( - name=self.__class__.__name__, - mode=settings.DEPLOYMENT_MODE, - settings=csettings - ) - self.fuel_web.update_nodes( - cluster_id, - { - 'slave-01': ['controller'], - 'slave-02': ['controller'], - 'slave-03': ['controller'], - 'slave-04': ['compute', 'ceph-osd'], - 'slave-05': ['compute', 'ceph-osd'], - 'slave-06': ['compute', 'ceph-osd'], - } - ) - self.fuel_web.deploy_cluster_wait(cluster_id) - - all_test_suits = self.fuel_web.get_all_ostf_set_names(cluster_id) - test_to_execute = [ - suite for suite in all_test_suits - if suite not in ['configuration']] - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=test_to_execute, - should_fail=5, - failed_test_name=self.tests_to_skip) - - self.env.make_snapshot("gate_ostf_ceph_ha", is_make=True) - - @test(depends_on=[gate_ostf_ceph_ha], - groups=["gate_ostf_update"]) - @log_snapshot_after_test - def gate_ostf_update(self): - """ Update OSTF and run it on a deployed cluster - - Scenario: - 1. Revert snapshot "gate_ostf_ceph_ha" - 2. Update ostf - 3. Check ceph cluster health - 4. 
Run ostf - - Duration 35m - - """ - if not settings.UPDATE_FUEL: - raise exceptions.ConfigurationException( - 'Variable "UPDATE_FUEL" was not set to true') - self.show_step(1, initialize=True) - if not self.env.revert_snapshot( - 'gate_ostf_ceph_ha'): - raise SkipTest('Snapshot gate_ostf_ceph_ha not found') - self.show_step(2) - update_ostf() - cluster_id = self.fuel_web.get_last_created_cluster() - self.show_step(3) - self.fuel_web.check_ceph_status(cluster_id, recovery_timeout=500) - self.show_step(4) - all_test_suits = self.fuel_web.get_all_ostf_set_names(cluster_id) - test_to_execute = [ - suite for suite in all_test_suits - if suite not in ['configuration']] - self.fuel_web.run_ostf( - cluster_id=cluster_id, - test_sets=test_to_execute, - should_fail=5, - failed_test_name=self.tests_to_skip) diff --git a/packages_tests/deb/deployment/ceph/ceph-deploy/test.yaml b/packages_tests/deb/deployment/ceph/ceph-deploy/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/ceph-deploy/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/libleveldb1/test.yaml b/packages_tests/deb/deployment/ceph/libleveldb1/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/libleveldb1/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/libsnappy1/test.yaml b/packages_tests/deb/deployment/ceph/libsnappy1/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/libsnappy1/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/libtcmalloc-minimal4/test.yaml b/packages_tests/deb/deployment/ceph/libtcmalloc-minimal4/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/libtcmalloc-minimal4/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/libunwind8/test.yaml b/packages_tests/deb/deployment/ceph/libunwind8/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/libunwind8/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/python-blinker/test.yaml b/packages_tests/deb/deployment/ceph/python-blinker/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/python-blinker/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/python-flask/test.yaml b/packages_tests/deb/deployment/ceph/python-flask/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/python-flask/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/python-itsdangerous/test.yaml b/packages_tests/deb/deployment/ceph/python-itsdangerous/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/python-itsdangerous/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/python-pyinotify/test.yaml b/packages_tests/deb/deployment/ceph/python-pyinotify/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- 
a/packages_tests/deb/deployment/ceph/python-pyinotify/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/python-werkzeug/test.yaml b/packages_tests/deb/deployment/ceph/python-werkzeug/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/python-werkzeug/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/test.yaml b/packages_tests/deb/deployment/ceph/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/ceph/xfsprogs/test.yaml b/packages_tests/deb/deployment/ceph/xfsprogs/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/ceph/xfsprogs/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/cinder/cinder-backup/test.yaml b/packages_tests/deb/deployment/cinder/cinder-backup/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/deb/deployment/cinder/cinder-backup/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/deb/deployment/cinder/test.yaml b/packages_tests/deb/deployment/cinder/test.yaml deleted file mode 100644 index b6927ac7b..000000000 --- a/packages_tests/deb/deployment/cinder/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - cinder \ No newline at end of file diff --git a/packages_tests/deb/deployment/general/test.yaml b/packages_tests/deb/deployment/general/test.yaml deleted file mode 100644 index 4c0ba1055..000000000 --- a/packages_tests/deb/deployment/general/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - deployment \ No newline at end of file diff --git a/packages_tests/deb/deployment/glance/test.yaml b/packages_tests/deb/deployment/glance/test.yaml deleted file mode 100644 index b4316b74c..000000000 --- a/packages_tests/deb/deployment/glance/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - glance \ No newline at end of file diff --git a/packages_tests/deb/deployment/neutron/dkms/test.yaml b/packages_tests/deb/deployment/neutron/dkms/test.yaml deleted file mode 100644 index b376b1564..000000000 --- a/packages_tests/deb/deployment/neutron/dkms/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron diff --git a/packages_tests/deb/deployment/neutron/libipset3/test.yaml b/packages_tests/deb/deployment/neutron/libipset3/test.yaml deleted file mode 100644 index b376b1564..000000000 --- a/packages_tests/deb/deployment/neutron/libipset3/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron diff --git a/packages_tests/deb/deployment/neutron/neutron-common/test.yaml b/packages_tests/deb/deployment/neutron/neutron-common/test.yaml deleted file mode 100644 index 0131e6230..000000000 --- a/packages_tests/deb/deployment/neutron/neutron-common/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/deb/deployment/neutron/test.yaml b/packages_tests/deb/deployment/neutron/test.yaml deleted file mode 100644 index b376b1564..000000000 --- a/packages_tests/deb/deployment/neutron/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron diff --git 
a/packages_tests/deb/deployment/nova/nova-compute-kvm/test.yaml b/packages_tests/deb/deployment/nova/nova-compute-kvm/test.yaml deleted file mode 100644 index 340bf9df1..000000000 --- a/packages_tests/deb/deployment/nova/nova-compute-kvm/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova-compute \ No newline at end of file diff --git a/packages_tests/deb/deployment/nova/nova-compute-libvirt/test.yaml b/packages_tests/deb/deployment/nova/nova-compute-libvirt/test.yaml deleted file mode 100644 index 406ea608e..000000000 --- a/packages_tests/deb/deployment/nova/nova-compute-libvirt/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova_compute \ No newline at end of file diff --git a/packages_tests/deb/deployment/nova/nova-compute-qemu/test.yaml b/packages_tests/deb/deployment/nova/nova-compute-qemu/test.yaml deleted file mode 100644 index 406ea608e..000000000 --- a/packages_tests/deb/deployment/nova/nova-compute-qemu/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova_compute \ No newline at end of file diff --git a/packages_tests/deb/deployment/nova/nova-compute/test.yaml b/packages_tests/deb/deployment/nova/nova-compute/test.yaml deleted file mode 100644 index 406ea608e..000000000 --- a/packages_tests/deb/deployment/nova/nova-compute/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova_compute \ No newline at end of file diff --git a/packages_tests/deb/deployment/nova/nova-network/test.yaml b/packages_tests/deb/deployment/nova/nova-network/test.yaml deleted file mode 100644 index edd884dbd..000000000 --- a/packages_tests/deb/deployment/nova/nova-network/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova \ No newline at end of file diff --git a/packages_tests/deb/deployment/nova/test.yaml b/packages_tests/deb/deployment/nova/test.yaml deleted file mode 100644 index edd884dbd..000000000 --- a/packages_tests/deb/deployment/nova/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova \ No newline at end of file diff --git a/packages_tests/deb/deployment/swift/test.yaml b/packages_tests/deb/deployment/swift/test.yaml deleted file mode 100644 index c0ef15938..000000000 --- a/packages_tests/deb/deployment/swift/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - swift diff --git a/packages_tests/deb/deployment/test.yaml b/packages_tests/deb/deployment/test.yaml deleted file mode 100644 index 6d07d54cd..000000000 --- a/packages_tests/deb/deployment/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - deployment diff --git a/packages_tests/deb/master/test.yaml b/packages_tests/deb/master/test.yaml deleted file mode 100644 index d1da59516..000000000 --- a/packages_tests/deb/master/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - master \ No newline at end of file diff --git a/packages_tests/deb/packages.yaml b/packages_tests/deb/packages.yaml deleted file mode 100644 index 28ed417b9..000000000 --- a/packages_tests/deb/packages.yaml +++ /dev/null @@ -1,3141 +0,0 @@ -provisioning: - projects: - - name: accountsservice - packages: - - libaccountsservice0 - - name: acl - packages: - - libacl1 - - name: apparmor - packages: - - libapparmor-perl - - libapparmor1 - - name: apt - packages: - - apt-transport-https - - apt-utils - - libapt-inst1.5 - - libapt-pkg4.12 - - name: aptitude - packages: - - aptitude-common - - name: attr - packages: - - libattr1 - - name: audit - packages: - - libaudit-common - - libaudit1 - - name: augeas - packages: - - 
augeas-lenses - - libaugeas0 - - name: autogen - packages: - - libopts25 - - name: bind9 - packages: - - bind9-host - - dnsutils - - libbind9-90 - - libdns100 - - libisc95 - - libisccc90 - - libisccfg90 - - liblwres90 - - name: boost1.54 - packages: - - libboost-iostreams1.54.0 - - name: busybox - packages: - - busybox-initramfs - - busybox-static - - name: bzip2 - packages: - - libbz2-1.0 - - name: cdebconf - packages: - - libdebconfclient0 - - name: cgmanager - packages: - - libcgmanager0 - - name: chardet - packages: - - python-chardet - - name: cmd2 - packages: - - python-cmd2 - - name: command-not-found - packages: - - command-not-found-data - - python3-commandnotfound - - name: console-setup - packages: - - keyboard-configuration - - name: consolekit - packages: - - libck-connector0 - - name: curl - packages: - - libcurl3 - - libcurl3-gnutls - - name: cwidget - packages: - - libcwidget3 - - name: cyrus-sasl2 - packages: - - libsasl2-2 - - libsasl2-modules - - libsasl2-modules-db - - name: db5.3 - packages: - - libdb5.3 - - name: dbus - packages: - - libdbus-1-3 - - name: dbus-glib - packages: - - libdbus-glib-1-2 - - name: dbus-python - packages: - - python3-dbus - - name: debconf - packages: - - debconf-i18n - - debconf-utils - - name: discover - packages: - - libdiscover2 - - name: e2fsprogs - packages: - - e2fslibs - - libcomerr2 - - libss2 - - name: eglibc - packages: - - libc-bin - - libc6 - - multiarch-support - - name: elfutils - packages: - - libelf1 - - name: expat - packages: - - libexpat1 - - name: file - packages: - - libmagic1 - - name: freetype - packages: - - libfreetype6 - - name: fribidi - packages: - - libfribidi0 - - name: fuse - packages: - - libfuse2 - - name: gcc-4.8 - packages: - - gcc-4.8-base - - libstdc++6 - - name: gccgo-4.9 - packages: - - gcc-4.9-base - - libgcc1 - - name: gcr - packages: - - libgck-1-0 - - libgcr-3-common - - libgcr-base-3-1 - - name: gdbm - packages: - - libgdbm3 - - name: geoip - packages: - - libgeoip1 - - name: gettext - packages: - - gettext-base - - libasprintf0c2 - - name: glib2.0 - packages: - - libglib2.0-0 - - libglib2.0-data - - name: gnupg - packages: - - gpgv - - name: gnutls26 - packages: - - libgnutls-openssl27 - - libgnutls26 - - name: gobject-introspection - packages: - - gir1.2-glib-2.0 - - libgirepository-1.0-1 - - name: gpm - packages: - - libgpm2 - - name: groff - packages: - - groff-base - - name: grub2 - packages: - - grub-common - - grub-pc - - grub-pc-bin - - grub2-common - - name: heimdal - packages: - - libasn1-8-heimdal - - libgssapi3-heimdal - - libhcrypto4-heimdal - - libheimbase1-heimdal - - libheimntlm0-heimdal - - libhx509-5-heimdal - - libkrb5-26-heimdal - - libroken18-heimdal - - libwind0-heimdal - - name: hiera - packages: - - ruby-hiera - - name: icu - packages: - - libicu52 - - name: initramfs-tools - packages: - - initramfs-tools-bin - - name: iptables - packages: - - libxtables10 - - name: iputils - packages: - - iputils-ping - - iputils-tracepath - - name: isc-dhcp - packages: - - isc-dhcp-client - - isc-dhcp-common - - name: jquery - packages: - - libjs-jquery - - name: json-c - packages: - - libjson-c2 - - libjson0 - - name: keyutils - packages: - - libkeyutils1 - - name: klibc - packages: - - klibc-utils - - libklibc - - name: kmod - packages: - - libkmod2 - - module-init-tools - - name: krb5 - packages: - - krb5-locales - - libgssapi-krb5-2 - - libk5crypto3 - - libkrb5-3 - - libkrb5support0 - - name: langpack-locales - packages: - - locales - - name: language-selector - packages: - - 
language-selector-common - - name: libbsd - packages: - - libbsd0 - - name: libcap-ng - packages: - - libcap-ng0 - - name: libcap2 - packages: - - libcap2-bin - - libpam-cap - - name: libdrm - packages: - - libdrm2 - - name: libedit - packages: - - libedit2 - - name: libept - packages: - - libept1.4.12 - - name: libestr - packages: - - libestr0 - - name: libffi - packages: - - libffi6 - - name: libgpg-error - packages: - - libgpg-error0 - - name: libidn - packages: - - libidn11 - - name: liblockfile - packages: - - liblockfile-bin - - liblockfile1 - - name: libnfnetlink - packages: - - libnfnetlink0 - - name: libnih - packages: - - libnih-dbus1 - - libnih1 - - name: libnl3 - packages: - - libnl-3-200 - - libnl-genl-3-200 - - name: libpcap - packages: - - libpcap0.8 - - name: libpipeline - packages: - - libpipeline1 - - name: libpng - packages: - - libpng12-0 - - name: libselinux - packages: - - libselinux1 - - name: libsemanage - packages: - - libsemanage-common - - libsemanage1 - - name: libsepol - packages: - - libsepol1 - - name: libsigc++-2.0 - packages: - - libsigc++-2.0-0c2a - - name: libusb - packages: - - libusb-0.1-4 - - name: libusbx - packages: - - libusb-1.0-0 - - name: libx11 - packages: - - libx11-6 - - libx11-data - - name: libxau - packages: - - libxau6 - - name: libxcb - packages: - - libxcb1 - - name: libxdmcp - packages: - - libxdmcp6 - - name: libxext - packages: - - libxext6 - - name: libxmu - packages: - - libxmuu1 - - name: libyaml - packages: - - libyaml-0-2 - - name: linux - packages: - - linux-headers-3.13.0-46 - - linux-headers-3.13.0-46-generic - - linux-image-3.13.0-46-generic - - linux-image-extra-3.13.0-46-generic - - name: linux-meta - packages: - - linux-headers-generic - - linux-headers-generic-lts-trusty - - linux-image-generic - - linux-image-generic-lts-trusty - - name: lsb - packages: - - lsb-base - - lsb-release - - name: lvm2 - packages: - - dmsetup - - libdevmapper-event1.02.1 - - libdevmapper1.02.1 - - name: mcollective - packages: - - mcollective-common - - name: mpdecimal - packages: - - libmpdec2 - - name: mtr - packages: - - mtr-tiny - - name: ncurses - packages: - - libncurses5 - - libncursesw5 - - libtinfo5 - - ncurses-base - - ncurses-bin - - name: netkit-ftp - packages: - - ftp - - name: netkit-telnet - packages: - - telnet - - name: newt - packages: - - libnewt0.52 - - whiptail - - name: ntp - packages: - - ntpdate - - name: numactl - packages: - - libnuma1 - - name: openldap - packages: - - libldap-2.4-2 - - name: openssh - packages: - - openssh-client - - openssh-server - - openssh-sftp-server - - name: openssl - packages: - - libssl1.0.0 - - name: p11-kit - packages: - - libp11-kit0 - - name: pam - packages: - - libpam-modules - - libpam-modules-bin - - libpam-runtime - - libpam0g - - name: parted - packages: - - libparted0debian1 - - name: pciutils - packages: - - libpci3 - - name: pcre3 - packages: - - libpcre3 - - name: perl - packages: - - perl-base - - perl-modules - - name: plymouth - packages: - - libplymouth2 - - plymouth-theme-ubuntu-text - - name: policykit-1 - packages: - - libpolkit-gobject-1-0 - - name: popt - packages: - - libpopt0 - - name: prettytable - packages: - - python-prettytable - - name: procps - packages: - - libprocps3 - - name: puppet - packages: - - puppet-common - - name: pygobject - packages: - - python3-gi - - name: pyparsing - packages: - - python-pyparsing - - name: python-apt - packages: - - python-apt-common - - python3-apt - - name: python-cliff - packages: - - python-cliff-doc - - name: 
python-defaults - packages: - - libpython-stdlib - - python - - python-minimal - - name: python-setuptools - packages: - - python-pkg-resources - - name: python2.7 - packages: - - libpython2.7 - - libpython2.7-minimal - - libpython2.7-stdlib - - python2.7-minimal - - name: python3-defaults - packages: - - libpython3-stdlib - - python3 - - python3-minimal - - name: python3-stdlib-extensions - packages: - - python3-gdbm - - name: python3.4 - packages: - - libpython3.4-minimal - - libpython3.4-stdlib - - python3.4-minimal - - name: pyyaml - packages: - - python-yaml - - name: readline5 - packages: - - libreadline5 - - name: readline6 - packages: - - libreadline6 - - readline-common - - name: rethtool - packages: - - ruby-rethtool - - name: rtmpdump - packages: - - librtmp0 - - name: ruby-defaults - packages: - - ruby - - name: ruby1.9.1 - packages: - - libruby1.9.1 - - name: scapy - packages: - - python-scapy - - name: shadow - packages: - - login - - passwd - - name: six - packages: - - python-six - - name: slang2 - packages: - - libslang2 - - name: sphinx - packages: - - libjs-sphinxdoc - - name: sqlite3 - packages: - - libsqlite3-0 - - name: stevedore - packages: - - python-stevedore - - name: systemd - packages: - - libpam-systemd - - libsystemd-daemon0 - - libsystemd-login0 - - libudev1 - - systemd-services - - udev - - name: sysvinit - packages: - - initscripts - - sysv-rc - - sysvinit-utils - - name: tasksel - packages: - - tasksel-data - - name: tcp-wrappers - packages: - - libwrap0 - - name: texinfo - packages: - - info - - install-info - - name: ubuntu-meta - packages: - - ubuntu-minimal - - ubuntu-standard - - name: ubuntu-release-upgrader - packages: - - python3-distupgrade - - ubuntu-release-upgrader-core - - name: undefined - packages: - - accountsservice - - acl - - adduser - - anacron - - apparmor - - apt - - apt-xapian-index - - aptitude - - base-files - - base-passwd - - bash - - bash-completion - - bsdmainutils - - bzip2 - - ca-certificates - - cliff-tablib - - command-not-found - - console-setup - - coreutils - - cpio - - crda - - cron - - curl - - daemonize - - dash - - dbus - - debconf - - debianutils - - dh-python - - diffutils - - discover - - discover-data - - dmidecode - - dosfstools - - dpkg - - e2fsprogs - - ed - - eject - - facter - - file - - findutils - - friendly-recovery - - fuse - - gdisk - - geoip-database - - gnupg - - grep - - grub-gfxpayload-lists - - gzip - - hdparm - - hostname - - ifupdown - - init-system-helpers - - initramfs-tools - - insserv - - installation-report - - iproute2 - - iptables - - irqbalance - - iso-codes - - kbd - - kmod - - language-pack-en - - language-pack-en-base - - language-pack-gnome-en - - language-pack-gnome-en-base - - laptop-detect - - less - - libarchive-extract-perl - - libcap2 - - libclass-accessor-perl - - libgcrypt11 - - libio-string-perl - - liblocale-gettext-perl - - liblog-message-simple-perl - - libmodule-pluggable-perl - - libparse-debianchangelog-perl - - libpod-latex-perl - - libsub-name-perl - - libtasn1-6 - - libterm-ui-perl - - libtext-charwidth-perl - - libtext-iconv-perl - - libtext-soundex-perl - - libtext-wrapi18n-perl - - libtimedate-perl - - libxml2 - - linux-firmware - - lockfile-progs - - logrotate - - lshw - - lsof - - ltrace - - lvm2 - - makedev - - man-db - - manpages - - mawk - - mcollective - - mime-support - - mlocate - - mountall - - nailgun-agent - - nailgun-mcagents - - nailgun-net-check - - nano - - net-tools - - netbase - - netcat-openbsd - - ntfs-3g - - ntp - - ohai - - openssl - - 
os-prober - - parted - - pciutils - - perl - - plymouth - - popularity-contest - - powermgmt-base - - ppp - - pppconfig - - pppoeconf - - procps - - psmisc - - puppet - - python-amqp - - python-apt - - python-cliff - - python-daemonize - - python-debian - - python-pbr - - python-pypcap - - python-setuptools - - python-tablib - - python2.7 - - python3.4 - - resolvconf - - rsync - - rsyslog - - ruby-augeas - - ruby-cstruct - - ruby-httpclient - - ruby-ipaddress - - ruby-json - - ruby-mixlib-cli - - ruby-mixlib-config - - ruby-mixlib-log - - ruby-netaddr - - ruby-openstack - - ruby-rgen - - ruby-safe-yaml - - ruby-shadow - - ruby-sigar - - ruby-stomp - - ruby-systemu - - ruby-yajl - - ruby1.9.1 - - sed - - sensible-utils - - sgml-base - - shared-mime-info - - strace - - sudo - - systemd-shim - - tar - - tasksel - - tcpdump - - time - - tzdata - - ubuntu-keyring - - ucf - - ufw - - upstart - - ureadahead - - usbutils - - util-linux - - vim - - virt-what - - vlan - - watershed - - wget - - wireless-regdb - - xauth - - xml-core - - xz-utils - - name: underscore - packages: - - libjs-underscore - - name: update-manager - packages: - - python3-update-manager - - update-manager-core - - name: ustr - packages: - - libustr-1.0-1 - - name: util-linux - packages: - - bsdutils - - libblkid1 - - libmount1 - - libuuid1 - - mount - - uuid-runtime - - name: vim - packages: - - vim-common - - vim-runtime - - vim-tiny - - name: xapian-bindings - packages: - - python-xapian - - name: xapian-core - packages: - - libxapian22 - - name: xkeyboard-config - packages: - - xkb-data - - name: xz-utils - packages: - - liblzma5 - - name: zlib - packages: - - zlib1g -deployment: - projects: - - name: ceph - packages: - - ceph-common - - ceph-fs-common - - ceph-fuse - - ceph-mds - - libcephfs1 - - librados2 - - librbd1 - - python-ceph - - radosgw - - python-blinker - - python-flask - - libtcmalloc-minimal4 - - libleveldb1 - - libunwind8 - - python-pyinotify - - libsnappy1 - - ceph-deploy - - python-itsdangerous - - python-werkzeug - - xfsprogs - - name: cinder - packages: - - cinder-api - - cinder-backup - - cinder-common - - cinder-scheduler - - cinder-volume - - python-cinder - - name: glance - packages: - - glance-api - - glance-common - - glance-registry - - python-glance - - name: neutron - packages: - - neutron-common - - neutron-dhcp-agent - - neutron-l3-agent - - neutron-lbaas-agent - - neutron-metadata-agent - - neutron-metering-agent - - neutron-plugin-bigswitch - - neutron-plugin-bigswitch-agent - - neutron-plugin-brocade - - neutron-plugin-cisco - - neutron-plugin-hyperv - - neutron-plugin-ibm - - neutron-plugin-ibm-agent - - neutron-plugin-linuxbridge - - neutron-plugin-linuxbridge-agent - - neutron-plugin-metaplugin - - neutron-plugin-metering-agent - - neutron-plugin-midonet - - neutron-plugin-ml2 - - neutron-plugin-mlnx - - neutron-plugin-mlnx-agent - - neutron-plugin-nec - - neutron-plugin-nec-agent - - neutron-plugin-nicira - - neutron-plugin-oneconvergence - - neutron-plugin-oneconvergence-agent - - neutron-plugin-openflow-agent - - neutron-plugin-openvswitch - - neutron-plugin-openvswitch-agent - - neutron-plugin-plumgrid - - neutron-plugin-ryu - - neutron-plugin-ryu-agent - - neutron-plugin-vpn-agent - - neutron-server - - neutron-vpn-agent - - python-neutron - - libipset3 - - openvswitch-common - - openvswitch-switch - - dkms - - ipset - - openvswitch-datapath-dkms - - python-jsonrpclib - - name: nova - packages: - - nova-api - - nova-cert - - nova-common - - nova-compute - - nova-compute-kvm - - 
nova-compute-libvirt - - nova-compute-qemu - - nova-conductor - - nova-consoleauth - - nova-network - - nova-novncproxy - - nova-objectstore - - nova-scheduler - - python-nova - - name: swift - packages: - - python-swift - - swift-account - - swift-container - - swift-object - - swift-proxy - - name: general - packages: - - alsa-base-udeb - - libasound2 - - libasound2-data - - alsa-utils-udeb - - libjs-angularjs - - apache2-bin - - apache2-data - - dh-apparmor - - libapr1 - - libaprutil1 - - libaprutil1-dbd-sqlite3 - - libaprutil1-ldap - - apt-cdrom-setup - - apt-mirror-setup - - apt-setup-udeb - - libatk-adaptor-udeb - - libatk-bridge-2.0-0-udeb - - libatk1.0-udeb - - attr-udeb - - libattr1-udeb - - libavahi-client3 - - libavahi-common-data - - libavahi-common3 - - libavahi-common3-udeb - - libavahi-core7-udeb - - lsb-release-udeb - - bootstrap-base - - python-bs4 - - beep-udeb - - binutils-static-udeb - - biosdevname-udeb - - libblas3 - - libbluetooth3 - - bogl-bterm-udeb - - libboost-filesystem1.54.0 - - libboost-program-options1.54.0 - - libboost-system1.54.0 - - libboost-thread1.54.0 - - libbrlapi0.6 - - brltty-udeb - - btrfs-tools-udeb - - busybox-udeb - - libc-ares2 - - libcairo2-udeb - - cdebconf-gtk-udeb - - cdebconf-newt-udeb - - cdebconf-priority - - cdebconf-text-udeb - - cdebconf-udeb - - libdebconfclient0-udeb - - cdebconf-gtk-entropy - - cdebconf-newt-entropy - - cdebconf-text-entropy - - cdebconf-newt-detect-keys - - cdebconf-gtk-terminal - - cdebconf-newt-terminal - - cdebootstrap-udeb - - genisoimage - - load-cdrom - - ceilometer-agent-central - - ceilometer-agent-compute - - ceilometer-agent-notification - - ceilometer-alarm-evaluator - - ceilometer-alarm-notifier - - ceilometer-api - - ceilometer-collector - - ceilometer-common - - python-ceilometer - - python-cheetah - - choose-mirror-bin - - libcloog-isl4 - - cloud-guest-utils - - liblrm2 - - libpils2 - - libplumb2 - - libplumbgpl2 - - libstonith1 - - python-cm-api - - cobbler-enlist-udeb - - python-configobj - - console-keymaps-acorn - - console-keymaps-amiga - - console-keymaps-atari - - console-keymaps-at - - console-keymaps-dec - - console-keymaps-mac - - console-keymaps-sun - - console-keymaps-usb - - console-setup-amiga-ekmap - - console-setup-ataritt-ekmap - - console-setup-fonts-udeb - - console-setup-macintoshold-ekmap - - console-setup-pc-ekmap - - console-setup-sun4-ekmap - - console-setup-sun5-ekmap - - console-setup-udeb - - libcfg6 - - libcmap4 - - libcorosync-common4 - - libcpg4 - - libquorum5 - - libsam4 - - libtotem-pg5 - - libvotequorum6 - - cpuburn-udeb - - libcpufreq0 - - libcrack2-udeb - - cryptsetup-bin - - libcryptsetup4 - - cryptsetup-udeb - - libcryptsetup4-udeb - - libcups2 - - libcupsimage2 - - libcupsfilters1 - - curl-udeb - - libcurl3-udeb - - python-d2to1 - - libjs-d3 - - python-dbus - - python-dbus-dev - - debian-archive-keyring-udeb - - debian-installer-udebs - - di-utils-exit-installer - - di-utils-mapdevfs - - di-utils-reboot - - di-utils-shell - - di-utils-terminfo - - di-utils - - debian-ports-archive-keyring-udeb - - debootstrap-udeb - - libfdt1 - - devio-udeb - - libdirectfb-1.2-9-udeb - - libdirectfb-bin-udeb - - libdistro-info-perl - - python-django-floppyforms - - dmidecode-udeb - - dmraid-udeb - - libdmraid1.0.0.rc16-udeb - - dnsmasq-base - - dnsmasq-utils - - python-dnspython - - dosfstools-udeb - - dpkg-dev - - libdpkg-perl - - e2fsprogs-udeb - - libc-dev-bin - - libc6-dbg - - libc6-dev - - libc6-udeb - - libnss-dns-udeb - - libnss-files-udeb - - eject-udeb - - libdw1 - - 
emdebian-archive-keyring-udeb - - erlang-asn1 - - erlang-base - - erlang-corba - - erlang-crypto - - erlang-dev - - erlang-diameter - - erlang-edoc - - erlang-eldap - - erlang-erl-docgen - - erlang-eunit - - erlang-ic - - erlang-inets - - erlang-mnesia - - erlang-nox - - erlang-odbc - - erlang-os-mon - - erlang-parsetools - - erlang-percept - - erlang-public-key - - erlang-runtime-tools - - erlang-snmp - - erlang-ssh - - erlang-ssl - - erlang-syntax-tools - - erlang-tools - - erlang-webtool - - erlang-xmerl - - espeak-data-udeb - - espeakup-udeb - - libexpat1-udeb - - libfakeroot - - fbset-udeb - - python-feedparser - - python3-magic - - libflac8 - - fontconfig-config - - libfontconfig1 - - fontconfig-udeb - - fonts-dejavu-core - - ttf-dejavu-core - - fonts-dejavu-mono-udeb - - fonts-dejavu-udeb - - fonts-farsiweb-udeb - - fonts-freefont-udeb - - fonts-knda-udeb - - fonts-khmeros-udeb - - fonts-lao-udeb - - fonts-lklug-sinhala-udeb - - fonts-telu-udeb - - fonts-taml-udeb - - fonts-sil-abyssinica-udeb - - fonts-sil-padauk-udeb - - fonts-sil-scheherazade-udeb - - fonts-mlym-udeb - - fonts-tibetan-machine-udeb - - fonts-thai-tlwg-udeb - - fonts-ukij-uyghur-udeb - - libfreetype6-udeb - - libfribidi0-udeb - - fuse-udeb - - libfuse2-udeb - - cpp-4.8 - - g++-4.8 - - libasan0 - - libatomic1 - - libgcc-4.8-dev - - libgfortran3 - - libgomp1 - - libitm1 - - libquadmath0 - - libstdc++-4.8-dev - - libtsan0 - - cpp - - g++ - - gcc - - libgdk-pixbuf2.0-0-udeb - - libasprintf-dev - - libgettextpo-dev - - libgettextpo0 - - libgs9 - - libgs9-common - - git-man - - libglib2.0-udeb - - libgmp10 - - gnupg-udeb - - gpgv-udeb - - libgoogle-perftools4 - - grub-mount-udeb - - libgtk2.0-0-udeb - - libgtk-3-0-udeb - - gtk2-engines-udeb - - hardening-includes - - libharfbuzz0-udeb - - hdparm-udeb - - libheartbeat2 - - heat-api - - heat-api-cfn - - heat-api-cloudwatch - - heat-common - - heat-docker - - heat-engine - - python-heat - - libhivex0 - - libwin-hivex-perl - - openstack-dashboard - - python-django-horizon - - archdetect - - disk-detect - - driver-injection-disk-detect - - ethdetect - - libijs-0.35 - - save-logs - - libio-stringy-perl - - libiperf0 - - iproute - - iputils-arping - - ipxe-qemu - - isc-dhcp-client-udeb - - libisl10 - - load-iso - - libjbig2dec0 - - libjbig0 - - jfsutils-udeb - - python-jinja2 - - libjs-jquery-metadata - - libjs-jquery-tablesorter - - libjs-jquery-ui - - libjson-glib-1.0-0 - - libjson-glib-1.0-common - - python-kazoo - - kbd-udeb - - kexec-tools-udeb - - python-keystone - - initrd-kickseed - - kickseed-common - - klibc-utils-floppy-udeb - - klibc-utils-udeb - - libklibc-udeb - - libkmod2-udeb - - liblapack3 - - liblcms2-2 - - node-less - - libaio1 - - libaio1-udeb - - libapache2-mod-fastcgi - - libasyncns0 - - libbsd0-udeb - - libcaca0 - - libconfig9 - - libcroco3 - - libdebian-installer-extra4-udeb - - libdebian-installer4-udeb - - libdrm-intel1 - - libdrm-nouveau2 - - libdrm-radeon1 - - libdrm2-udeb - - eatmydata - - libesmtp6 - - libevent-2.0-5 - - libfakekey0-udeb - - libfcgi0ldbl - - libffi6-udeb - - libfontenc1-udeb - - libgcrypt11-udeb - - libgd3 - - libgpg-error0-udeb - - libgssglue1 - - libguestfs-perl - - libguestfs-tools - - libguestfs0 - - libibverbs1 - - libiksemel3 - - libjpeg-turbo8 - - libjpeg8 - - ldmtool - - libldm-1.0-0 - - liblinear-tools - - liblinear1 - - libmemcached10 - - libmnl0 - - libnet1 - - libnetfilter-conntrack3 - - libnfsidmap2 - - libnl1 - - libnl-route-3-200 - - libnl-3-200-udeb - - libnl-genl-3-200-udeb - - libogg0 - - libpaper-utils - - 
libpaper1 - - libpciaccess0 - - libpciaccess0-udeb - - libpgm-5.1-0 - - libpng12-0-udeb - - libqb0 - - librabbitmq1 - - librdmacm1 - - libsdl1.2debian - - libseccomp2 - - libsigsegv2 - - libsmi2ldbl - - smitools - - libsndfile1 - - libssh2-1 - - libtextwrap1-udeb - - libtirpc1 - - libltdl7 - - libunistring0 - - libusb-0.1-udeb - - libusb-1.0-0-udeb - - libv8-3.14.5 - - libvirt-bin - - libvirt0 - - python-libvirt - - libvorbis0a - - libvorbisenc2 - - libvpx1 - - libx11-xcb1 - - libx11-6-udeb - - libx86-1 - - libxau6-udeb - - libxcb-dri2-0 - - libxcb-dri3-0 - - libxcb-glx0 - - libxcb-present0 - - libxcb-sync1 - - libxcb1-udeb - - libxcursor1-udeb - - libxdamage1 - - libxdmcp6-udeb - - libxext6-udeb - - libxfixes3 - - libxfixes3-udeb - - libxfont1-udeb - - libxi6-udeb - - libxinerama1-udeb - - libxkbfile1-udeb - - libxml2-utils - - python-libxml2 - - libxml2-udeb - - libxpm4 - - libxrender1-udeb - - libxshmfence1 - - libxshmfence1-udeb - - libxslt1.1 - - libxtst6-udeb - - libxxf86vm1 - - linux-headers-3.13.0-45 - - linux-headers-3.13.0-45-generic - - linux-image-3.13.0-45-generic - - linux-libc-dev - - block-modules-3.13.0-45-generic-di - - crypto-modules-3.13.0-45-generic-di - - fat-modules-3.13.0-45-generic-di - - fb-modules-3.13.0-45-generic-di - - firewire-core-modules-3.13.0-45-generic-di - - floppy-modules-3.13.0-45-generic-di - - fs-core-modules-3.13.0-45-generic-di - - fs-secondary-modules-3.13.0-45-generic-di - - input-modules-3.13.0-45-generic-di - - ipmi-modules-3.13.0-45-generic-di - - irda-modules-3.13.0-45-generic-di - - kernel-image-3.13.0-45-generic-di - - linux-udebs-generic - - md-modules-3.13.0-45-generic-di - - message-modules-3.13.0-45-generic-di - - mouse-modules-3.13.0-45-generic-di - - multipath-modules-3.13.0-45-generic-di - - nfs-modules-3.13.0-45-generic-di - - nic-modules-3.13.0-45-generic-di - - nic-pcmcia-modules-3.13.0-45-generic-di - - nic-shared-modules-3.13.0-45-generic-di - - nic-usb-modules-3.13.0-45-generic-di - - parport-modules-3.13.0-45-generic-di - - pata-modules-3.13.0-45-generic-di - - pcmcia-modules-3.13.0-45-generic-di - - pcmcia-storage-modules-3.13.0-45-generic-di - - plip-modules-3.13.0-45-generic-di - - ppp-modules-3.13.0-45-generic-di - - sata-modules-3.13.0-45-generic-di - - scsi-modules-3.13.0-45-generic-di - - serial-modules-3.13.0-45-generic-di - - speakup-modules-3.13.0-45-generic-di - - squashfs-modules-3.13.0-45-generic-di - - storage-core-modules-3.13.0-45-generic-di - - usb-modules-3.13.0-45-generic-di - - virtio-modules-3.13.0-45-generic-di - - vlan-modules-3.13.0-45-generic-di - - libatm1 - - nic-firmware - - scsi-firmware - - linux-lts-utopic-udebs-generic - - kernel-signed-image-3.13.0-45-generic-di - - libsctp1 - - libllvm3.4 - - libsensors4 - - lowmemcheck - - ltsp-client-builder - - liblua5.2-0 - - dmsetup-udeb - - libdevmapper1.02.1-udeb - - lvm2-udeb - - lvmcfg-utils - - python-lxml - - liblzo2-2 - - liblzo2-2-udeb - - python-m2crypto - - maas-enlist-udeb - - make - - kdump-tools - - python-mako - - manpages-dev - - python-markupsafe - - matchbox-keyboard-udeb - - matchbox-window-manager-udeb - - mbr-udeb - - mc-data - - mcollective-client - - mdadm-udeb - - mdcfg-utils - - load-media - - libgl1-mesa-dri - - libgl1-mesa-glx - - libglapi-mesa - - python-migrate - - libapache2-mod-wsgi - - mongodb-clients - - mongodb-server - - libmpc3 - - libmpfr4 - - python-msgpack - - libmtdev1-udeb - - kpartx - - kpartx-udeb - - multipath-udeb - - libmysqlclient18 - - mysql-client - - mysql-client-5.5 - - mysql-client-core-5.5 - - 
mysql-common - - mysql-server - - mysql-server-5.5 - - mysql-server-core-5.5 - - mysql-client-5.6 - - mysql-client-core-5.6 - - mysql-server-wsrep-5.6 - - mysql-server-wsrep-core-5.6 - - mysql-wsrep-common-5.6 - - mythbuntu-diskless-client-builder - - nano-udeb - - nbd-client-udeb - - ncurses-term - - ndisc6-udeb - - rdnssd-udeb - - download-installer - - libsnmp-base - - libsnmp30 - - snmp - - snmpd - - netcat-traditional - - libnetcf1 - - netcfg-static - - python-netifaces - - libnetpbm10 - - netpbm - - nfs-common - - nginx-common - - nginx-core - - python-nose - - python-novnc - - libnspr4 - - libnss3 - - libnss3-nssdb - - libnss3-tools - - ntfs-3g-udeb - - open-iscsi-udeb - - libopenhpi2 - - openhpid - - libopenipmi0 - - openssh-client-udeb - - openssh-server-udeb - - libcrypto1.0.0-udeb - - libssl1.0.0-udeb - - libssl0.9.8 - - libcrypto0.9.8-udeb - - os-prober-udeb - - python-oslo.config - - python-oslo.messaging - - python-oslo.rootwrap - - libcib3 - - libcrmcluster4 - - libcrmcommon3 - - libcrmservice1 - - liblrmd1 - - libpe-rules2 - - libpe-status4 - - libpengine4 - - libstonithd2 - - libtransitioner2 - - pacemaker-cli-utils - - libpango1.0-udeb - - python-paramiko - - partconf-find-partitions - - partconf-mkfstab - - libparted0-udeb - - parted-udeb - - partman-utils - - partman-crypto-dm - - python-passlib - - python-paste - - python-pastedeploy - - python-pastedeploy-tpl - - python-pastescript - - libpci3-udeb - - pciutils-udeb - - pcmciautils-udeb - - libpcrecpp0 - - libpcre3-udeb - - python-pcs - - libperl5.18 - - python-pexpect - - php5-json - - libapache2-mod-php5 - - php5-cli - - php5-common - - php5-gd - - php5-ldap - - php5-pgsql - - php5-readline - - libpixman-1-0 - - libpixman-1-0-udeb - - python-ply - - libpopt0-udeb - - libpq5 - - postgresql-client-9.3 - - postgresql - - postgresql-client-common - - ppp-udeb - - env-preseed - - file-preseed - - initrd-preseed - - network-preseed - - preseed-common - - libpulse0 - - pwgen-udeb - - python-pyasn1 - - python-pycurl - - python3-pycurl - - python-gi - - python-pylibmc - - python-bson - - python-bson-ext - - python-gridfs - - python-pymongo - - python-pymongo-ext - - python-openssl - - python-serial - - python-babel-localedata - - python3-debian - - python-appconf - - python-compressor - - python-openstack-auth - - python-jsonpatch - - libpython3.4 - - python-pyudev - - python-zmq - - qemu-keymaps - - qemu-kvm - - qemu-system - - qemu-system-arm - - qemu-system-common - - qemu-system-mips - - qemu-system-misc - - qemu-system-ppc - - qemu-system-sparc - - qemu-system-x86 - - qemu-user - - qemu-utils - - python-qpid - - rdate-udeb - - redboot-tools-udeb - - gptsync-udeb - - mkreiserfs-udeb - - reiserfsprogs-udeb - - python-requests - - rescue-check - - rescue-mode - - libjs-rickshaw - - python-routes - - ruby-dev - - ruby1.9.1-dev - - libtxc-dxtn-s2tc0 - - python-scgi - - libsgutils2-2 - - simple-cdd-profiles - - python-simplegeneric - - python-simplejson - - python-singledispatch - - python3-six - - libslang2-udeb - - python3-software-properties - - software-properties-common - - libspice-server1 - - python-sqlalchemy - - python-sqlalchemy-ext - - squid-deb-proxy-client-udeb - - strace-udeb - - python-suds - - libsysfs2-udeb - - syslinux-udeb - - libudev1-udeb - - udev-udeb - - tcpd - - libtidy-0.99-0 - - libtiff5 - - ttf-cjk-compact-udeb - - ttf-dejavu-mono-udeb - - ttf-dejavu-udeb - - ttf-freefont-udeb - - ttf-kannada-fonts-udeb - - ttf-malayalam-fonts-udeb - - ttf-tamil-fonts-udeb - - ttf-telugu-fonts-udeb - - 
python-twisted - - python-twisted-bin - - python-twisted-conch - - python-twisted-core - - python-twisted-lore - - python-twisted-mail - - python-twisted-names - - python-twisted-news - - python-twisted-runner - - python-twisted-web - - python-twisted-words - - tzsetup-udeb - - oem-config-check - - oem-config-udeb - - ubuntu-keyring-udeb - - alembic - - apache2 - - at - - atop - - bc - - binutils - - bridge-utils - - btrfs-tools - - build-essential - - ceph - - cgroup-lite - - cirros-testvm - - cloud-init - - cluster-glue - - conntrack - - corosync - - cpu-checker - - cpufrequtils - - crash - - crmsh - - cryptsetup - - dctrl-tools - - debhelper - - devscripts - - diffstat - - disktype - - distro-info-data - - dput - - dstat - - ebtables - - ethtool - - fakeroot - - fence-agents - - fencing-agent - - fping - - fuel-utils - - galera - - gawk - - gcc-4.8 - - gdb - - gettext - - ghostscript - - git - - gsfonts - - haproxy - - hicolor-icon-theme - - htop - - icoutils - - ieee-data - - ifenslave - - intel-microcode - - intltool-debian - - iperf - - iperf3 - - ipmitool - - iptables-persistent - - iucode-tool - - iw - - javascript-common - - jfsutils - - kexec-tools - - keystone - - libalgorithm-diff-perl - - libalgorithm-diff-xs-perl - - libalgorithm-merge-perl - - libapt-pkg-perl - - libarchive-zip-perl - - libauthen-sasl-perl - - libautodie-perl - - libclone-perl - - libcommon-sense-perl - - libconfig-general-perl - - libdbd-mysql-perl - - libdbi-perl - - libdigest-hmac-perl - - libemail-valid-perl - - libencode-locale-perl - - liberror-perl - - libexporter-lite-perl - - libfile-basedir-perl - - libfile-copy-recursive-perl - - libfile-fcntllock-perl - - libfile-listing-perl - - libfont-afm-perl - - libhtml-form-perl - - libhtml-format-perl - - libhtml-parser-perl - - libhtml-tagset-perl - - libhtml-template-perl - - libhtml-tree-perl - - libhttp-cookies-perl - - libhttp-daemon-perl - - libhttp-date-perl - - libhttp-message-perl - - libhttp-negotiate-perl - - libintl-perl - - libio-html-perl - - libio-pty-perl - - libio-socket-inet6-perl - - libio-socket-ssl-perl - - libipc-run-perl - - libipc-system-simple-perl - - libjs-jquery.quicksearch - - libjs-jsencrypt - - libjs-qunit - - libjs-spin.js - - libjs-swfobject - - libjson-perl - - libjson-xs-perl - - liblist-moreutils-perl - - liblwp-mediatypes-perl - - liblwp-protocol-https-perl - - libmail-sendmail-perl - - libmailtools-perl - - libnet-dns-perl - - libnet-domain-tld-perl - - libnet-http-perl - - libnet-ip-perl - - libnet-smtp-ssl-perl - - libnet-ssleay-perl - - libnet-telnet-perl - - libparse-debcontrol-perl - - libperlio-gzip-perl - - libsocket6-perl - - libstring-shellquote-perl - - libsub-identify-perl - - libsys-hostname-long-perl - - libsys-virt-perl - - libterm-readkey-perl - - libtext-levenshtein-perl - - libtie-ixhash-perl - - liburi-perl - - libwww-perl - - libwww-robotrules-perl - - libxml-parser-perl - - libxml-xpath-perl - - lintian - - linux-firmware-nonfree - - lksctp-tools - - lldpad - - lshell - - makedumpfile - - mc - - mcelog - - mdadm - - memcached - - menu - - monit - - msr-tools - - murano - - murano-apps - - murano-dashboard - - netcat - - nginx - - nmap - - nodejs - - novnc - - open-iscsi - - openipmi - - pacemaker - - patch - - patchutils - - percona-toolkit - - percona-xtrabackup - - php5 - - pm-utils - - po-debconf - - poppler-data - - postfix - - postgresql-9.3 - - postgresql-common - - pssh - - python-amqplib - - python-anyjson - - python-babel - - python-barbicanclient - - python-boto - - python-carrot - - 
python-ceilometerclient - - python-cinderclient - - python-cloudfiles - - python-concurrent.futures - - python-croniter - - python-crypto - - python-daemon - - python-dateutil - - python-decorator - - python-dingus - - python-django - - python-django-pyscss - - python-dns - - python-docker - - python-dogpile.cache - - python-dogpile.core - - python-ecdsa - - python-ethtool - - python-eventlet - - python-extras - - python-fixtures - - python-formencode - - python-gflags - - python-glance-store - - python-glanceclient - - python-greenlet - - python-happybase - - python-heatclient - - python-httplib2 - - python-ipaddr - - python-iso8601 - - python-json-patch - - python-json-pointer - - python-jsonpath-rw - - python-jsonschema - - python-keyring - - python-keystoneclient - - python-keystonemiddleware - - python-kombu - - python-ldap - - python-lesscpy - - python-librabbitmq - - python-lockfile - - python-logutils - - python-memcache - - python-mimeparse - - python-mock - - python-muranoclient - - python-mysqldb - - python-netaddr - - python-networkx - - python-neutronclient - - python-novaclient - - python-numpy - - python-oauth - - python-oauthlib - - python-openid - - python-openstackclient - - python-oslo.db - - python-oslo.i18n - - python-oslo.serialization - - python-oslo.utils - - python-osprofiler - - python-pam - - python-pecan - - python-posix-ipc - - python-psutil - - python-pycadf - - python-pymemcache - - python-pyscss - - python-pysnmp4 - - python-pysnmp4-apps - - python-pysnmp4-mibs - - python-rabbit - - python-repoze.lru - - python-retrying - - python-rfc3986 - - python-rtslib-fb - - python-saharaclient - - python-secretstorage - - python-setuptools-git - - python-stompy - - python-support - - python-swiftclient - - python-taskflow - - python-tempita - - python-testtools - - python-thrift - - python-tooz - - python-troveclient - - python-tz - - python-urllib3 - - python-warlock - - python-webob - - python-wsme - - python-xattr - - python-xstatic - - python-xstatic-angular - - python-xstatic-angular-cookies - - python-xstatic-angular-mock - - python-xstatic-bootstrap-datepicker - - python-xstatic-bootstrap-scss - - python-xstatic-d3 - - python-xstatic-font-awesome - - python-xstatic-hogan - - python-xstatic-jasmine - - python-xstatic-jquery - - python-xstatic-jquery-migrate - - python-xstatic-jquery-ui - - python-xstatic-jquery.quicksearch - - python-xstatic-jquery.tablesorter - - python-xstatic-jsencrypt - - python-xstatic-qunit - - python-xstatic-rickshaw - - python-xstatic-spin - - python-yaql - - python3-chardet - - qemu - - rabbitmq-server - - reiserfsprogs - - resource-agents - - rpcbind - - ruby-i18n - - sahara - - screen - - scrub - - seabios - - sg3-utils - - sharutils - - sheepdog - - socat - - ssh-import-id - - ssl-cert - - supermin - - swift - - swift-plugin-s3 - - sysstat - - t1utils - - tcptraceroute - - tgt - - tmux - - traceroute - - unattended-upgrades - - unzip - - update-inetd - - vbetool - - vgabios - - wdiff - - websockify - - xinetd - - zerofree - - zfs-fuse - - anna - - base-installer - - bterm-unifont - - cdrom-checker - - cdrom-detect - - cdrom-retriever - - choose-mirror - - clock-setup - - efi-reader - - elilo-installer - - finish-install - - grub-installer - - hw-detect - - installation-locale - - iso-scan - - lilo-installer - - live-installer - - localechooser - - lowmem - - lvmcfg - - main-menu - - mdcfg - - media-retriever - - migration-assistant - - mountmedia - - net-retriever - - netcfg - - network-console - - nobootloader - - oldsys-preseed - - 
partman-auto-crypto - - partman-auto-loop - - partman-auto-lvm - - partman-auto-raid - - partman-auto - - partman-base - - partman-basicfilesystems - - partman-basicmethods - - partman-btrfs - - partman-crypto - - partman-efi - - partman-ext3 - - partman-iscsi - - partman-jfs - - partman-lvm - - partman-md - - partman-multipath - - partman-nbd - - partman-partitioning - - partman-target - - partman-xfs - - pkgsel - - rootskel-gtk - - rootskel - - system-integrity-check - - udpkg - - usb-discover - - userdevfs - - libodbc1 - - libusbredirparser1 - - usbutils-udeb - - user-setup-udeb - - python-utidylib - - cfdisk-udeb - - fdisk-udeb - - libblkid1-udeb - - libmount1-udeb - - libuuid1-udeb - - util-linux-udeb - - vlan-udeb - - libvte9-udeb - - libvte-2.90-9-udeb - - python-waitress - - watershed-udeb - - python-websocket - - python-webtest - - wget-udeb - - wide-dhcpv6-client-udeb - - libiw30-udeb - - wireless-tools-udeb - - wpasupplicant-udeb - - x11-xkb-utils-udeb - - libxen-4.4 - - libxenstore3.0 - - xfsprogs-udeb - - libxft2-udeb - - xkb-data-udeb - - libxmlrpc-core-c3-udeb - - xserver-xorg-core-udeb - - xserver-xorg-input-evdev-udeb - - xserver-xorg-input-evdev-udeb-lts-utopic - - xserver-xorg-video-fbdev-udeb - - xserver-xorg-video-fbdev-udeb-lts-utopic - - libyajl2 - - zabbix-agent - - zabbix-frontend-php - - zabbix-server-mysql - - zabbix-server-pgsql - - libzmq3 - - zlib1g-udeb - - libzookeeper-mt2 - - python-zope.interface -master: - projects: - - name: apr - packages: - - apr - - name: apr-util - packages: - - apr-util - - apr-util-ldap - - name: atk - packages: - - atk - - name: atlas - packages: - - atlas - - name: atop - packages: - - atop - - name: avahi - packages: - - avahi-libs - - name: babel - packages: - - python-babel - - name: bridge-utils - packages: - - bridge-utils - - name: cairo - packages: - - cairo - - name: cdrkit - packages: - - genisoimage - - name: cobbler - packages: - - cobbler - - cobbler-web - - name: createrepo - packages: - - createrepo - - name: cryptsetup-luks - packages: - - cryptsetup-luks - - cryptsetup-luks-libs - - name: cups - packages: - - cups-libs - - name: cvs - packages: - - cvs - - name: cyrus-sasl - packages: - - cyrus-sasl-md5 - - name: dbus - packages: - - dbus - - name: dbus-python - packages: - - dbus-python - - name: debootstrap - packages: - - debootstrap - - name: deltarpm - packages: - - python-deltarpm - - deltarpm - - name: dhcp - packages: - - dhcp - - name: dnsmasq - packages: - - dnsmasq - - name: docker-io - packages: - - docker-io - - name: e2fsprogs - packages: - - libcom_err - - libcom_err-devel - - name: eggdbus - packages: - - eggdbus - - name: fence-agents - packages: - - fence-agents - - name: fontconfig - packages: - - fontconfig - - name: freetype - packages: - - freetype - - name: fuel-library - packages: - - fuel-library6.1 - - name: fuel-ostf - packages: - - fuel-ostf - - name: fuelmenu - packages: - - fuelmenu - - name: gcc - packages: - - libstdc++ - - libgfortran - - name: gd - packages: - - gd - - name: gettext - packages: - - gettext - - name: gnutls - packages: - - gnutls-utils - - gnutls - - name: gtk2 - packages: - - gtk2 - - name: hal - packages: - - hal-libs - - hal - - name: hdparm - packages: - - hdparm - - name: hicolor-icon-theme - packages: - - hicolor-icon-theme - - name: httpd - packages: - - httpd-tools - - httpd - - mod_ssl - - name: ipmitool - packages: - - ipmitool - - name: jasper - packages: - - jasper-libs - - name: keyutils - packages: - - keyutils-libs-devel - - name: krb5 - packages: - 
- krb5-devel - - name: libcgroup - packages: - - libcgroup - - name: libevent - packages: - - libevent-devel - - libevent-headers - - libevent - - libevent-doc - - name: libjpeg-turbo - packages: - - libjpeg-turbo - - name: libnl - packages: - - libnl - - name: libpng - packages: - - libpng - - name: libselinux - packages: - - libselinux-devel - - name: libsepol - packages: - - libsepol-devel - - name: libthai - packages: - - libthai - - name: libtiff - packages: - - libtiff - - name: libvirt - packages: - - libvirt-client - - name: libxcb - packages: - - libxcb - - name: libxml2 - packages: - - libxml2-python - - name: libxslt - packages: - - libxslt - - name: lrzip - packages: - - lrzip - - name: lsof - packages: - - lsof - - name: lxc - packages: - - lxc-libs - - lxc - - name: lzo - packages: - - lzo - - name: m2crypto - packages: - - m2crypto - - name: mailcap - packages: - - mailcap - - name: man - packages: - - man - - name: mesa - packages: - - mesa-libGL - - mesa-libGLU - - mesa-dri-filesystem - - mesa-dri-drivers - - name: mesa-dri1-drivers - packages: - - mesa-dri1-drivers - - name: mesa-private-llvm - packages: - - mesa-private-llvm - - name: monit - packages: - - monit - - name: mtools - packages: - - mtools - - name: nailgun - packages: - - nailgun - - name: nailgun-redhat-license - packages: - - nailgun-redhat-license - - name: net-snmp - packages: - - net-snmp-utils - - net-snmp-libs - - name: nginx - packages: - - nginx - - name: numactl - packages: - - numactl - - name: numpy - packages: - - numpy - - numpy-f2py - - name: openssl - packages: - - openssl-devel - - name: openstack-keystone - packages: - - openstack-keystone - - python-keystone - - name: pango - packages: - - pango - - name: perl - packages: - - perl-Net-Telnet - - perl-Module-Pluggable - - perl-Pod-Escapes - - perl-Pod-Simple - - name: pexpect - packages: - - pexpect - - name: pixman - packages: - - pixman - - name: pm-utils - packages: - - pm-utils - - name: polkit - packages: - - polkit - - name: portreserve - packages: - - portreserve - - name: postgresql - packages: - - postgresql-libs - - postgresql-server - - postgresql - - name: pygobject2 - packages: - - pygobject2 - - name: python - packages: - - python-PrettyTable - - python-posix_ipc - - python-devel - - python-MarkupSafe - - name: python-alembic - packages: - - python-alembic - - name: python-amqp - packages: - - python-amqp - - name: python-amqplib - packages: - - python-amqplib - - name: python-anyjson - packages: - - python-anyjson - - name: python-backports - packages: - - python-backports - - python-backports-ssl_match_hostname - - name: python-beaker - packages: - - python-beaker - - name: python-beautifulsoup4 - packages: - - python-beautifulsoup4 - - name: python-ceilometerclient - packages: - - python-ceilometerclient - - name: python-chardet - packages: - - python-chardet - - name: python-cheetah - packages: - - python-cheetah - - name: python-cherrypy - packages: - - python-cherrypy - - name: python-cinderclient - packages: - - python-cinderclient - - name: python-crypto - packages: - - python-crypto - - name: python-daemon - packages: - - python-daemon - - name: python-dateutil - packages: - - python-dateutil - - name: python-decorator - packages: - - python-decorator - - name: python-django - packages: - - python-django - - name: python-dmidecode - packages: - - python-dmidecode - - name: python-dogpile-cache - packages: - - python-dogpile-cache - - name: python-dogpile-core - packages: - - python-dogpile-core - - name: python-ecdsa 
- packages: - - python-ecdsa - - name: python-ethtool - packages: - - python-ethtool - - name: python-eventlet - packages: - - python-eventlet - - name: python-fabric - packages: - - python-fabric - - name: python-fuelclient - packages: - - python-fuelclient - - name: python-futures - packages: - - python-futures - - name: python-fysom - packages: - - python-fysom - - name: python-gevent - packages: - - python-gevent - - name: python-glanceclient - packages: - - python-glanceclient - - name: python-greenlet - packages: - - python-greenlet - - name: python-heatclient - packages: - - python-heatclient - - name: python-html5lib - packages: - - python-html5lib - - name: python-httplib2 - packages: - - python-httplib2 - - name: python-importlib - packages: - - python-importlib - - name: python-ipaddr - packages: - - python-ipaddr - - name: python-iso8601 - packages: - - python-iso8601 - - name: python-jinja2 - packages: - - python-jinja2 - - name: python-jsonpatch - packages: - - python-jsonpatch - - name: python-jsonpointer - packages: - - python-jsonpointer - - name: python-jsonschema - packages: - - python-jsonschema - - name: python-keyring - packages: - - python-keyring - - name: python-keystoneclient - packages: - - python-keystoneclient - - name: python-keystonemiddleware - packages: - - python-keystonemiddleware - - name: python-kombu - packages: - - python-kombu - - name: python-ldap - packages: - - python-ldap - - name: python-lockfile - packages: - - python-lockfile - - name: python-logutils - packages: - - python-logutils - - name: python-lxml - packages: - - python-lxml - - name: python-mako - packages: - - python-mako - - name: python-markdown - packages: - - python-markdown - - name: python-meld3 - packages: - - python-meld3 - - name: python-memcached - packages: - - python-memcached - - name: python-migrate - packages: - - python-migrate - - name: python-muranoclient - packages: - - python-muranoclient - - name: python-netaddr - packages: - - python-netaddr - - name: python-netifaces - packages: - - python-netifaces - - name: python-networkx - packages: - - python-networkx-core - - name: python-neutronclient - packages: - - python-neutronclient - - name: python-nose - packages: - - python-nose - - name: python-novaclient - packages: - - python-novaclient - - name: python-oauthlib - packages: - - python-oauthlib - - name: python-ordereddict - packages: - - python-ordereddict - - name: python-oslo-config - packages: - - python-oslo-config - - name: python-oslo-db - packages: - - python-oslo-db - - name: python-oslo-i18n - packages: - - python-oslo-i18n - - name: python-oslo-messaging - packages: - - python-oslo-messaging - - name: python-oslo-serialization - packages: - - python-oslo-serialization - - name: python-oslo-utils - packages: - - python-oslo-utils - - name: python-paramiko - packages: - - python-paramiko - - name: python-passlib - packages: - - python-passlib - - name: python-paste - packages: - - python-paste - - name: python-paste-deploy - packages: - - python-paste-deploy - - name: python-pbr - packages: - - python-pbr - - name: python-pecan - packages: - - python-pecan - - name: python-pip - packages: - - python-pip - - name: python-ply - packages: - - python-ply - - name: python-psycopg2 - packages: - - python-psycopg2 - - name: python-pycadf - packages: - - python-pycadf - - name: python-pygments - packages: - - python-pygments - - name: python-repoze-lru - packages: - - python-repoze-lru - - name: python-requests - packages: - - python-requests - - name: 
python-rhsm - packages: - - python-rhsm - - name: python-routes - packages: - - python-routes - - name: python-saharaclient - packages: - - python-saharaclient - - name: python-simplegeneric - packages: - - python-simplegeneric - - name: python-simplejson - packages: - - python-simplejson - - name: python-singledispatch - packages: - - python-singledispatch - - name: python-sqlalchemy - packages: - - python-sqlalchemy - - name: python-suds - packages: - - python-suds - - name: python-swiftclient - packages: - - python-swiftclient - - name: python-tempita - packages: - - python-tempita - - name: python-testresources - packages: - - python-testresources - - name: python-unittest2 - packages: - - python-unittest2 - - name: python-urllib3 - packages: - - python-urllib3 - - name: python-urwid - packages: - - python-urwid - - name: python-waitress - packages: - - python-waitress - - name: python-warlock - packages: - - python-warlock - - name: python-webob - packages: - - python-webob - - name: python-webpy - packages: - - python-webpy - - name: python-webtest - packages: - - python-webtest - - name: python-wsgilog - packages: - - python-wsgilog - - name: python-wsgiref - packages: - - python-wsgiref - - name: python-yaql - packages: - - python-yaql - - name: rabbitmq-server - packages: - - rabbitmq-server - - name: ruby21-augeas - packages: - - ruby21-augeas - - name: ruby21-facter - packages: - - ruby21-facter - - name: ruby21-hiera - packages: - - ruby21-hiera - - name: ruby21-mcollective - packages: - - ruby21-mcollective-common - - ruby21-mcollective - - name: ruby21-nailgun-mcagents - packages: - - ruby21-nailgun-mcagents - - name: ruby21-puppet - packages: - - ruby21-puppet - - name: ruby21-rubygem - packages: - - ruby21-rubygem-Platform - - ruby21-rubygem-json_pure - - name: ruby21-rubygem-activesupport - packages: - - ruby21-rubygem-activesupport - - name: ruby21-rubygem-amq-client - packages: - - ruby21-rubygem-amq-client - - name: ruby21-rubygem-amq-protocol - packages: - - ruby21-rubygem-amq-protocol - - name: ruby21-rubygem-amqp - packages: - - ruby21-rubygem-amqp - - name: ruby21-rubygem-astute - packages: - - ruby21-rubygem-astute - - name: ruby21-rubygem-eventmachine - packages: - - ruby21-rubygem-eventmachine - - name: ruby21-rubygem-i18n - packages: - - ruby21-rubygem-i18n - - name: ruby21-rubygem-json - packages: - - ruby21-rubygem-json - - name: ruby21-rubygem-mcollective-client - packages: - - ruby21-rubygem-mcollective-client - - name: ruby21-rubygem-mime-types - packages: - - ruby21-rubygem-mime-types - - name: ruby21-rubygem-net-ssh - packages: - - ruby21-rubygem-net-ssh - - name: ruby21-rubygem-net-ssh-gateway - packages: - - ruby21-rubygem-net-ssh-gateway - - name: ruby21-rubygem-net-ssh-multi - packages: - - ruby21-rubygem-net-ssh-multi - - name: ruby21-rubygem-netaddr - packages: - - ruby21-rubygem-netaddr - - name: ruby21-rubygem-open4 - packages: - - ruby21-rubygem-open4 - - name: ruby21-rubygem-openstack - packages: - - ruby21-rubygem-openstack - - name: ruby21-rubygem-popen4 - packages: - - ruby21-rubygem-popen4 - - name: ruby21-rubygem-raemon - packages: - - ruby21-rubygem-raemon - - name: ruby21-rubygem-rest-client - packages: - - ruby21-rubygem-rest-client - - name: ruby21-rubygem-rgen - packages: - - ruby21-rubygem-rgen - - name: ruby21-rubygem-shadow - packages: - - ruby21-rubygem-shadow - - name: ruby21-rubygem-stomp - packages: - - ruby21-rubygem-stomp - - name: ruby21-rubygem-symboltable - packages: - - ruby21-rubygem-symboltable - - name: 
ruby21-rubygem-systemu - packages: - - ruby21-rubygem-systemu - - name: rubygem-net-ssh - packages: - - rubygem-net-ssh - - name: rubygem-net-ssh-gateway - packages: - - rubygem-net-ssh-gateway - - name: scipy - packages: - - scipy - - name: screen - packages: - - screen - - name: send2syslog - packages: - - send2syslog - - name: shotgun - packages: - - shotgun - - name: strace - packages: - - strace - - name: subscription-manager - packages: - - subscription-manager - - name: suitesparse - packages: - - suitesparse - - name: supervisor - packages: - - supervisor - - name: syslinux - packages: - - syslinux - - name: sysstat - packages: - - sysstat - - name: tcl - packages: - - tcl - - name: tftp - packages: - - tftp-server - - name: tk - packages: - - tk - - name: undefined - packages: - - tcp_wrappers - - libXau - - libX11 - - ConsoleKit-libs - - sg3_utils - - lm_sensors-libs - - PyYAML - - GeoIP - - libX11-common - - libXpm - - sg3_utils-libs - - MAKEDEV - - ConsoleKit - - pyOpenSSL - - libICE - - wxBase - - wxGTK-gl - - libSM - - mod_wsgi - - libXfixes - - libXcomposite - - tcp_wrappers-libs - - SDL - - libXcursor - - libXrender - - libXxf86vm - - libXrandr - - wxGTK - - MySQL-python - - libXext - - libXinerama - - libXdamage - - libXft - - MySQL-shared - - libXi - - PyPAM - - bfa-firmware - - setup - - aic94xx-firmware-30 - - tzdata-2014e - - dhclient - - basesystem - - glibc-common - - grub - - glibc - - bash - - wget - - libcap - - sudo - - info - - e2fsprogs - - chkconfig - - efibootmgr - - audit-libs - - acl - - libxml2 - - telnet - - bzip2-libs - - readline - - ql2400-firmware - - libselinux - - ivtv-firmware-20080701 - - sed - - iwl6000-firmware - - xz-libs - - atmel-firmware - - libidn - - iwl4965-firmware - - file-libs - - iwl6000g2a-firmware - - libudev-147 - - iwl100-firmware - - findutils - - ipw2100-firmware - - lua - - rootfiles - - pcre - - fuel-target-centos-images - - cyrus-sasl-lib - - libpcap - - expat - - perl - - libgpg-error - - cpio - - sysvinit-tools - - libyaml - - pth - - p11-kit - - ca-certificates - - glib2 - - dbus-glib - - device-mapper-persistent-data - - libnih - - libutempter - - vim-minimal - - net-tools - - libselinux-ruby - - tar - - libicu - - libss - - db4-utils - - pinentry - - binutils - - m4 - - dash - - groff - - coreutils-libs - - cracklib - - coreutils - - module-init-tools - - redhat-logos - - pciutils - - logrotate - - nss-sysinit - - openldap - - libcap-ng - - ethtool - - gpm-libs - - krb5-libs - - ruby - - libssh2 - - curl - - rpm - - gpgme - - bind-libs - - mysql-libs - - fipscheck - - libsemanage - - python - - python-iniparse - - rpm-python - - python-six - - pyparsing - - python-tablib - - yum-metadata-parser - - python-pypcap - - python-urlgrabber - - yum - - python-daemonize - - python-cliff - - nailgun-net-check - - newt - - plymouth-core-libs - - centos-release-6 - - iptables - - iputils-20071127 - - initscripts - - device-mapper-libs - - device-mapper-event-libs - - virt-what - - device-mapper-event - - libdrm - - ntpdate - - kbd - - dracut-kernel-004 - - cyrus-sasl - - crontabs - - cronie - - selinux-policy - - kernel - - system-config-firewall-base - - fuel-docker-images - - lvm2 - - openssh-server - - libgcc - - b43-openfwwf - - filesystem - - dhcp-common - - authconfig - - ncurses-base - - vim-enhanced - - nss-softokn-freebl - - ncurses-libs - - nmap-ncat - - libattr - - passwd - - zlib - - audit - - popt - - gdisk - - libacl - - mlocate - - db4 - - rsync - - nspr - - attr - - daemonize - - nss-util - - iwl1000-firmware - - 
libsepol - - iwl5000-firmware - - shadow-utils - - libertas-usb8388-firmware-20150115 - - dbus-libs - - ql2500-firmware - - sqlite - - zd1211-firmware - - gawk - - iwl3945-firmware - - libuuid - - iwl6050-firmware - - libblkid - - iwl5150-firmware - - libselinux-utils - - ipw2200-firmware - - bzip2 - - fuel-bootstrap-image - - grep - - which - - elfutils-libelf - - gdbm - - perl-libs - - perl-version - - pytz-2010h - - nss-softokn - - checkpolicy - - libedit - - pciutils-libs - - dmidecode - - pkgconfig - - libtasn1 - - p11-kit-trust - - gamin - - shared-mime-info - - grubby - - libgcrypt - - file - - upstart - - procps - - psmisc - - augeas-libs - - gmp - - libusb - - e2fsprogs-libs - - libgomp - - diffutils - - make - - hal-info-20090716 - - vim-common - - ncurses - - less-436 - - gzip - - cracklib-dicts - - pam - - hwdata - - plymouth-scripts - - libpciaccess - - nss - - nss-tools - - libuser - - mingetty - - keyutils-libs - - openssl - - libcurl - - rpm-libs - - gnupg2 - - ruby21-rubygem - - tcpdump - - bind-utils - - fipscheck-lib - - ustr - - vconfig - - libffi - - python-libs - - python-setuptools - - python-argparse - - python-stevedore - - python-cmd2 - - pygpgme - - python-pycurl - - yum-plugin-fastestmirror - - scapy - - python-cliff-tablib - - slang - - newt-python - - kbd-misc - - policycoreutils - - iproute - - util-linux-ng - - udev-147 - - device-mapper - - openssh - - lvm2-libs - - plymouth - - ntp - - dracut-004 - - rsyslog - - postfix - - cronie-anacron - - iptables-ipv6 - - linux-firmware-20150115 - - selinux-policy-targeted - - openssh-clients - - erlang-stdlib-R14B - - erlang-ssl-R14B - - erlang-crypto-R14B - - yum-utils - - erlang-otp_mibs-R14B - - erlang-tools-R14B - - erlang-public_key-R14B - - erlang-kernel-R14B - - erlang-hipe-R14B - - erlang-snmp-R14B - - erlang-et-R14B - - erlang-sasl-R14B - - erlang-webtool-R14B - - erlang-os_mon-R14B - - erlang-inets-R14B - - erlang-xmerl-R14B - - erlang-syntax_tools-R14B - - erlang-compiler-R14B - - erlang-mnesia-R14B - - erlang-observer-R14B - - erlang-erts-R14B - - erlang-runtime_tools-R14B - - erlang-gs-R14B - - erlang-wx-R14B - - pydot-ng - - fuel-image - - fuel-provisioning-scripts - - name: uwsgi - packages: - - uwsgi-plugin-python - - uwsgi - - uwsgi-plugin-common - - name: xinetd - packages: - - xinetd - - name: xz - packages: - - xz - - xz-lzma-compat - - name: yajl - packages: - - yajl - - name: zlib - packages: - - zlib-devel -bootstrap: - projects: - - name: undefined - packages: - - bash - - bfa-firmware - - ql2100-firmware - - ql2200-firmware - - ql23xx-firmware - - cronie-noanacron - - crontabs - - dhclient - - dmidecode - - iputils - - logrotate - - mcollective - - mingetty - - net-tools - - ntp - - ntpdate - - openssh-clients - - openssh-server - - pciutils - - rsyslog - - scapy - - tcpdump - - vconfig - - vim-minimal - - wget - - nailgun-agent - - nailgun-mcagents - - nailgun-net-check - - fuel-agent - - python-tasklib \ No newline at end of file diff --git a/packages_tests/deb/provisioning/test.yaml b/packages_tests/deb/provisioning/test.yaml deleted file mode 100644 index 4c0ba1055..000000000 --- a/packages_tests/deb/provisioning/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - deployment \ No newline at end of file diff --git a/packages_tests/rpm/deployment/ceph/at/test.yaml b/packages_tests/rpm/deployment/ceph/at/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/at/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - 
ceph diff --git a/packages_tests/rpm/deployment/ceph/bc/test.yaml b/packages_tests/rpm/deployment/ceph/bc/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/bc/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/ceph-deploy/test.yaml b/packages_tests/rpm/deployment/ceph/ceph-deploy/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/ceph-deploy/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/ceph/test.yaml b/packages_tests/rpm/deployment/ceph/ceph/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/ceph/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/db4-devel/test.yaml b/packages_tests/rpm/deployment/ceph/db4-devel/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/db4-devel/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/ed/test.yaml b/packages_tests/rpm/deployment/ceph/ed/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/ed/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/gdbm-devel/test.yaml b/packages_tests/rpm/deployment/ceph/gdbm-devel/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/gdbm-devel/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/gperftools-libs/test.yaml b/packages_tests/rpm/deployment/ceph/gperftools-libs/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/gperftools-libs/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/leveldb/test.yaml b/packages_tests/rpm/deployment/ceph/leveldb/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/leveldb/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/libunwind/test.yaml b/packages_tests/rpm/deployment/ceph/libunwind/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/libunwind/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/mailx/test.yaml b/packages_tests/rpm/deployment/ceph/mailx/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/mailx/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/patch/test.yaml b/packages_tests/rpm/deployment/ceph/patch/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/patch/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/pax/test.yaml b/packages_tests/rpm/deployment/ceph/pax/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/pax/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git 
a/packages_tests/rpm/deployment/ceph/python-flask/test.yaml b/packages_tests/rpm/deployment/ceph/python-flask/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/python-flask/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/python-itsdangerous/test.yaml b/packages_tests/rpm/deployment/ceph/python-itsdangerous/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/python-itsdangerous/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/python-werkzeug/test.yaml b/packages_tests/rpm/deployment/ceph/python-werkzeug/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/python-werkzeug/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/redhat-lsb-core/test.yaml b/packages_tests/rpm/deployment/ceph/redhat-lsb-core/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/redhat-lsb-core/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/snappy/test.yaml b/packages_tests/rpm/deployment/ceph/snappy/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/snappy/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/test.yaml b/packages_tests/rpm/deployment/ceph/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/ceph/time/test.yaml b/packages_tests/rpm/deployment/ceph/time/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/ceph/time/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/cinder/libmnl/test.yaml b/packages_tests/rpm/deployment/cinder/libmnl/test.yaml deleted file mode 100644 index b6927ac7b..000000000 --- a/packages_tests/rpm/deployment/cinder/libmnl/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - cinder \ No newline at end of file diff --git a/packages_tests/rpm/deployment/cinder/test.yaml b/packages_tests/rpm/deployment/cinder/test.yaml deleted file mode 100644 index b6927ac7b..000000000 --- a/packages_tests/rpm/deployment/cinder/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - cinder \ No newline at end of file diff --git a/packages_tests/rpm/deployment/general/test.yaml b/packages_tests/rpm/deployment/general/test.yaml deleted file mode 100644 index 4c0ba1055..000000000 --- a/packages_tests/rpm/deployment/general/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - deployment \ No newline at end of file diff --git a/packages_tests/rpm/deployment/glance/test.yaml b/packages_tests/rpm/deployment/glance/test.yaml deleted file mode 100644 index b4316b74c..000000000 --- a/packages_tests/rpm/deployment/glance/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - glance \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/dkms/test.yaml b/packages_tests/rpm/deployment/neutron/dkms/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- 
a/packages_tests/rpm/deployment/neutron/dkms/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/ipset/test.yaml b/packages_tests/rpm/deployment/neutron/ipset/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- a/packages_tests/rpm/deployment/neutron/ipset/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/libipset3/test.yaml b/packages_tests/rpm/deployment/neutron/libipset3/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- a/packages_tests/rpm/deployment/neutron/libipset3/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/openstack-neutron/test.yaml b/packages_tests/rpm/deployment/neutron/openstack-neutron/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- a/packages_tests/rpm/deployment/neutron/openstack-neutron/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/openvswitch-common/test.yaml b/packages_tests/rpm/deployment/neutron/openvswitch-common/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- a/packages_tests/rpm/deployment/neutron/openvswitch-common/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/openvswitch-switch/test.yaml b/packages_tests/rpm/deployment/neutron/openvswitch-switch/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- a/packages_tests/rpm/deployment/neutron/openvswitch-switch/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/python-jsonrpclib/test.yaml b/packages_tests/rpm/deployment/neutron/python-jsonrpclib/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- a/packages_tests/rpm/deployment/neutron/python-jsonrpclib/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/neutron/test.yaml b/packages_tests/rpm/deployment/neutron/test.yaml deleted file mode 100644 index 71d5b65f3..000000000 --- a/packages_tests/rpm/deployment/neutron/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - neutron \ No newline at end of file diff --git a/packages_tests/rpm/deployment/nova/openstack-nova-compute/test.yaml b/packages_tests/rpm/deployment/nova/openstack-nova-compute/test.yaml deleted file mode 100644 index 406ea608e..000000000 --- a/packages_tests/rpm/deployment/nova/openstack-nova-compute/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova_compute \ No newline at end of file diff --git a/packages_tests/rpm/deployment/nova/openstack-nova-network/test.yaml b/packages_tests/rpm/deployment/nova/openstack-nova-network/test.yaml deleted file mode 100644 index edd884dbd..000000000 --- a/packages_tests/rpm/deployment/nova/openstack-nova-network/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - nova \ No newline at end of file diff --git a/packages_tests/rpm/deployment/nova/test.yaml b/packages_tests/rpm/deployment/nova/test.yaml deleted file mode 100644 index edd884dbd..000000000 --- a/packages_tests/rpm/deployment/nova/test.yaml +++ /dev/null @@ 
-1,3 +0,0 @@ -system_tests: - tags: - - nova \ No newline at end of file diff --git a/packages_tests/rpm/deployment/perl/perl-Test-Simple/test.yaml b/packages_tests/rpm/deployment/perl/perl-Test-Simple/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/perl/perl-Test-Simple/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/perl/test.yaml b/packages_tests/rpm/deployment/perl/test.yaml deleted file mode 100644 index 8f938b75d..000000000 --- a/packages_tests/rpm/deployment/perl/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - ceph diff --git a/packages_tests/rpm/deployment/swift/test.yaml b/packages_tests/rpm/deployment/swift/test.yaml deleted file mode 100644 index f910c5806..000000000 --- a/packages_tests/rpm/deployment/swift/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - swift \ No newline at end of file diff --git a/packages_tests/rpm/deployment/test.yaml b/packages_tests/rpm/deployment/test.yaml deleted file mode 100644 index 4c0ba1055..000000000 --- a/packages_tests/rpm/deployment/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - deployment \ No newline at end of file diff --git a/packages_tests/rpm/master/test.yaml b/packages_tests/rpm/master/test.yaml deleted file mode 100644 index d1da59516..000000000 --- a/packages_tests/rpm/master/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - master \ No newline at end of file diff --git a/packages_tests/rpm/packages.yaml b/packages_tests/rpm/packages.yaml deleted file mode 100644 index 5814b7341..000000000 --- a/packages_tests/rpm/packages.yaml +++ /dev/null @@ -1,2614 +0,0 @@ -provisioning: - projects: - - name: acl - packages: - - acl - - libacl - - name: atmel-firmware - packages: - - atmel-firmware - - name: attr - packages: - - libattr - - attr - - name: audit - packages: - - audit - - audit-libs - - name: augeas - packages: - - augeas-libs - - name: authconfig - packages: - - authconfig - - name: b43-openfwwf - packages: - - b43-openfwwf - - name: basesystem - packages: - - basesystem - - name: bash - packages: - - bash - - name: bfa-firmware - packages: - - bfa-firmware - - name: bind - packages: - - bind-libs - - bind-utils - - name: binutils - packages: - - binutils - - name: bzip2 - packages: - - bzip2-libs - - bzip2 - - name: ca-certificates - packages: - - ca-certificates - - name: checkpolicy - packages: - - checkpolicy - - name: chkconfig - packages: - - chkconfig - - name: cloog - packages: - - cloog-ppl - - name: compat-readline5 - packages: - - compat-readline5 - - name: coreutils - packages: - - coreutils-libs - - coreutils - - name: cpio - packages: - - cpio - - name: cracklib - packages: - - cracklib - - cracklib-dicts - - name: cronie - packages: - - cronie-anacron - - cronie - - name: crontabs - packages: - - crontabs - - name: curl - packages: - - libcurl - - curl - - name: cyrus-sasl - packages: - - cyrus-sasl-lib - - cyrus-sasl - - name: daemonize - packages: - - daemonize - - name: dash - packages: - - dash - - name: db4 - packages: - - db4 - - db4-utils - - name: dbus - packages: - - dbus-libs - - name: dbus-glib - packages: - - dbus-glib - - name: device-mapper-persistent-data - packages: - - device-mapper-persistent-data - - name: dhcp - packages: - - dhcp-common - - dhclient - - name: diffutils - packages: - - diffutils - - name: dmidecode - packages: - - dmidecode - - name: e2fsprogs - packages: - - e2fsprogs - - e2fsprogs-libs - - 
libss - - libcom_err - - name: efibootmgr - packages: - - efibootmgr - - name: elfutils - packages: - - elfutils-libelf - - name: ethtool - packages: - - ethtool - - name: expat - packages: - - expat - - name: facter - packages: - - facter - - name: file - packages: - - file-libs - - file - - name: filesystem - packages: - - filesystem - - name: findutils - packages: - - findutils - - name: fipscheck - packages: - - fipscheck-lib - - fipscheck - - name: gamin - packages: - - gamin - - name: gawk - packages: - - gawk - - name: gcc - packages: - - libgomp - - libgcc - - gcc - - cpp - - libstdc++ - - name: gdbm - packages: - - gdbm - - name: gdisk - packages: - - gdisk - - name: glib2 - packages: - - glib2 - - name: glibc - packages: - - glibc-common - - glibc - - glibc-headers - - glibc-devel - - name: gmp - packages: - - gmp - - name: gnupg2 - packages: - - gnupg2 - - name: gpgme - packages: - - gpgme - - name: gpm - packages: - - gpm-libs - - name: grep - packages: - - grep - - name: groff - packages: - - groff - - name: grub - packages: - - grub - - name: grubby - packages: - - grubby - - name: gzip - packages: - - gzip - - name: hiera - packages: - - hiera - - name: hwdata - packages: - - hwdata - - name: icu - packages: - - libicu - - name: initscripts - packages: - - initscripts - - name: iproute - packages: - - iproute - - name: iptables - packages: - - iptables-ipv6 - - iptables - - name: ipw2100-firmware - packages: - - ipw2100-firmware - - name: ipw2200-firmware - packages: - - ipw2200-firmware - - name: kbd - packages: - - kbd-misc - - kbd - - name: kernel - packages: - - kernel-devel - - kernel-headers - - kernel - - name: keyutils - packages: - - keyutils-libs - - name: krb5 - packages: - - krb5-libs - - name: libcap - packages: - - libcap - - name: libcap-ng - packages: - - libcap-ng - - name: libdrm - packages: - - libdrm - - name: libedit - packages: - - libedit - - name: libffi - packages: - - libffi - - name: libgcrypt - packages: - - libgcrypt - - name: libgpg-error - packages: - - libgpg-error - - name: libidn - packages: - - libidn - - name: libnih - packages: - - libnih - - name: libpcap - packages: - - libpcap - - name: libpciaccess - packages: - - libpciaccess - - name: libselinux - packages: - - libselinux - - libselinux-utils - - libselinux-ruby - - name: libsemanage - packages: - - libsemanage - - name: libsepol - packages: - - libsepol - - name: libssh2 - packages: - - libssh2 - - name: libtasn1 - packages: - - libtasn1 - - name: libusb - packages: - - libusb - - name: libuser - packages: - - libuser - - name: libutempter - packages: - - libutempter - - name: libxml2 - packages: - - libxml2 - - name: libyaml - packages: - - libyaml - - name: linux-firmware-20150115 - packages: - - iwl1000-firmware - - iwl6000-firmware - - iwl3945-firmware - - iwl6050-firmware - - iwl5150-firmware - - iwl5000-firmware - - iwl4965-firmware - - iwl6000g2a-firmware - - iwl100-firmware - - name: logrotate - packages: - - logrotate - - name: lua - packages: - - lua - - name: lvm2 - packages: - - device-mapper - - lvm2-libs - - device-mapper-libs - - device-mapper-event-libs - - device-mapper-event - - lvm2 - - name: m4 - packages: - - m4 - - name: make - packages: - - make - - name: mcollective - packages: - - mcollective - - mcollective-common - - name: mdadm - packages: - - mdadm - - name: mingetty - packages: - - mingetty - - name: mlocate - packages: - - mlocate - - name: module-init-tools - packages: - - module-init-tools - - name: mpfr - packages: - - mpfr - - name: mysql - 
packages: - - mysql-libs - - name: nailgun-agent - packages: - - nailgun-agent - - name: nailgun-mcagents - packages: - - nailgun-mcagents - - name: nailgun-net-check - packages: - - nailgun-net-check - - name: ncurses - packages: - - ncurses-base - - ncurses-libs - - ncurses - - name: net-tools - packages: - - net-tools - - name: newt - packages: - - newt - - newt-python - - name: nmap - packages: - - nmap-ncat - - name: nspr - packages: - - nspr - - name: nss - packages: - - nss-sysinit - - nss - - nss-tools - - name: nss-softokn - packages: - - nss-softokn-freebl - - nss-softokn - - name: nss-util - packages: - - nss-util - - name: ntp - packages: - - ntp - - ntpdate - - name: openldap - packages: - - openldap - - name: openssh - packages: - - openssh - - openssh-clients - - openssh-server - - name: openssl - packages: - - openssl - - name: p11-kit - packages: - - p11-kit-trust - - p11-kit - - name: pam - packages: - - pam - - name: passwd - packages: - - passwd - - name: pciutils - packages: - - pciutils-libs - - pciutils - - name: pcre - packages: - - pcre - - name: perl - packages: - - perl-libs - - perl-version - - perl - - perl - - perl - - perl - - name: pinentry - packages: - - pinentry - - name: pkgconfig - packages: - - pkgconfig - - name: plymouth - packages: - - plymouth-core-libs - - plymouth - - plymouth-scripts - - name: policycoreutils - packages: - - policycoreutils - - name: popt - packages: - - popt - - name: postfix - packages: - - postfix - - name: ppl - packages: - - ppl - - name: procps - packages: - - procps - - name: psmisc - packages: - - psmisc - - name: pth - packages: - - pth - - name: puppet - packages: - - puppet - - name: pygpgme - packages: - - pygpgme - - name: pyparsing - packages: - - pyparsing - - name: python - packages: - - python-libs - - python - - python - - name: python-argparse - packages: - - python-argparse - - name: python-cliff - packages: - - python-cliff - - name: python-cliff-tablib - packages: - - python-cliff-tablib - - name: python-cmd2 - packages: - - python-cmd2 - - name: python-daemonize - packages: - - python-daemonize - - name: python-iniparse - packages: - - python-iniparse - - name: python-pycurl - packages: - - python-pycurl - - name: python-pypcap - packages: - - python-pypcap - - name: python-setuptools - packages: - - python-setuptools - - name: python-six - packages: - - python-six - - name: python-stevedore - packages: - - python-stevedore - - name: python-tablib - packages: - - python-tablib - - name: python-urlgrabber - packages: - - python-urlgrabber - - name: ql2400-firmware - packages: - - ql2400-firmware - - name: ql2500-firmware - packages: - - ql2500-firmware - - name: readline - packages: - - readline - - name: redhat-logos - packages: - - redhat-logos - - name: rootfiles - packages: - - rootfiles - - name: rpm - packages: - - rpm-libs - - rpm-python - - rpm - - name: rsync - packages: - - rsync - - name: rsyslog - packages: - - rsyslog - - name: ruby - packages: - - ruby - - ruby-rdoc - - ruby-libs - - ruby-irb - - name: ruby-augeas - packages: - - ruby-augeas - - name: ruby-rgen - packages: - - ruby-rgen - - name: ruby-shadow - packages: - - ruby-shadow - - name: rubygem-cstruct - packages: - - rubygem-cstruct - - name: rubygem-extlib - packages: - - rubygem-extlib - - name: rubygem-httpclient - packages: - - rubygem-httpclient - - name: rubygem-ipaddress - packages: - - rubygem-ipaddress - - name: rubygem-json - packages: - - rubygem-json - - name: rubygem-mixlib-cli - packages: - - rubygem-mixlib-cli - - 
name: rubygem-mixlib-config - packages: - - rubygem-mixlib-config - - name: rubygem-mixlib-log - packages: - - rubygem-mixlib-log - - name: rubygem-netaddr - packages: - - rubygem-netaddr - - name: rubygem-ohai - packages: - - rubygem-ohai - - name: rubygem-openstack - packages: - - rubygem-openstack - - name: rubygem-rethtool - packages: - - rubygem-rethtool - - name: rubygem-stomp - packages: - - rubygem-stomp - - name: rubygem-systemu - packages: - - rubygem-systemu - - name: rubygem-yajl-ruby - packages: - - rubygem-yajl-ruby - - name: rubygems - packages: - - rubygems - - name: scapy - packages: - - scapy - - name: sed - packages: - - sed - - name: selinux-policy - packages: - - selinux-policy-targeted - - selinux-policy - - name: setup - packages: - - setup - - name: shadow-utils - packages: - - shadow-utils - - name: shared-mime-info - packages: - - shared-mime-info - - name: slang - packages: - - slang - - name: sqlite - packages: - - sqlite - - name: sudo - packages: - - sudo - - name: system-config-firewall - packages: - - system-config-firewall-base - - name: sysvinit - packages: - - sysvinit-tools - - name: tar - packages: - - tar - - name: tcpdump - packages: - - tcpdump - - name: telnet - packages: - - telnet - - name: texinfo - packages: - - info - - name: undefined - packages: - - ivtv-firmware-20080701 - - MAKEDEV - - PyYAML - - tcp_wrappers-libs - - libudev-147 - - udev-147 - - dracut-004 - - linux-firmware-20150115 - - aic94xx-firmware-30 - - tzdata-2014e - - libertas-usb8388-firmware-20150115 - - less-436 - - centos-release-6 - - iputils-20071127 - - dracut-kernel-004 - - name: upstart - packages: - - upstart - - name: ustr - packages: - - ustr - - name: util-linux-ng - packages: - - libblkid - - util-linux-ng - - libuuid - - name: vconfig - packages: - - vconfig - - name: vim - packages: - - vim-minimal - - vim-enhanced - - vim-common - - name: virt-what - packages: - - virt-what - - name: wget - packages: - - wget - - name: which - packages: - - which - - name: xz - packages: - - xz-libs - - name: yum - packages: - - yum - - name: yum-metadata-parser - packages: - - yum-metadata-parser - - name: yum-utils - packages: - - yum-utils - - yum-plugin-fastestmirror - - name: zd1211-firmware - packages: - - zd1211-firmware - - name: zlib - packages: - - zlib -deployment: - projects: - - name: ceph - packages: - - ceph - - ceph-common - - ceph-radosgw - - ceph-test - - libcephfs1 - - librados2 - - librbd1 - - python-ceph - - rbd-fuse - - at - - bc - - ceph-deploy - - db4-devel - - ed - - gdbm-devel - - gperftools-libs - - leveldb - - libunwind - - mailx - - patch - - pax - - python-distribute - - python-flask - - python-itsdangerous - - python-werkzeug - - redhat-lsb-core - - snappy - - time - - name: cinder - packages: - - openstack-cinder - - python-cinder - - libmnl - - name: glance - packages: - - openstack-glance - - python-glance - - name: neutron - packages: - - openstack-neutron - - openstack-neutron-bigswitch - - openstack-neutron-brocade - - openstack-neutron-cisco - - openstack-neutron-hyperv - - openstack-neutron-ibm - - openstack-neutron-linuxbridge - - openstack-neutron-mellanox - - openstack-neutron-metaplugin - - openstack-neutron-metering-agent - - openstack-neutron-midonet - - openstack-neutron-ml2 - - openstack-neutron-nec - - openstack-neutron-nuage - - openstack-neutron-ofagent - - openstack-neutron-oneconvergence-nvsd - - openstack-neutron-opencontrail - - openstack-neutron-openvswitch - - openstack-neutron-plumgrid - - openstack-neutron-ryu - - 
openstack-neutron-vpn-agent - - python-neutron - - dkms - - ipset - - libipset3 - - openstack-datapath-dkms - - python-jsonrpclib - - name: nova - packages: - - openstack-nova - - openstack-nova-api - - openstack-nova-cells - - openstack-nova-cert - - openstack-nova-common - - openstack-nova-compute - - openstack-nova-conductor - - openstack-nova-console - - openstack-nova-network - - openstack-nova-novncproxy - - openstack-nova-objectstore - - openstack-nova-scheduler - - python-nova - - name: perl - packages: - - perl-CGI - - perl-Config-General - - perl-DBD-MySQL - - perl-DBI - - perl-Error - - perl-ExtUtils-MakeMaker - - perl-ExtUtils-ParseXS - - perl-LockFile-Simple - - perl-TermReadKey - - perl-Test-Harness - - perl-Test-Simple - - perl-Time-HiRes - - perl-TimeDate - - perl-devel - - name: swift - packages: - - openstack-swift - - openstack-swift-account - - openstack-swift-container - - openstack-swift-object - - openstack-swift-proxy - - name: general - packages: - - abrt - - abrt-addon-ccpp - - abrt-addon-kerneloops - - abrt-addon-python - - abrt-cli - - abrt-libs - - abrt-tui - - acpid - - aic94xx-firmware - - alsa-lib - - alsa-utils - - ami-creator - - audit-libs-python - - autoconf - - automake - - avahi - - b43-fwcutter - - biosdevname - - blktrace - - bluez-libs - - boost-filesystem - - boost-iostreams - - boost-program-options - - boost-system - - boost-thread - - brlapi - - brltty - - btparser - - btrfs-progs - - busybox - - byacc - - c-ares19 - - celt051 - - centos-indexhtml - - centos-release - - ntsysv - - cifs-utils - - cirros-testvm - - cirros-testvm-mellanox - - cloud-init - - cloud-utils - - cloud-utils-growpart - - cman - - cluster-glue-libs - - modcluster - - conntrack-tools - - corosync - - corosynclib - - cpufreq-init - - cpuspeed - - crda - - crmsh - - cyrus-sasl-plain - - db4-cxx - - dejavu-fonts-common - - dejavu-sans-fonts - - deltaiso - - desktop-file-utils - - kpartx - - disktype - - dmraid - - dmraid-events - - dnsmasq-utils - - dosfstools - - dpkg - - dpkg-devel - - dracut - - dracut-kernel - - dstat - - ebtables - - eject - - elfutils - - elfutils-libs - - erlang - - erlang-appmon - - erlang-asn1 - - erlang-common_test - - erlang-compiler - - erlang-cosEvent - - erlang-cosEventDomain - - erlang-cosFileTransfer - - erlang-cosNotification - - erlang-cosProperty - - erlang-cosTime - - erlang-cosTransactions - - erlang-crypto - - erlang-debugger - - erlang-dialyzer - - erlang-diameter - - erlang-doc - - erlang-docbuilder - - erlang-edoc - - erlang-erl_docgen - - erlang-erl_interface - - erlang-erts - - erlang-et - - erlang-eunit - - erlang-examples - - erlang-gs - - erlang-hipe - - erlang-ic - - erlang-inets - - erlang-inviso - - erlang-jinterface - - erlang-kernel - - erlang-megaco - - erlang-mnesia - - erlang-observer - - erlang-odbc - - erlang-orber - - erlang-os_mon - - erlang-otp_mibs - - erlang-parsetools - - erlang-percept - - erlang-pman - - erlang-public_key - - erlang-reltool - - erlang-runtime_tools - - erlang-sasl - - erlang-snmp - - erlang-ssh - - erlang-ssl - - erlang-stdlib - - erlang-syntax_tools - - erlang-test_server - - erlang-toolbar - - erlang-tools - - erlang-tv - - erlang-typer - - erlang-webtool - - erlang-wx - - erlang-xmerl - - eswitchd - - euca2ools - - fcgiwrap - - fdupes - - febootstrap-supermin-helper - - fence-virt - - fencing-agent - - flac - - flex - - fontpackages-filesystem - - fping - - fprintd - - fprintd-pam - - fuel-utils - - fuse - - fuse-libs - - galera - - gcc-c++ - - gcc-gfortran - - libstdc++-devel - - gdb - - 
ghostscript - - ghostscript-fonts - - giflib - - git - - perl-Git - - gpxe-bootimgs - - hal-info - - haproxy - - hivex - - perl-hivex - - htop - - http-parser - - hunspell - - hunspell-en - - iksemel - - iperf - - iperf3 - - ipxe-roms-qemu - - irqbalance - - iscsi-initiator-utils - - ivtv-firmware - - iw - - java-1.6.0-openjdk - - jline - - jpackage-utils - - json-c - - keepalived - - kernel-firmware - - kernel-firmware - - kernel-lt - - kernel-lt-devel - - kernel-lt-headers - - kexec-tools - - keyutils - - ledmon - - less - - libaio - - libasyncns - - libconfig - - libdaemon - - libewf - - libfcgi - - libfontenc - - libfprint - - libgssglue - - libguestfs - - libguestfs-tools-c - - python-libguestfs - - libibverbs - - libiscsi - - libmemcached - - libmlx4 - - libnet - - libnfnetlink - - libogg - - libproxy - - libproxy-bin - - libproxy-python - - libqb - - librdmacm - - librelp - - libreport - - libreport-cli - - libreport-compat - - libreport-plugin-kerneloops - - libreport-plugin-logger - - libreport-plugin-mailx - - libreport-plugin-reportuploader - - libreport-plugin-rhtsupport - - libreport-python - - libselinux-python - - libsemanage-python - - libsmi - - libsndfile - - libtalloc - - libtar - - libtdb - - libtevent - - libtirpc - - libtool-ltdl - - libusbx - - libuv - - libuv-devel - - libvirt - - libvirt-daemon - - libvirt-daemon-config-network - - libvirt-daemon-config-nwfilter - - libvirt-daemon-driver-interface - - libvirt-daemon-driver-lxc - - libvirt-daemon-driver-network - - libvirt-daemon-driver-nodedev - - libvirt-daemon-driver-nwfilter - - libvirt-daemon-driver-qemu - - libvirt-daemon-driver-secret - - libvirt-daemon-driver-storage - - libvirt-python - - libvorbis - - libertas-usb8388-firmware - - linux-firmware - - python-imgcreate - - lldpad - - lldpad-libs - - lshell - - lzop - - man-pages - - man-pages-overrides - - mc - - mcollective-client - - memcached - - mlnx-ofed-light - - libmongodb - - mongodb - - mongodb-server - - mtr - - murano - - murano-apps - - murano-dashboard - - nagios - - nagios-common - - nagios-plugins - - nagios-plugins-apt - - nagios-plugins-breeze - - nagios-plugins-by_ssh - - nagios-plugins-cluster - - nagios-plugins-dhcp - - nagios-plugins-dig - - nagios-plugins-disk - - nagios-plugins-disk_smb - - nagios-plugins-dns - - nagios-plugins-dummy - - nagios-plugins-file_age - - nagios-plugins-flexlm - - nagios-plugins-hpjd - - nagios-plugins-http - - nagios-plugins-icmp - - nagios-plugins-ide_smart - - nagios-plugins-ircd - - nagios-plugins-ldap - - nagios-plugins-linux_raid - - nagios-plugins-load - - nagios-plugins-log - - nagios-plugins-mailq - - nagios-plugins-mrtg - - nagios-plugins-mrtgtraf - - nagios-plugins-mysql - - nagios-plugins-nagios - - nagios-plugins-nt - - nagios-plugins-ntp - - nagios-plugins-ntp-perl - - nagios-plugins-nwstat - - nagios-plugins-oracle - - nagios-plugins-overcr - - nagios-plugins-perl - - nagios-plugins-pgsql - - nagios-plugins-ping - - nagios-plugins-procs - - nagios-plugins-real - - nagios-plugins-rpc - - nagios-plugins-sensors - - nagios-plugins-smtp - - nagios-plugins-snmp - - nagios-plugins-ssh - - nagios-plugins-swap - - nagios-plugins-tcp - - nagios-plugins-time - - nagios-plugins-ups - - nagios-plugins-users - - nagios-plugins-wave - - nagios-plugins-os-libvirt - - nagios-plugins-os-rabbitmq - - nagios-plugins-os-swift - - nano - - nc - - net-snmp - - netcf-libs - - netpbm - - netpbm-progs - - nfs-utils - - nfs-utils-lib - - nmap - - nodejs - - nodejs-less - - novnc - - nagios-plugins-nrpe - - nrpe - - numad 
- - oddjob - - openpgm - - openstack-ceilometer-alarm - - openstack-ceilometer-api - - openstack-ceilometer-central - - openstack-ceilometer-collector - - openstack-ceilometer-common - - openstack-ceilometer-compute - - openstack-ceilometer-notification - - python-ceilometer - - openstack-heat-api - - openstack-heat-api-cfn - - openstack-heat-api-cloudwatch - - openstack-heat-common - - openstack-heat-docker - - openstack-heat-engine - - openstack-swift-plugin-swift3 - - openstack-utils - - openvswitch - - kmod-openvswitch - - pacemaker - - pacemaker-cli - - pacemaker-cluster-libs - - pacemaker-libs - - parted - - pcapy - - pcmciautils - - pcs - - pcs - - percona-toolkit - - percona-xtrabackup - - php - - php-bcmath - - php-cli - - php-common - - php-gd - - php-mbstring - - php-mysql - - php-pdo - - php-pgsql - - php-xml - - pinfo - - policycoreutils-python - - postgresql-devel - - prelink - - psacct - - pssh - - pulseaudio-libs - - puppet-server - - puppetdb - - puppetdb-terminus - - puppetlabs-release - - pushy - - pykickstart - - pyparsing-doc - - pysendfile - - pysnmp - - python-XStatic - - python-XStatic-Angular - - python-XStatic-Angular-Cookies - - python-XStatic-Angular-Mock - - python-XStatic-Bootstrap-Datepicker - - python-XStatic-Bootstrap-SCSS - - python-XStatic-D3 - - python-XStatic-Font-Awesome - - python-XStatic-Hogan - - python-XStatic-JQuery-Migrate - - python-XStatic-JQuery.TableSorter - - python-XStatic-JQuery.quicksearch - - python-XStatic-JSEncrypt - - python-XStatic-Jasmine - - python-XStatic-QUnit - - python-XStatic-Rickshaw - - python-XStatic-Spin - - python-XStatic-jQuery - - python-XStatic-jquery-ui - - python-routes1.12 - - python-actdiag - - python-barbicanclient - - python-blockdiag - - python-boto - - python-cloudfiles - - python-cm-api - - python-configobj - - python-croniter - - python-d2to1 - - python-django-appconf - - python-django-compressor - - python-django-floppyforms - - openstack-dashboard - - python-django-horizon - - python-django-horizon-doc - - python-django-openstack-auth - - python-django-pyscss - - python-dns - - python-docker-py - - python-docutils - - python-funcparserlib - - python-gflags - - python-glance-store - - python-greenlet-devel - - python-happybase - - python-imaging - - python-iwlib - - python-jsonpath-rw - - python-kazoo - - python-keystoneclient-doc - - python-lesscpy - - python-msgpack - - python-nose-cover3 - - python-novaclient-doc - - python-nwdiag - - python-openstackclient - - python-oslo-config-doc - - python-oslo-rootwrap - - python-osnagios - - python-osprofiler - - python-paste-deploy1.5 - - python-psutil - - python-py - - python-pyasn1 - - python-pylibmc - - python-pymemcache - - python-bson - - python-pymongo - - python-pyscss - - python-pyudev - - python-qpid - - python-rabbit - - python-reportlab - - python-retrying - - python-rfc3986 - - python-rtslib-fb - - python-seqdiag - - python-sphinx10 - - python-sphinx10-doc - - python-swiftclient-doc - - python-taskflow - - python-thrift - - python-tooz - - python-troveclient - - python-versiontools - - python-virtualenv - - python-webcolors - - python-websocket-client - - python-websockify - - python-wsme - - python-zmq - - pytz - - pyxattr - - pyxf86config - - qemu - - qemu-common - - qemu-img - - qemu-kvm - - qpid-cpp-client - - qpid-cpp-client-ssl - - qpid-cpp-server - - qpid-cpp-server-ssl - - python-qpid-qmf - - qpid-qmf - - qpid-tests - - qpid-tools - - quota - - radvd - - rdate - - readahead - - resource-agents - - rfkill - - rhino - - ccs - - ricci - - 
rng-tools - - rpcbind - - rpm-build - - rst2pdf - - rsyslog-relp - - rt61pci-firmware - - rt73usb-firmware - - ruby-devel - - ruby-ri - - ruby-mysql - - rubygem-gem_plugin - - rubygem-json_pure - - rubygem-json_pure-doc - - rubygem-abstract - - rubygem-activesupport - - rubygem-arel - - rubygem-builder - - rubygem-bundler - - rubygem-bunny - - rubygem-daemons - - rubygem-erubis - - rubygem-eventmachine - - rubygem-eventmachine-doc - - ruby-extlib - - rubygem-extlib-doc - - rubygem-fastthread - - rubygem-highline - - rubygem-httpclient-doc - - rubygem-i18n - - rubygem-ipaddress-doc - - rubygem-json-doc - - rubygem-mail - - rubygem-mime-types - - rubygem-mime-types-doc - - rubygem-mixlib-authentication - - rubygem-mixlib-cli-doc - - rubygem-mixlib-shellout - - rubygem-moneta - - rubygem-mongrel - - rubygem-net-ssh-multi - - rubygem-polyglot - - rubygem-rack - - rubygem-rack-mount - - rubygem-rack-test - - rubygem-raemon - - rubygem-rake - - rubygem-rdoc - - rubygem-rest-client - - rubygem-stomp-doc - - rubygem-symboltable - - rubygem-thin - - rubygem-thin-doc - - rubygem-thor - - rubygem-treetop - - rubygem-tzinfo - - rubygem-uuidtools - - sahara - - samba-client - - samba-common - - samba-winbind - - samba-winbind-clients - - python-saslwrapper - - saslwrapper - - scl-utils - - scrub - - scsi-target-utils - - seabios-bin - - seavgabios-bin - - setools-libs - - setools-libs-python - - setserial - - setuptool - - sgabios-bin - - sgpio - - sheepdog - - smartmontools - - socat - - sos - - sphinxcontrib-actdiag - - sphinxcontrib-blockdiag - - sphinxcontrib-nwdiag - - sphinxcontrib-seqdiag - - spice-server - - squashfs-tools - - libsysfs - - sysfsutils - - system-config-firewall-tui - - system-config-keyboard - - system-config-keyboard-base - - system-config-network-tui - - systemtap-runtime - - tcptraceroute - - tcsh - - tinyproxy - - tmpwatch - - tmux - - traceroute - - tunctl - - tzdata - - tzdata-java - - libudev - - udev - - Django - - Django14 - - MySQL-client - - MySQL-client-wsrep - - MySQL-server - - MySQL-server-wsrep - - MySQL-shared-wsrep - - OpenIPMI-libs - - libXfont - - libXt - - libXtst - - libnetfilter_conntrack - - lm_sensors - - microcode_ctl - - mod_fastcgi - - pam_passwdqc - - unixODBC - - unzip - - urw-fonts - - usbutils - - usermode - - v8 - - web-assets-filesystem - - whatmask - - wireless-tools - - wireshark - - words - - xdg-utils - - xfsprogs - - xmlrpc-c - - xmlrpc-c-client - - xorg-x11-drv-ati-firmware - - xorg-x11-font-utils - - yum-plugin-security - - zabbix - - zabbix-agent - - zabbix-sender - - zabbix-server - - zabbix-server-mysql - - zabbix-server-pgsql - - zabbix-web - - zabbix-web-mysql - - zabbix-web-pgsql - - zeromq - - zeromq3 - - zip -master: - projects: - - name: apr - packages: - - apr - - name: apr-util - packages: - - apr-util - - apr-util-ldap - - name: atk - packages: - - atk - - name: atlas - packages: - - atlas - - name: atop - packages: - - atop - - name: avahi - packages: - - avahi-libs - - name: babel - packages: - - python-babel - - name: bridge-utils - packages: - - bridge-utils - - name: cairo - packages: - - cairo - - name: cdrkit - packages: - - genisoimage - - name: cobbler - packages: - - cobbler - - cobbler-web - - name: createrepo - packages: - - createrepo - - name: cryptsetup-luks - packages: - - cryptsetup-luks - - cryptsetup-luks-libs - - name: cups - packages: - - cups-libs - - name: cvs - packages: - - cvs - - name: cyrus-sasl - packages: - - cyrus-sasl-md5 - - name: dbus - packages: - - dbus - - name: dbus-python - packages: - - 
dbus-python - - name: debootstrap - packages: - - debootstrap - - name: deltarpm - packages: - - python-deltarpm - - deltarpm - - name: dhcp - packages: - - dhcp - - name: dnsmasq - packages: - - dnsmasq - - name: docker-io - packages: - - docker-io - - name: e2fsprogs - packages: - - libcom_err - - libcom_err-devel - - name: eggdbus - packages: - - eggdbus - - name: fence-agents - packages: - - fence-agents - - name: fontconfig - packages: - - fontconfig - - name: freetype - packages: - - freetype - - name: fuel-library - packages: - - fuel-library6.1 - - name: fuel-ostf - packages: - - fuel-ostf - - name: fuelmenu - packages: - - fuelmenu - - name: gcc - packages: - - libstdc++ - - libgfortran - - name: gd - packages: - - gd - - name: gettext - packages: - - gettext - - name: gnutls - packages: - - gnutls-utils - - gnutls - - name: gtk2 - packages: - - gtk2 - - name: hal - packages: - - hal-libs - - hal - - name: hdparm - packages: - - hdparm - - name: hicolor-icon-theme - packages: - - hicolor-icon-theme - - name: httpd - packages: - - httpd-tools - - httpd - - mod_ssl - - name: ipmitool - packages: - - ipmitool - - name: jasper - packages: - - jasper-libs - - name: keyutils - packages: - - keyutils-libs-devel - - name: krb5 - packages: - - krb5-devel - - name: libcgroup - packages: - - libcgroup - - name: libevent - packages: - - libevent-devel - - libevent-headers - - libevent - - libevent-doc - - name: libjpeg-turbo - packages: - - libjpeg-turbo - - name: libnl - packages: - - libnl - - name: libpng - packages: - - libpng - - name: libselinux - packages: - - libselinux-devel - - name: libsepol - packages: - - libsepol-devel - - name: libthai - packages: - - libthai - - name: libtiff - packages: - - libtiff - - name: libvirt - packages: - - libvirt-client - - name: libxcb - packages: - - libxcb - - name: libxml2 - packages: - - libxml2-python - - name: libxslt - packages: - - libxslt - - name: lrzip - packages: - - lrzip - - name: lsof - packages: - - lsof - - name: lxc - packages: - - lxc-libs - - lxc - - name: lzo - packages: - - lzo - - name: m2crypto - packages: - - m2crypto - - name: mailcap - packages: - - mailcap - - name: man - packages: - - man - - name: mesa - packages: - - mesa-libGL - - mesa-libGLU - - mesa-dri-filesystem - - mesa-dri-drivers - - name: mesa-dri1-drivers - packages: - - mesa-dri1-drivers - - name: mesa-private-llvm - packages: - - mesa-private-llvm - - name: monit - packages: - - monit - - name: mtools - packages: - - mtools - - name: nailgun - packages: - - nailgun - - name: nailgun-redhat-license - packages: - - nailgun-redhat-license - - name: net-snmp - packages: - - net-snmp-utils - - net-snmp-libs - - name: nginx - packages: - - nginx - - name: numactl - packages: - - numactl - - name: numpy - packages: - - numpy - - numpy-f2py - - name: openssl - packages: - - openssl-devel - - name: openstack-keystone - packages: - - openstack-keystone - - python-keystone - - name: pango - packages: - - pango - - name: perl - packages: - - perl-Net-Telnet - - perl-Module-Pluggable - - perl-Pod-Escapes - - perl-Pod-Simple - - name: pexpect - packages: - - pexpect - - name: pixman - packages: - - pixman - - name: pm-utils - packages: - - pm-utils - - name: polkit - packages: - - polkit - - name: portreserve - packages: - - portreserve - - name: postgresql - packages: - - postgresql-libs - - postgresql-server - - postgresql - - name: pygobject2 - packages: - - pygobject2 - - name: python - packages: - - python-PrettyTable - - python-posix_ipc - - python-devel - - 
python-MarkupSafe - - name: python-alembic - packages: - - python-alembic - - name: python-amqp - packages: - - python-amqp - - name: python-amqplib - packages: - - python-amqplib - - name: python-anyjson - packages: - - python-anyjson - - name: python-backports - packages: - - python-backports - - python-backports-ssl_match_hostname - - name: python-beaker - packages: - - python-beaker - - name: python-beautifulsoup4 - packages: - - python-beautifulsoup4 - - name: python-ceilometerclient - packages: - - python-ceilometerclient - - name: python-chardet - packages: - - python-chardet - - name: python-cheetah - packages: - - python-cheetah - - name: python-cherrypy - packages: - - python-cherrypy - - name: python-cinderclient - packages: - - python-cinderclient - - name: python-crypto - packages: - - python-crypto - - name: python-daemon - packages: - - python-daemon - - name: python-dateutil - packages: - - python-dateutil - - name: python-decorator - packages: - - python-decorator - - name: python-django - packages: - - python-django - - name: python-dmidecode - packages: - - python-dmidecode - - name: python-dogpile-cache - packages: - - python-dogpile-cache - - name: python-dogpile-core - packages: - - python-dogpile-core - - name: python-ecdsa - packages: - - python-ecdsa - - name: python-ethtool - packages: - - python-ethtool - - name: python-eventlet - packages: - - python-eventlet - - name: python-fabric - packages: - - python-fabric - - name: python-fuelclient - packages: - - python-fuelclient - - name: python-futures - packages: - - python-futures - - name: python-fysom - packages: - - python-fysom - - name: python-gevent - packages: - - python-gevent - - name: python-glanceclient - packages: - - python-glanceclient - - name: python-greenlet - packages: - - python-greenlet - - name: python-heatclient - packages: - - python-heatclient - - name: python-html5lib - packages: - - python-html5lib - - name: python-httplib2 - packages: - - python-httplib2 - - name: python-importlib - packages: - - python-importlib - - name: python-ipaddr - packages: - - python-ipaddr - - name: python-iso8601 - packages: - - python-iso8601 - - name: python-jinja2 - packages: - - python-jinja2 - - name: python-jsonpatch - packages: - - python-jsonpatch - - name: python-jsonpointer - packages: - - python-jsonpointer - - name: python-jsonschema - packages: - - python-jsonschema - - name: python-keyring - packages: - - python-keyring - - name: python-keystoneclient - packages: - - python-keystoneclient - - name: python-keystonemiddleware - packages: - - python-keystonemiddleware - - name: python-kombu - packages: - - python-kombu - - name: python-ldap - packages: - - python-ldap - - name: python-lockfile - packages: - - python-lockfile - - name: python-logutils - packages: - - python-logutils - - name: python-lxml - packages: - - python-lxml - - name: python-mako - packages: - - python-mako - - name: python-markdown - packages: - - python-markdown - - name: python-meld3 - packages: - - python-meld3 - - name: python-memcached - packages: - - python-memcached - - name: python-migrate - packages: - - python-migrate - - name: python-muranoclient - packages: - - python-muranoclient - - name: python-netaddr - packages: - - python-netaddr - - name: python-netifaces - packages: - - python-netifaces - - name: python-networkx - packages: - - python-networkx-core - - name: python-neutronclient - packages: - - python-neutronclient - - name: python-nose - packages: - - python-nose - - name: python-novaclient - packages: - - 
python-novaclient - - name: python-oauthlib - packages: - - python-oauthlib - - name: python-ordereddict - packages: - - python-ordereddict - - name: python-oslo-config - packages: - - python-oslo-config - - name: python-oslo-db - packages: - - python-oslo-db - - name: python-oslo-i18n - packages: - - python-oslo-i18n - - name: python-oslo-messaging - packages: - - python-oslo-messaging - - name: python-oslo-serialization - packages: - - python-oslo-serialization - - name: python-oslo-utils - packages: - - python-oslo-utils - - name: python-paramiko - packages: - - python-paramiko - - name: python-passlib - packages: - - python-passlib - - name: python-paste - packages: - - python-paste - - name: python-paste-deploy - packages: - - python-paste-deploy - - name: python-pbr - packages: - - python-pbr - - name: python-pecan - packages: - - python-pecan - - name: python-pip - packages: - - python-pip - - name: python-ply - packages: - - python-ply - - name: python-psycopg2 - packages: - - python-psycopg2 - - name: python-pycadf - packages: - - python-pycadf - - name: python-pygments - packages: - - python-pygments - - name: python-repoze-lru - packages: - - python-repoze-lru - - name: python-requests - packages: - - python-requests - - name: python-rhsm - packages: - - python-rhsm - - name: python-routes - packages: - - python-routes - - name: python-saharaclient - packages: - - python-saharaclient - - name: python-simplegeneric - packages: - - python-simplegeneric - - name: python-simplejson - packages: - - python-simplejson - - name: python-singledispatch - packages: - - python-singledispatch - - name: python-sqlalchemy - packages: - - python-sqlalchemy - - name: python-suds - packages: - - python-suds - - name: python-swiftclient - packages: - - python-swiftclient - - name: python-tempita - packages: - - python-tempita - - name: python-testresources - packages: - - python-testresources - - name: python-unittest2 - packages: - - python-unittest2 - - name: python-urllib3 - packages: - - python-urllib3 - - name: python-urwid - packages: - - python-urwid - - name: python-waitress - packages: - - python-waitress - - name: python-warlock - packages: - - python-warlock - - name: python-webob - packages: - - python-webob - - name: python-webpy - packages: - - python-webpy - - name: python-webtest - packages: - - python-webtest - - name: python-wsgilog - packages: - - python-wsgilog - - name: python-wsgiref - packages: - - python-wsgiref - - name: python-yaql - packages: - - python-yaql - - name: rabbitmq-server - packages: - - rabbitmq-server - - name: ruby21-augeas - packages: - - ruby21-augeas - - name: ruby21-facter - packages: - - ruby21-facter - - name: ruby21-hiera - packages: - - ruby21-hiera - - name: ruby21-mcollective - packages: - - ruby21-mcollective-common - - ruby21-mcollective - - name: ruby21-nailgun-mcagents - packages: - - ruby21-nailgun-mcagents - - name: ruby21-puppet - packages: - - ruby21-puppet - - name: ruby21-rubygem - packages: - - ruby21-rubygem-Platform - - ruby21-rubygem-json_pure - - name: ruby21-rubygem-activesupport - packages: - - ruby21-rubygem-activesupport - - name: ruby21-rubygem-amq-client - packages: - - ruby21-rubygem-amq-client - - name: ruby21-rubygem-amq-protocol - packages: - - ruby21-rubygem-amq-protocol - - name: ruby21-rubygem-amqp - packages: - - ruby21-rubygem-amqp - - name: ruby21-rubygem-astute - packages: - - ruby21-rubygem-astute - - name: ruby21-rubygem-eventmachine - packages: - - ruby21-rubygem-eventmachine - - name: ruby21-rubygem-i18n - 
packages: - - ruby21-rubygem-i18n - - name: ruby21-rubygem-json - packages: - - ruby21-rubygem-json - - name: ruby21-rubygem-mcollective-client - packages: - - ruby21-rubygem-mcollective-client - - name: ruby21-rubygem-mime-types - packages: - - ruby21-rubygem-mime-types - - name: ruby21-rubygem-net-ssh - packages: - - ruby21-rubygem-net-ssh - - name: ruby21-rubygem-net-ssh-gateway - packages: - - ruby21-rubygem-net-ssh-gateway - - name: ruby21-rubygem-net-ssh-multi - packages: - - ruby21-rubygem-net-ssh-multi - - name: ruby21-rubygem-netaddr - packages: - - ruby21-rubygem-netaddr - - name: ruby21-rubygem-open4 - packages: - - ruby21-rubygem-open4 - - name: ruby21-rubygem-openstack - packages: - - ruby21-rubygem-openstack - - name: ruby21-rubygem-popen4 - packages: - - ruby21-rubygem-popen4 - - name: ruby21-rubygem-raemon - packages: - - ruby21-rubygem-raemon - - name: ruby21-rubygem-rest-client - packages: - - ruby21-rubygem-rest-client - - name: ruby21-rubygem-rgen - packages: - - ruby21-rubygem-rgen - - name: ruby21-rubygem-shadow - packages: - - ruby21-rubygem-shadow - - name: ruby21-rubygem-stomp - packages: - - ruby21-rubygem-stomp - - name: ruby21-rubygem-symboltable - packages: - - ruby21-rubygem-symboltable - - name: ruby21-rubygem-systemu - packages: - - ruby21-rubygem-systemu - - name: rubygem-net-ssh - packages: - - rubygem-net-ssh - - name: rubygem-net-ssh-gateway - packages: - - rubygem-net-ssh-gateway - - name: scipy - packages: - - scipy - - name: screen - packages: - - screen - - name: send2syslog - packages: - - send2syslog - - name: shotgun - packages: - - shotgun - - name: strace - packages: - - strace - - name: subscription-manager - packages: - - subscription-manager - - name: suitesparse - packages: - - suitesparse - - name: supervisor - packages: - - supervisor - - name: syslinux - packages: - - syslinux - - name: sysstat - packages: - - sysstat - - name: tcl - packages: - - tcl - - name: tftp - packages: - - tftp-server - - name: tk - packages: - - tk - - name: undefined - packages: - - tcp_wrappers - - libXau - - libX11 - - ConsoleKit-libs - - sg3_utils - - lm_sensors-libs - - PyYAML - - GeoIP - - libX11-common - - libXpm - - sg3_utils-libs - - MAKEDEV - - ConsoleKit - - pyOpenSSL - - libICE - - wxBase - - wxGTK-gl - - libSM - - mod_wsgi - - libXfixes - - libXcomposite - - tcp_wrappers-libs - - SDL - - libXcursor - - libXrender - - libXxf86vm - - libXrandr - - wxGTK - - MySQL-python - - libXext - - libXinerama - - libXdamage - - libXft - - MySQL-shared - - libXi - - PyPAM - - bfa-firmware - - setup - - aic94xx-firmware-30 - - tzdata-2014e - - dhclient - - basesystem - - glibc-common - - grub - - glibc - - bash - - wget - - libcap - - sudo - - info - - e2fsprogs - - chkconfig - - efibootmgr - - audit-libs - - acl - - libxml2 - - telnet - - bzip2-libs - - readline - - ql2400-firmware - - libselinux - - ivtv-firmware-20080701 - - sed - - iwl6000-firmware - - xz-libs - - atmel-firmware - - libidn - - iwl4965-firmware - - file-libs - - iwl6000g2a-firmware - - libudev-147 - - iwl100-firmware - - findutils - - ipw2100-firmware - - lua - - rootfiles - - pcre - - fuel-target-centos-images - - cyrus-sasl-lib - - libpcap - - expat - - perl - - libgpg-error - - cpio - - sysvinit-tools - - libyaml - - pth - - p11-kit - - ca-certificates - - glib2 - - dbus-glib - - device-mapper-persistent-data - - libnih - - libutempter - - vim-minimal - - net-tools - - libselinux-ruby - - tar - - libicu - - libss - - db4-utils - - pinentry - - binutils - - m4 - - dash - - groff - - 
coreutils-libs - - cracklib - - coreutils - - module-init-tools - - redhat-logos - - pciutils - - logrotate - - nss-sysinit - - openldap - - libcap-ng - - ethtool - - gpm-libs - - krb5-libs - - ruby - - libssh2 - - curl - - rpm - - gpgme - - bind-libs - - mysql-libs - - fipscheck - - libsemanage - - python - - python-iniparse - - rpm-python - - python-six - - pyparsing - - python-tablib - - yum-metadata-parser - - python-pypcap - - python-urlgrabber - - yum - - python-daemonize - - python-cliff - - nailgun-net-check - - newt - - plymouth-core-libs - - centos-release-6 - - iptables - - iputils-20071127 - - initscripts - - device-mapper-libs - - device-mapper-event-libs - - virt-what - - device-mapper-event - - libdrm - - ntpdate - - kbd - - dracut-kernel-004 - - cyrus-sasl - - crontabs - - cronie - - selinux-policy - - kernel - - system-config-firewall-base - - fuel-docker-images - - lvm2 - - openssh-server - - libgcc - - b43-openfwwf - - filesystem - - dhcp-common - - authconfig - - ncurses-base - - vim-enhanced - - nss-softokn-freebl - - ncurses-libs - - nmap-ncat - - libattr - - passwd - - zlib - - audit - - popt - - gdisk - - libacl - - mlocate - - db4 - - rsync - - nspr - - attr - - daemonize - - nss-util - - iwl1000-firmware - - libsepol - - iwl5000-firmware - - shadow-utils - - libertas-usb8388-firmware-20150115 - - dbus-libs - - ql2500-firmware - - sqlite - - zd1211-firmware - - gawk - - iwl3945-firmware - - libuuid - - iwl6050-firmware - - libblkid - - iwl5150-firmware - - libselinux-utils - - ipw2200-firmware - - bzip2 - - fuel-bootstrap-image - - grep - - which - - elfutils-libelf - - gdbm - - perl-libs - - perl-version - - pytz-2010h - - nss-softokn - - checkpolicy - - libedit - - pciutils-libs - - dmidecode - - pkgconfig - - libtasn1 - - p11-kit-trust - - gamin - - shared-mime-info - - grubby - - libgcrypt - - file - - upstart - - procps - - psmisc - - augeas-libs - - gmp - - libusb - - e2fsprogs-libs - - libgomp - - diffutils - - make - - hal-info-20090716 - - vim-common - - ncurses - - less-436 - - gzip - - cracklib-dicts - - pam - - hwdata - - plymouth-scripts - - libpciaccess - - nss - - nss-tools - - libuser - - mingetty - - keyutils-libs - - openssl - - libcurl - - rpm-libs - - gnupg2 - - ruby21-rubygem - - tcpdump - - bind-utils - - fipscheck-lib - - ustr - - vconfig - - libffi - - python-libs - - python-setuptools - - python-argparse - - python-stevedore - - python-cmd2 - - pygpgme - - python-pycurl - - yum-plugin-fastestmirror - - scapy - - python-cliff-tablib - - slang - - newt-python - - kbd-misc - - policycoreutils - - iproute - - util-linux-ng - - udev-147 - - device-mapper - - openssh - - lvm2-libs - - plymouth - - ntp - - dracut-004 - - rsyslog - - postfix - - cronie-anacron - - iptables-ipv6 - - linux-firmware-20150115 - - selinux-policy-targeted - - openssh-clients - - erlang-stdlib-R14B - - erlang-ssl-R14B - - erlang-crypto-R14B - - yum-utils - - erlang-otp_mibs-R14B - - erlang-tools-R14B - - erlang-public_key-R14B - - erlang-kernel-R14B - - erlang-hipe-R14B - - erlang-snmp-R14B - - erlang-et-R14B - - erlang-sasl-R14B - - erlang-webtool-R14B - - erlang-os_mon-R14B - - erlang-inets-R14B - - erlang-xmerl-R14B - - erlang-syntax_tools-R14B - - erlang-compiler-R14B - - erlang-mnesia-R14B - - erlang-observer-R14B - - erlang-erts-R14B - - erlang-runtime_tools-R14B - - erlang-gs-R14B - - erlang-wx-R14B - - pydot-ng - - fuel-image - - fuel-provisioning-scripts - - name: uwsgi - packages: - - uwsgi-plugin-python - - uwsgi - - uwsgi-plugin-common - - name: xinetd - 
packages: - - xinetd - - name: xz - packages: - - xz - - xz-lzma-compat - - name: yajl - packages: - - yajl - - name: zlib - packages: - - zlib-devel -bootstrap: - projects: - - name: undefined - packages: - - bash - - bfa-firmware - - ql2100-firmware - - ql2200-firmware - - ql23xx-firmware - - cronie-noanacron - - crontabs - - dhclient - - dmidecode - - iputils - - logrotate - - mcollective - - mingetty - - net-tools - - ntp - - ntpdate - - openssh-clients - - openssh-server - - pciutils - - rsyslog - - scapy - - tcpdump - - vconfig - - vim-minimal - - wget - - nailgun-agent - - nailgun-mcagents - - nailgun-net-check - - fuel-agent - - python-tasklib \ No newline at end of file diff --git a/packages_tests/rpm/provisioning/test.yaml b/packages_tests/rpm/provisioning/test.yaml deleted file mode 100644 index 4c0ba1055..000000000 --- a/packages_tests/rpm/provisioning/test.yaml +++ /dev/null @@ -1,3 +0,0 @@ -system_tests: - tags: - - deployment \ No newline at end of file diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index d2ed47f91..000000000 --- a/pytest.ini +++ /dev/null @@ -1,10 +0,0 @@ -[pytest] -markers = - need_ready_cluster: Create and deploy cluster for test - need_ready_slaves: Create environment with bootstrapped slaves - need_ready_release: Setup master and prepare releases - need_ready_master: Setup master only - get_logs: Collect logs after the test finishes - fail_snapshot: Make environment snapshot if test failed -addopts = -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -testpaths = fuel_tests diff --git a/run_system_test.py b/run_system_test.py deleted file mode 100755 index ecdbfb5fc..000000000 --- a/run_system_test.py +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function - -import sys -import argparse - -import pytest - -from proboscis import TestProgram -from proboscis import register - -from fuelweb_test.helpers.utils import pretty_log - -from gates_tests.helpers.utils import map_test_review_in_fuel_library -from gates_tests.helpers.utils import \ map_test_review_in_openstack_puppet_projects - -from system_test import register_system_test_cases -from system_test import get_groups -from system_test import define_custom_groups -from system_test import discover_import_tests -from system_test import tests_directory -from system_test import collect_yamls -from system_test import get_path_to_config -from system_test import get_list_confignames -from system_test import get_basepath - -from system_test.core.repository import split_group_config - -basedir = get_basepath() - - -def print_explain(names): - groups_nums = get_groups() - if not isinstance(names, list): - names = [names] - out = [] - for name in [split_group_config(i)[0] if split_group_config(i) else i - for i in names]: - for i in groups_nums[name]: - if hasattr(i, 'home'): - out.append((i.home._proboscis_entry_.parent.home, i.home)) - else: - out.append(i) - print(pretty_log(out)) - - -def clean_argv_proboscis(): - """Remove argv params unused by Proboscis""" - argv = sys.argv - if '--with-config' in argv: - idx = argv.index('--with-config') - argv.pop(idx) - argv.pop(idx) - if '--explain' in argv: - idx = argv.index('--explain') - argv.pop(idx) - - return argv - - -def cli(): - cli = argparse.ArgumentParser(prog="System test runner", - description="Command line tool for running Fuel " - "System Tests") - - commands = cli.add_subparsers(title="Operation commands", - dest="command") - - cli_run = commands.add_parser('run', - help="Run test", - description="Run 
some test group") - - cli_run.add_argument("run_groups", nargs='*', default=None, ) - cli_run.add_argument("--with-config", default=False, type=str, - action="store", dest="config_name", - help="Select name of yaml config.") - cli_run.add_argument("--explain", default=False, action="store_true", - help="Show explain for running groups. " - "Will not start Proboscis.") - cli_run.add_argument("--show-plan", default=False, action="store_true", - help="Show Proboscis test plan.") - cli_run.add_argument("--with-xunit", default=False, action="store_true", - help="Use xuint report.") - cli_run.add_argument("--nologcapture", default=False, action="store_true", - help="Disable log capture for Proboscis.") - cli_run.add_argument("-q", default=False, action="store_true", - dest="quite", - help="Run Proboscis in quite mode.") - cli_run.add_argument("-a", default=False, action="store_true", - dest="nose_attr", - help="Provide Nose attr to Proboscis.") - cli_run.add_argument("-A", default=False, action="store_true", - dest="eval_nose", - help="Eval Nose attr to Proboscis.") - cli_run.add_argument("--groups", default=None, action="append", type=str, - help="Test group for testing. " - "(backward compatibility)") - - cli_explain_group = commands.add_parser("explain-group", - help="Explain selected group.") - cli_explain_group.add_argument("name", - help="Group name.") - - commands.add_parser("show-all-groups", - help="Show all Proboscis groups") - commands.add_parser("show-fuelweb-groups", - help="Show Proboscis groups defined in fuelweb suite") - commands.add_parser("show-systest-groups", - help="Show Proboscis groups defined in Systest suite") - commands.add_parser("show-systest-configs", - help="Show configurations for Systest suite") - - if len(sys.argv) == 1: - cli.print_help() - sys.exit(1) - - return cli.parse_args() - - -def run(**kwargs): - config_name = kwargs.get('config_name', None) - groups = kwargs.get('run_groups', []) - old_groups = kwargs.get('groups', None) - explain = kwargs.get('explain', None) - - groups_to_run = [] - groups.extend(old_groups or []) - - # Collect from pytest only once! 
- pytest.main(['--collect-only', 'fuel_tests', ]) - from fuel_tests.tests.conftest import test_names - - for g in set(groups): - if g in test_names: - sys.exit(pytest.main('-m {}'.format(g))) - if config_name: - register_system_test_cases( - groups=[g], - configs=[config_name]) - groups_to_run.append("{0}({1})".format(g, config_name)) - else: - register_system_test_cases(groups=[g]) - groups_to_run.append(g) - if not set([split_group_config(i)[0] if split_group_config(i) else i - for i in groups_to_run]) < set(get_groups()): - sys.exit('There are no cases mapped to the current group, ' - 'please make sure that you specified the right test group name.') - if explain: - print_explain(groups) - else: - register(groups=["run_system_test"], depends_on_groups=groups_to_run) - TestProgram(groups=['run_system_test'], - argv=clean_argv_proboscis()).run_and_exit() - - -def explain_group(**kwargs): - """Explain selected group.""" - name = kwargs.get('name', None) - print_explain(name) - - -def show_all_groups(**kwargs): - """Show all Proboscis groups""" - groups_nums = get_groups() - out = {k: len(v) for k, v in groups_nums.items()} - print(pretty_log(out)) - - -def show_fuelweb_groups(**kwargs): - """Show Proboscis groups defined in fuelweb suite""" - groups_nums = get_groups() - - out = {k: len(v) for k, v in groups_nums.items() - if not k.startswith('system_test')} - print(pretty_log(out)) - - -def show_systest_groups(**kwargs): - """Show Proboscis groups defined in Systest suite""" - groups_nums = get_groups() - - out = {k: len(v) for k, v in groups_nums.items() - if k.startswith('system_test')} - print(pretty_log(out)) - - -def show_systest_configs(**kwargs): - """Show configurations for Systest suite""" - tests_configs = collect_yamls(get_path_to_config()) - - for c in get_list_confignames(tests_configs): - print(c) - - -COMMAND_MAP = { - "run": run, - "explain-group": explain_group, - "show-all-groups": show_all_groups, - "show-fuelweb-groups": show_fuelweb_groups, - "show-systest-groups": show_systest_groups, - "show-systest-configs": show_systest_configs -} - - -def shell(): - args = cli() - discover_import_tests(basedir, tests_directory) - define_custom_groups() - map_test_review_in_fuel_library(**vars(args)) - map_test_review_in_openstack_puppet_projects(**vars(args)) - COMMAND_MAP[args.command](**vars(args)) - - -if __name__ == '__main__': - shell() diff --git a/run_tests.sh b/run_tests.sh deleted file mode 100755 index d781f22e8..000000000 --- a/run_tests.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -e -set -x - -flake8 --ignore=H302,H802 --show-source ./ - diff --git a/system_test/__init__.py b/system_test/__init__.py deleted file mode 100644 index b0b47b501..000000000 --- a/system_test/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
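The retired runner above follows a simple pattern: argparse subparsers name the operations, and COMMAND_MAP dispatches the parsed namespace to a handler as keyword arguments. A minimal, self-contained sketch of that dispatch pattern (names here are illustrative, not part of the original runner)::

    import argparse
    import sys


    def run(**kwargs):
        # Every handler receives the whole argparse namespace as kwargs.
        print("running groups:", kwargs.get("run_groups"))


    COMMAND_MAP = {"run": run}


    def shell():
        cli = argparse.ArgumentParser(prog="runner")
        commands = cli.add_subparsers(title="Operation commands",
                                      dest="command")
        cli_run = commands.add_parser("run", help="Run test")
        cli_run.add_argument("run_groups", nargs="*", default=None)
        if len(sys.argv) == 1:
            cli.print_help()
            sys.exit(1)
        args = cli.parse_args()
        # Dispatch on the subcommand name; vars() turns the namespace
        # into the kwargs the handlers expect.
        COMMAND_MAP[args.command](**vars(args))


    if __name__ == "__main__":
        shell()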
- -from fuelweb_test import logger - -from system_test.core.config import define_custom_groups -from system_test.core.config import tests_directory -from system_test.core.factory import ActionsFactory -from system_test.core.decorators import testcase -from system_test.core.decorators import deferred_decorator -from system_test.core.decorators import action -from system_test.core.decorators import nested_action -from system_test.core.discover import discover_import_tests -from system_test.core.discover import get_configs -from system_test.core.discover import collect_yamls -from system_test.core.discover import get_path_to_config -from system_test.core.discover import get_list_confignames -from system_test.core.discover import get_basepath -from system_test.core.repository import Repository -from system_test.core.repository import register_system_test_cases -from system_test.core.repository import get_groups - - -__all__ = [ - 'Repository', - 'ActionsFactory', - 'discover_import_tests', - 'register_system_test_cases', - 'get_groups', - 'testcase', - 'deferred_decorator', - 'action', - 'nested_action', - 'get_configs', - 'logger', - 'define_custom_groups', - 'tests_directory', - 'collect_yamls', - 'get_path_to_config', - 'get_list_confignames', - 'get_basepath', -] diff --git a/system_test/actions/__init__.py b/system_test/actions/__init__.py deleted file mode 100644 index bc5099f11..000000000 --- a/system_test/actions/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from .base import BaseActions -from .fuelmaster_actions import FuelMasterActions -from .strength_actions import StrengthActions -from .strength_actions import FillRootActions - - -__all__ = [ - 'BaseActions', - 'FuelMasterActions', - 'StrengthActions', - 'FillRootActions' -] diff --git a/system_test/actions/base.py b/system_test/actions/base.py deleted file mode 100644 index 296a37831..000000000 --- a/system_test/actions/base.py +++ /dev/null @@ -1,541 +0,0 @@ -# Copyright 2015-2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import division - -import time - -from proboscis import SkipTest -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_true -# pylint: disable=redefined-builtin -# noinspection PyUnresolvedReferences -from six.moves import xrange -# pylint: enable=redefined-builtin - -from fuelweb_test.helpers import checkers -from fuelweb_test.helpers.utils import TimeStat -from fuelweb_test import settings - -from system_test import logger -from system_test import action -from system_test import nested_action -from system_test import deferred_decorator - -from system_test.actions.ostf_actions import HealthCheckActions -from system_test.actions.plugins_actions import PluginsActions - -from system_test.core.discover import load_yaml -from system_test.helpers.decorators import make_snapshot_if_step_fail - - -# pylint: disable=no-member -# noinspection PyUnresolvedReferences -class PrepareActions(object): - """Base class with prepare actions - - _start_case - run before the test case starts - _finish_case - run after the test case finishes - setup_master - setup master node in environment - config_release - preconfigure releases if needed - make_slaves - boot slaves and snapshot environment with bootstrapped slaves - revert_slaves - revert environment with bootstrapped slaves - - """ - def __init__(self): - self.full_config = None - self.env_config = None - self.env_settings = None - self.config_name = None - self._devops_config = None - self._start_time = 0 - - def _load_config(self): - config = load_yaml(self.config_file) - self.full_config = config - self.env_config = config[ - 'template']['cluster_template'] - self.env_settings = config[ - 'template']['cluster_template']['settings'] - self.config_name = config['template']['name'] - - if 'devops_settings' in config['template']: - self._devops_config = config - - def _start_case(self): - """Start test case""" - self._load_config() - class_doc = getattr(self, "__doc__", self.__class__.__name__) - name = class_doc.splitlines()[0] - class_scenario = class_doc.splitlines()[1:] - start_case = "[ START {} ]".format(name) - header = "<<< {:=^142} >>>".format(start_case) - indent = ' ' * 4 - scenario = '\n'.join(class_scenario) - logger.info("\n{header}\n\n" - "{indent}Configuration: {config}\n" - "\n{scenario}".format( - header=header, - indent=indent, - config=self.config_name, - scenario=scenario)) - self._start_time = time.time() - - def _finish_case(self): - """Finish test case""" - case_time = time.time() - self._start_time - minutes = case_time // 60 - # pylint: disable=round-builtin - seconds = int(round(case_time)) % 60 - # pylint: enable=round-builtin - name = getattr(self, "__doc__", - self.__class__.__name__).splitlines()[0] - finish_case = "[ FINISH {} CASE TOOK {} min {} sec ]".format( - name, - minutes, - seconds) - footer = "<<< {:=^142} >>>".format(finish_case) - logger.info("\n{footer}\n".format(footer=footer)) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def setup_master(self): - """Setup master node""" - self.check_run("empty") - with TimeStat("setup_environment", is_uniq=True): - if list(self.env.d_env.get_nodes(role='fuel_master')): - self.env.setup_environment() - self.fuel_post_install_actions() - - elif list(self.env.d_env.get_nodes(role='centos_master')): - # need to use centos_master.yaml devops template - hostname = ''.join((settings.FUEL_MASTER_HOSTNAME, - settings.DNS_SUFFIX)) - self.centos_setup_fuel(hostname) - - else: - raise SkipTest( - "No Fuel master nodes found!") - 
- self.env.make_snapshot("empty", is_make=True) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def config_release(self): - """Configuration releases""" - self.check_run("ready") - self.env.revert_snapshot("empty", skip_timesync=True) - - self.fuel_web.get_nailgun_version() - self.fuel_web.change_default_network_settings() - - if (settings.REPLACE_DEFAULT_REPOS and - settings.REPLACE_DEFAULT_REPOS_ONLY_ONCE): - self.fuel_web.replace_default_repos() - - self.env.make_snapshot("ready", is_make=True) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def make_slaves(self): - """Bootstrap slaves and make snapshot - - Use slaves parameter from case section - - """ - slaves = int(self.full_config['template']['slaves']) - snapshot_name = "ready_with_{}_slaves".format(slaves) - self.check_run(snapshot_name) - self.env.revert_snapshot("ready", skip_timesync=True) - logger.info("Bootstrap {} nodes".format(slaves)) - self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:slaves], - skip_timesync=True) - self.env.make_snapshot(snapshot_name, is_make=True) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def revert_slaves(self): - """Revert bootstrapped nodes - - Skip if snapshot with cluster exists - - """ - self.check_run(self.env_config['name']) - slaves = int(self.full_config['template']['slaves']) - snapshot_name = "ready_with_{}_slaves".format(slaves) - self.env.revert_snapshot(snapshot_name) - - # noinspection PyMethodParameters - @nested_action - def prepare_admin_node_with_slaves(): - """Combine preparation steps in alias""" - return [ - 'setup_master', - 'config_release', - 'make_slaves', - 'revert_slaves', - ] - - -# noinspection PyUnresolvedReferences -class BaseActions(PrepareActions, HealthCheckActions, PluginsActions): - """Basic actions for acceptance cases - - To choose the action order, set the actions_order variable to a list of - actions - - Actions: - create_env - create and configure environment - add_nodes - add nodes to environment - deploy_cluster - deploy an environment - network_check - run network check - reset_cluster - reset an environment - delete_cluster - delete an environment - stop_deploy - stop deployment of an environment - - """ - - est_duration = None - base_group = None - actions_order = None - cluster_id = None - scale_step = 0 - power_step = 0 - - def _add_node(self, nodes_list): - """Add nodes to Environment""" - logger.info("Add nodes to env {}".format(self.cluster_id)) - names = "slave-{:02}" - slaves = int(self.full_config['template']['slaves']) - num = iter(xrange(1, slaves + 1)) - nodes = {} - for new in nodes_list: - for _ in xrange(new['count']): - name = names.format(next(num)) - while name in self.assigned_slaves: - name = names.format(next(num)) - - self.assigned_slaves.add(name) - nodes[name] = new['roles'] - logger.info("Set roles {} to node {}".format(new['roles'], - name)) - self.fuel_web.update_nodes(self.cluster_id, nodes) - - def _del_node(self, nodes_list): - """Delete nodes from Environment""" - logger.info("Delete nodes from env {}".format(self.cluster_id)) - nodes = {} - - for node in nodes_list: - cluster_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - self.cluster_id, node['roles']) - for i in xrange(node['count']): - dnode = self.fuel_web.get_devops_node_by_nailgun_node( - cluster_nodes[i]) - self.assigned_slaves.remove(dnode.name) - - nodes[dnode.name] = node['roles'] - logger.info("Delete node {} with role {}".format( - dnode.name, node['roles'])) - - 
self.fuel_web.update_nodes(self.cluster_id, nodes, False, True) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def create_env(self): - """Create Fuel Environment - - To configure the Environment, use the environment-config section in - the config file - - Skip action if we have snapshot with Environment name - - """ - self.check_run(self.env_config['name']) - - logger.info("Create env {}".format( - self.env_config['name'])) - cluster_settings = { - "murano": self.env_settings['components'].get('murano', False), - "sahara": self.env_settings['components'].get('sahara', False), - "ceilometer": self.env_settings['components'].get('ceilometer', - False), - "ironic": self.env_settings['components'].get('ironic', False), - "user": self.env_config.get("user", "admin"), - "password": self.env_config.get("password", "admin"), - "tenant": self.env_config.get("tenant", "admin"), - "volumes_lvm": self.env_settings['storages'].get("volume-lvm", - False), - "volumes_ceph": self.env_settings['storages'].get("volume-ceph", - False), - "images_ceph": self.env_settings['storages'].get("image-ceph", - False), - "ephemeral_ceph": self.env_settings['storages'].get( - "ephemeral-ceph", False), - "objects_ceph": self.env_settings['storages'].get("rados-ceph", - False), - "osd_pool_size": str(self.env_settings['storages'].get( - "replica-ceph", 2)), - "net_provider": self.env_config['network'].get('provider', - 'neutron'), - "net_segment_type": self.env_config['network'].get('segment-type', - 'vlan'), - "assign_to_all_nodes": self.env_config['network'].get( - 'pubip-to-all', - False), - "neutron_l3_ha": self.env_config['network'].get( - 'neutron-l3-ha', False), - "neutron_dvr": self.env_config['network'].get( - 'neutron-dvr', False), - "neutron_l2_pop": self.env_config['network'].get( - 'neutron-l2-pop', False), - "neutron_qos": self.env_config['network'].get( - 'neutron-qos', False), - } - - self.cluster_id = self.fuel_web.create_cluster( - name=self.env_config['name'], - mode=settings.DEPLOYMENT_MODE, - release_name=settings.OPENSTACK_RELEASE_UBUNTU - if self.env_config['release'] == 'ubuntu' - else settings.OPENSTACK_RELEASE, - settings=cluster_settings) - - logger.info("Cluster created with ID:{}".format(self.cluster_id)) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def add_nodes(self): - """Add nodes to environment - - Uses the nodes sub-section of the environment-config section - - Skip action if cluster doesn't exist - - """ - if self.cluster_id is None: - raise SkipTest( - "The cluster_id is not specified, can not add node to cluster" - ) - - self._add_node(self.env_config['nodes']) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def deploy_cluster(self): - """Deploy environment - - Skip action if cluster doesn't exist - - """ - if self.cluster_id is None: - raise SkipTest( - "The cluster_id is not specified, can not run the deploy" - ) - - self.fuel_web.deploy_cluster_wait(self.cluster_id) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def stop_on_deploy(self): - """Stop environment deployment and wait until slaves become online""" - if self.cluster_id is None: - raise SkipTest( - "The cluster_id is not specified, can not stop the deploy" - ) - - cluster_id = self.cluster_id - self.fuel_web.deploy_cluster_wait_progress( - cluster_id, progress=settings.PROGRESS_TO_STOP) - self.fuel_web.stop_deployment_wait(cluster_id) - self.fuel_web.wait_nodes_get_online_state( - self.env.d_env.get_nodes(name__in=list(self.assigned_slaves)), - timeout=10 * 60) - - 
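The create_env action above translates the environment-config section of a YAML template into the settings dict handed to create_cluster, supplying a default for every key the template may omit. A hedged sketch of that translation with a hypothetical template fragment (the real keys and defaults are the ones visible in create_env)::

    # Hypothetical fragment of a cluster_template, roughly what
    # _load_config would extract from the YAML file.
    env_config = {
        "name": "example_env",
        "network": {"provider": "neutron", "segment-type": "vlan"},
    }
    env_settings = {
        "components": {"sahara": True},
        "storages": {"volume-ceph": True, "replica-ceph": 3},
    }

    # Every option falls back to a default when the template omits it,
    # mirroring the .get(...) calls in create_env above.
    cluster_settings = {
        "sahara": env_settings["components"].get("sahara", False),
        "volumes_ceph": env_settings["storages"].get("volume-ceph", False),
        "osd_pool_size": str(env_settings["storages"].get("replica-ceph", 2)),
        "net_provider": env_config["network"].get("provider", "neutron"),
        "net_segment_type": env_config["network"].get("segment-type", "vlan"),
    }

    assert cluster_settings["osd_pool_size"] == "3"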
@deferred_decorator([make_snapshot_if_step_fail]) - @action - def network_check(self): - """Run network checker - - Skip action if cluster doesn't exist - - """ - if self.cluster_id is None: - raise SkipTest("The cluster_id is not specified, " - "can not run network verification") - - self.fuel_web.verify_network(self.cluster_id) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def save_load_environment(self): - """Load existent environment from snapshot or save it""" - env_name = self.env_config['name'] - if self.cluster_id is None: - logger.info("Revert Environment from " - "snapshot({})".format(env_name)) - assert_true(self.env.d_env.has_snapshot(env_name)) - self.env.revert_snapshot(env_name) - self.cluster_id = self.fuel_web.client.get_cluster_id(env_name) - logger.info("Cluster with ID:{} reverted".format(self.cluster_id)) - else: - logger.info("Make snapshot of Environment '{}' ID:{}".format( - env_name, self.cluster_id)) - self.env.make_snapshot(env_name, is_make=True) - self.env.resume_environment() - self.env.sync_time() - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def check_haproxy(self): - """HAProxy backend checking""" - controller_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - self.cluster_id, ['controller']) - - for node in controller_nodes: - logger.info("Check all HAProxy backends on {}".format( - node['meta']['system']['fqdn'])) - haproxy_status = checkers.check_haproxy_backend(node['ip']) - assert_equal(haproxy_status['exit_code'], 1, - "HAProxy backends are DOWN. {0}".format( - haproxy_status)) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def scale_node(self): - """Scale node in cluster - - For add nodes with role use scale_nodes in yaml with action add in - step:: - - scale_nodes: - - - roles: - - controller - count: 2 - action: add - - For remove nodes with role use scale_nodes in yaml with action delete - in step:: - - scale_nodes: - - - roles: - - controller - count: 2 - action: delete - - Step may contain add and remove action together:: - - scale_nodes: - - - roles: - - compute - count: 2 - action: add - - - roles: - - ceph-osd - count: 1 - action: delete - - """ - step_config = self.env_config['scale_nodes'][self.scale_step] - for node in step_config: - if node['action'] == 'add': - self._add_node([node]) - elif node['action'] == 'delete': - self._del_node([node]) - else: - logger.error("Unknown scale action: {}".format(node['action'])) - self.scale_step += 1 - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def manage_nodes_power(self): - """Manage power of node - - To power off node with role use manage_nodes_power in yaml with action - power_off in step:: - - manage_nodes_power: - - - roles: - - controller - node_number: 0 - action: power_off - - To power on node with role use manage_nodes_power in yaml with action - power_on in step:: - - manage_nodes_power: - - - roles: - - controller - node_number: 0 - action: power_on - - To restart node with role use manage_nodes_power in yaml with action - warm_restart or cold_restart in step:: - - manage_nodes_power: - - - roles: - - controller - node_number: 0 - action: warm_restart - - Example of cold restarting two different nodes with the same role:: - - manage_nodes_power: - - - roles: - - controller - node_number: 0 - action: cold_restart - - - roles: - - controller - node_number: 1 - action: cold_restart - - """ - power_actions = { - 'power_off_warm': self.fuel_web.warm_shutdown_nodes, - 'power_on_warm': 
self.fuel_web.warm_start_nodes, - 'reboot_warm': self.fuel_web.warm_restart_nodes, - 'reboot_cold': self.fuel_web.cold_restart_nodes - } - - step_config = self.env_config['manage_nodes_power'][self.power_step] - for node in step_config: - power_action = power_actions.get(node['action'], None) - node_number = node['node_number'] - if power_action: - ng_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=self.cluster_id, roles=[node['roles'][0]]) - - dev_node = self.fuel_web.get_devops_node_by_nailgun_fqdn( - ng_nodes[node_number]['fqdn']) - - # noinspection PyCallingNonCallable - power_action([dev_node]) - else: - logger.error("Unknown power switch action: " - "{}".format(node['action'])) - self.power_step += 1 - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def reset_cluster(self): - """Reset environment""" - cluster_id = self.cluster_id - self.fuel_web.stop_reset_env_wait(cluster_id) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def delete_cluster(self): - """Delete environment""" - cluster_id = self.cluster_id - self.fuel_web.delete_env_wait(cluster_id) diff --git a/system_test/actions/fuelmaster_actions.py b/system_test/actions/fuelmaster_actions.py deleted file mode 100644 index c303979b0..000000000 --- a/system_test/actions/fuelmaster_actions.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2015-2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from system_test import action -from system_test import deferred_decorator -from system_test import logger - -from system_test.helpers.decorators import make_snapshot_if_step_fail - - -# pylint: disable=no-member -# noinspection PyUnresolvedReferences -class FuelMasterActions(object): - """Actions specific only to Fuel Master node - - check_containers - check that docker containers are up - and running - """ - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def check_containers(self): - """Check that containers are up and running""" - logger.info("Check containers") - self.env.docker_actions.wait_for_ready_containers(timeout=60 * 30) diff --git a/system_test/actions/ostf_actions.py b/system_test/actions/ostf_actions.py deleted file mode 100644 index 85b4563ef..000000000 --- a/system_test/actions/ostf_actions.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
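The manage_nodes_power action above resolves each YAML step's action string through a dict of bound fuel_web methods and logs anything it does not recognize instead of failing. A standalone sketch of that table-driven dispatch, with a stub standing in for fuel_web (all names below are hypothetical)::

    def warm_shutdown_nodes(nodes):
        # Stub standing in for fuel_web.warm_shutdown_nodes.
        print("shutting down:", nodes)


    POWER_ACTIONS = {
        "power_off_warm": warm_shutdown_nodes,
    }


    def apply_power_step(step_config):
        for node in step_config:
            handler = POWER_ACTIONS.get(node["action"])
            if handler is None:
                # Unknown actions are reported and skipped, not fatal.
                print("Unknown power switch action: {}".format(node["action"]))
                continue
            handler([node["node_number"]])


    apply_power_step([{"action": "power_off_warm", "node_number": 0}])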
- -from proboscis import SkipTest - -from system_test import deferred_decorator - -from system_test import action - -from system_test.helpers.decorators import make_snapshot_if_step_fail - - -# pylint: disable=no-member -# noinspection PyUnresolvedReferences -class HealthCheckActions(object): - """Basic actions for OSTF tests - - health_check - run sanity and smoke OSTF tests - health_check_sanity_smoke_ha - run sanity, smoke and ha OSTF tests - health_check_ha - run ha OSTF tests - """ - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def health_check(self): - """Run health checker - - Skip action if cluster doesn't exist - """ - if self.cluster_id is None: - raise SkipTest( - "The cluster_id is not specified, can not run ostf" - ) - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - should_fail=getattr(self, 'ostf_tests_should_failed', 0), - failed_test_name=getattr(self, 'failed_test_name', None)) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def health_check_sanity_smoke_ha(self): - """Run health checker Sanity, Smoke and HA - - Skip action if cluster doesn't exist - """ - if self.cluster_id is None: - raise SkipTest( - "The cluster_id is not specified, can not run ostf" - ) - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - test_sets=['sanity', 'smoke', 'ha'], - should_fail=getattr(self, 'ostf_tests_should_failed', 0), - failed_test_name=getattr(self, 'failed_test_name', None)) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def health_check_ha(self): - """Run health checker HA - - Skip action if cluster doesn't exist - """ - if self.cluster_id is None: - raise SkipTest( - "The cluster_id is not specified, can not run ostf" - ) - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - test_sets=['ha'], - should_fail=getattr(self, 'ostf_tests_should_failed', 0), - failed_test_name=getattr(self, 'failed_test_name', None)) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def health_check_platform(self): - """Run health checker Platform - - Skip action if cluster doesn't exist - """ - if self.cluster_id is None: - raise SkipTest( - "The cluster_id is not specified, can not run ostf" - ) - - self.fuel_web.run_ostf( - cluster_id=self.cluster_id, - test_sets=['tests_platform'], - should_fail=getattr(self, 'ostf_tests_should_failed', 0), - failed_test_name=getattr(self, 'failed_test_name', None)) diff --git a/system_test/actions/plugins_actions.py b/system_test/actions/plugins_actions.py deleted file mode 100644 index 369554760..000000000 --- a/system_test/actions/plugins_actions.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
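The four health-check actions in ostf_actions.py above differ only in the test_sets argument they pass to run_ostf. One way they could have been folded into a single parameterized factory, sketched against the run_ostf call shown above (the factory itself is not part of the original suite)::

    from proboscis import SkipTest


    def make_health_check(test_sets=None):
        # Build an OSTF action bound to a fixed list of test sets.
        def health_check(self):
            if self.cluster_id is None:
                raise SkipTest(
                    "The cluster_id is not specified, can not run ostf")
            # Omit test_sets entirely to keep run_ostf's own default.
            kwargs = {} if test_sets is None else {"test_sets": test_sets}
            self.fuel_web.run_ostf(
                cluster_id=self.cluster_id,
                should_fail=getattr(self, 'ostf_tests_should_failed', 0),
                failed_test_name=getattr(self, 'failed_test_name', None),
                **kwargs)
        return health_check


    # health_check = make_health_check()
    # health_check_ha = make_health_check(['ha'])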
- -import os - -from proboscis.asserts import assert_true -from proboscis.asserts import assert_equal - -from fuelweb_test.helpers import utils -from system_test import logger -from system_test import action -from system_test import deferred_decorator -from system_test import nested_action - -from system_test.helpers.decorators import make_snapshot_if_step_fail - - -# pylint: disable=no-member -# noinspection PyUnresolvedReferences -class PluginsActions(object): - - plugin_name = None - plugin_path = None - - # noinspection PyMethodParameters - @nested_action - def prepare_env_with_plugin(): - return [ - 'setup_master', - 'config_release', - 'make_slaves', - 'revert_slaves', - 'upload_plugin', - 'install_plugin' - ] - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def upload_plugin(self): - """Upload plugin to master node""" - # copy plugin to the master node - assert_true(self.plugin_path, "plugin_path is not specified") - - utils.upload_tarball( - ip=self.ssh_manager.admin_ip, - tar_path=self.plugin_path, - tar_target='/var') - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def install_plugin(self): - """Install plugin to Fuel""" - assert_true(self.plugin_path, "plugin_path is not specified") - - utils.install_plugin_check_code( - ip=self.ssh_manager.admin_ip, - plugin=os.path.basename(self.plugin_path)) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def enable_plugin(self): - """Enable plugin for Fuel""" - assert_true(self.plugin_name, "plugin_name is not specified") - - msg = "Plugin couldn't be enabled. Check plugin version. Test aborted" - assert_true( - self.fuel_web.check_plugin_exists( - self.cluster_id, - self.plugin_name), - msg) - options = {'metadata/enabled': True} - self.fuel_web.update_plugin_data( - self.cluster_id, - self.plugin_name, options) - - @deferred_decorator([make_snapshot_if_step_fail]) - @action - def check_example_plugin(self): - """Check that the service is running on the controllers""" - - cmd_curl = 'curl localhost:8234' - cmd = 'pgrep -f fuel-simple-service' - - n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles( - cluster_id=self.cluster_id, - roles=['controller']) - d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls) - - for node in d_ctrls: - logger.info("Check plugin service on node {0}".format(node.name)) - with self.fuel_web.get_ssh_for_node(node.name) as remote: - res_pgrep = remote.execute(cmd) - assert_equal(0, res_pgrep['exit_code'], - 'Failed with error {0} ' - 'on node {1}'.format(res_pgrep['stderr'], node)) - assert_equal(1, len(res_pgrep['stdout']), - 'Failed with error {0} on the ' - 'node {1}'.format(res_pgrep['stderr'], node)) - # curl to service - res_curl = remote.execute(cmd_curl) - assert_equal(0, res_curl['exit_code'], - 'Failed with error {0} ' - 'on node {1}'.format(res_curl['stderr'], node)) diff --git a/system_test/actions/strength_actions.py b/system_test/actions/strength_actions.py deleted file mode 100644 index ef304d274..000000000 --- a/system_test/actions/strength_actions.py +++ /dev/null @@ -1,378 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import unicode_literals
-
-import time
-
-from devops.helpers.helpers import wait
-from proboscis.asserts import assert_true
-
-from fuelweb_test.helpers.pacemaker import get_pacemaker_nodes_attributes
-from fuelweb_test.helpers.pacemaker import get_pcs_nodes
-from fuelweb_test.helpers.pacemaker import parse_pcs_status_xml
-from fuelweb_test.helpers.ssh_manager import SSHManager
-
-from system_test import logger
-from system_test import deferred_decorator
-from system_test import action
-from system_test.helpers.decorators import make_snapshot_if_step_fail
-
-ssh_manager = SSHManager()
-
-
-# pylint: disable=no-member
-# noinspection PyUnresolvedReferences
-class StrengthActions(object):
-
-    destroyed_devops_nodes = []
-    ostf_tests_should_failed = 0
-    os_service_should_failed = 0
-
-    def _destroy_controller(self, devops_node_name):
-        logger.info("Suspend {} node".format(devops_node_name))
-        d_node = self.env.d_env.get_node(name=devops_node_name)
-        d_node.suspend(False)
-        self.ostf_tests_should_failed += 1
-        self.os_service_should_failed += 1
-        if d_node not in self.destroyed_devops_nodes:
-            self.destroyed_devops_nodes.append(d_node)
-        else:
-            logger.warning("Try to destroy an already destroyed node")
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def wait_offline_nodes(self):
-        """Wait offline status of destroyed nodes"""
-        assert_true(self.destroyed_devops_nodes,
-                    "No destroyed nodes in Environment")
-
-        def wait_offline_nodes():
-            n_nodes = [
-                self.fuel_web.get_nailgun_node_by_devops_node(node) for node in
-                self.destroyed_devops_nodes]
-            n_nodes = [node['online'] for node in n_nodes]
-            return n_nodes.count(True) == 0
-
-        wait(wait_offline_nodes, timeout=60 * 5,
-             timeout_msg='Nodes failed to become offline')
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def check_ha_service_ready(self):
-        """Wait for HA services ready"""
-        self.fuel_web.assert_ha_services_ready(self.cluster_id)
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def check_os_services_ready(self):
-        """Wait until OpenStack services are UP"""
-        self.fuel_web.assert_os_services_ready(
-            self.cluster_id,
-            should_fail=self.os_service_should_failed)
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def wait_galera_cluster(self):
-        """Wait until MySQL Galera is UP on online controllers"""
-        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            self.cluster_id,
-            ['controller'])
-        d_ctrls = {self.fuel_web.get_devops_node_by_nailgun_node(node)
-                   for node in n_ctrls}
-        self.fuel_web.wait_mysql_galera_is_up(
-            [n.name for n in d_ctrls - set(self.destroyed_devops_nodes)],
-            timeout=300)
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def check_pacemaker_status(self):
-        """Check controllers status in pacemaker"""
-        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
-            self.cluster_id,
-            ['controller'])
-        d_ctrls = {self.fuel_web.get_devops_node_by_nailgun_node(node)
-                   for node in n_ctrls}
-        online_d_ctrls = d_ctrls - set(self.destroyed_devops_nodes)
-
-        for node in online_d_ctrls:
-            logger.info("Check pacemaker status on {}".format(node.name))
-            self.fuel_web.assert_pacemaker(
-                node.name,
-                online_d_ctrls,
-                self.destroyed_devops_nodes)
-
-
-# noinspection PyUnresolvedReferences
-class FillRootActions(object):
-
-    ostf_tests_should_failed = 0
-    primary_controller = None
-    primary_controller_fqdn = None
-    primary_controller_space_on_root = 0
-    disk_monitor_limit = 512
-    rabbit_disk_free_limit = 5
-    pacemaker_restart_timeout = 600
-    pcs_check_timeout = 300
-    primary_controller_space_to_filled = 0
-    pcs_status = None
-    slave_nodes_fqdn = None
-    slave_node_running_resources = None
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def get_pcs_initial_state(self):
-        """Get controllers initial status in pacemaker"""
-        self.primary_controller = self.fuel_web.get_nailgun_primary_node(
-            self.env.d_env.nodes().slaves[0])
-
-        self.primary_controller_fqdn = str(
-            self.fuel_web.fqdn(self.primary_controller))
-
-        nail_node = self.fuel_web.get_nailgun_node_by_devops_node(
-            self.primary_controller)
-        pcs_status = parse_pcs_status_xml(nail_node['ip'])
-
-        with self.fuel_web.get_ssh_for_node(
-                self.primary_controller.name) as remote:
-            root_free = remote.check_call(
-                'cibadmin --query --scope status').stdout_str
-
-        self.primary_controller_space_on_root = get_pacemaker_nodes_attributes(
-            root_free)[self.primary_controller_fqdn]['root_free']
-
-        self.disk_monitor_limit = 512
-
-        self.rabbit_disk_free_limit = 5
-
-        self.pacemaker_restart_timeout = 600
-
-        self.pcs_check_timeout = 300
-
-        self.primary_controller_space_to_filled = str(
-            int(
-                self.primary_controller_space_on_root
-            ) - self.disk_monitor_limit - 1)
-
-        self.pcs_status = get_pcs_nodes(pcs_status)
-
-        self.slave_nodes_fqdn = list(
-            set(self.pcs_status.keys()).difference(
-                set(self.primary_controller_fqdn.split())))
-        running_resources_slave_1 = int(
-            self.pcs_status[self.slave_nodes_fqdn[0]]['resources_running'])
-
-        running_resources_slave_2 = int(
-            self.pcs_status[self.slave_nodes_fqdn[1]]['resources_running'])
-
-        self.slave_node_running_resources = str(min(running_resources_slave_1,
-                                                    running_resources_slave_2))
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def fill_root_above_rabbit_disk_free_limit(self):
-        """Filling root filesystem on primary controller"""
-
-        logger.info(
-            "Free space in root on primary controller - {}".format(
-                self.primary_controller_space_on_root))
-
-        logger.info(
-            "Need to fill space on root - {}".format(
-                self.primary_controller_space_to_filled))
-
-        node = self.fuel_web.get_nailgun_node_by_name(
-            self.primary_controller.name)
-        self.ssh_manager.execute_on_remote(
-            ip=node['ip'],
-            cmd='fallocate -l {}M /root/bigfile && sync'.format(
-                self.primary_controller_space_to_filled))
-        self.ssh_manager.execute_on_remote(
-            ip=node['ip'],
-            cmd='ls /root/bigfile',
-            assert_ec_equal=[0])
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def fill_root_below_rabbit_disk_free_limit(self):
-        """Fill root more to below rabbit disk free limit"""
-
-        node = self.fuel_web.get_nailgun_node_by_name(
-            self.primary_controller.name)
-        pacemaker_attributes = self.ssh_manager.execute_on_remote(
-            ip=node['ip'],
-            cmd='cibadmin --query --scope status')['stdout_str']
-        controller_space_on_root = get_pacemaker_nodes_attributes(
-            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']
-
-        logger.info("Free space in root on primary controller - {}".format(
-            controller_space_on_root))
-
-        controller_space_to_filled = str(
-            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1
-        )
-
-        if int(controller_space_to_filled) < 1:
-            logger.info(
-                "Nothing to do."
-                " Free space in root partition already less than {}.".format(
-                    self.rabbit_disk_free_limit))
-            return
-
-        logger.info("Need to fill space on root - {}".format(
-            controller_space_to_filled))
-
-        self.ssh_manager.execute_on_remote(
-            ip=node['ip'],
-            cmd='fallocate -l {}M /root/bigfile2 && sync'.format(
-                controller_space_to_filled))
-        self.ssh_manager.execute_on_remote(
-            ip=node['ip'],
-            cmd='ls /root/bigfile2',
-            assert_ec_equal=[0])
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def check_stopping_resources(self):
-        """Check stopping pacemaker resources"""
-
-        logger.info(
-            "Waiting {} seconds for changing pacemaker status of {}".format(
-                self.pacemaker_restart_timeout,
-                self.primary_controller_fqdn))
-        time.sleep(self.pacemaker_restart_timeout)
-
-        with self.fuel_web.get_ssh_for_node(
-                self.primary_controller.name) as remote:
-
-            def checking_health_disk_attribute():
-                logger.info("Checking for '#health_disk' attribute")
-                cibadmin_status_xml = remote.check_call(
-                    'cibadmin --query --scope status').stdout_str
-                pcs_attribs = get_pacemaker_nodes_attributes(
-                    cibadmin_status_xml)
-                return '#health_disk' in pcs_attribs[
-                    self.primary_controller_fqdn]
-
-            def checking_for_red_in_health_disk_attribute():
-                logger.info(
-                    "Checking that '#health_disk' attribute has 'red' value")
-                cibadmin_status_xml = remote.check_call(
-                    'cibadmin --query --scope status').stdout_str
-                pcs_attribs = get_pacemaker_nodes_attributes(
-                    cibadmin_status_xml)
-                return pcs_attribs[self.primary_controller_fqdn][
-                    '#health_disk'] == 'red'
-
-            def check_stopping_resources():
-                logger.info(
-                    "Checking that 'running_resources' "
-                    "attribute has '0' value")
-
-                nail_node = self.fuel_web.get_nailgun_node_by_devops_node(
-                    self.primary_controller)
-                pcs_status = parse_pcs_status_xml(nail_node['ip'])
-
-                pcs_attribs = get_pcs_nodes(pcs_status)
-                return pcs_attribs[self.primary_controller_fqdn][
-                    'resources_running'] == '0'
-
-            wait(checking_health_disk_attribute,
-                 timeout=self.pcs_check_timeout,
-                 timeout_msg="Attribute #health_disk did not appear "
-                             "in attributes on node {} in {} seconds".format(
-                                 self.primary_controller_fqdn,
-                                 self.pcs_check_timeout))
-
-            wait(checking_for_red_in_health_disk_attribute,
-                 timeout=self.pcs_check_timeout,
-                 timeout_msg="Attribute #health_disk doesn't have 'red' value "
-                             "on node {} in {} seconds".format(
-                                 self.primary_controller_fqdn,
-                                 self.pcs_check_timeout))
-
-            wait(check_stopping_resources,
-                 timeout=self.pcs_check_timeout,
-                 timeout_msg="Attribute 'running_resources' "
-                             "doesn't have '0' value "
-                             "on node {} in {} seconds".format(
-                                 self.primary_controller_fqdn,
-                                 self.pcs_check_timeout))
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def clean_up_space_on_root(self):
-        """Clean up space on root filesystem on primary controller"""
-
-        node = self.fuel_web.get_nailgun_node_by_name(
-            self.primary_controller.name)
-
-        bigfile_2 = ssh_manager.isfile_on_remote(
-            ip=node['ip'],
-            path='/root/bigfile2')
-
-        path = str("/root/bigfile {}".format(
-            "/root/bigfile2" if bigfile_2 else ""))
-        ssh_manager.rm_rf_on_remote(
-            ip=node['ip'],
-            path=path)
-
-        delete_attr = str(
-            'crm node status-attr {} delete "#health_disk"'.format(
-                self.primary_controller_fqdn))
-        ssh_manager.check_call(
-            ip=node['ip'],
-            command=delete_attr)
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def check_starting_resources(self):
-        """Check starting pacemaker resources"""
-
-        logger.info(
-            "Waiting {} seconds for changing pacemaker status of {}".format(
-                self.pacemaker_restart_timeout,
-                self.primary_controller_fqdn))
-        time.sleep(self.pacemaker_restart_timeout)
-
-        with self.fuel_web.get_ssh_for_node(
-                self.primary_controller.name) as remote:
-
-            def checking_health_disk_attribute_is_not_present():
-                logger.info(
-                    "Checking that '#health_disk' attribute "
-                    "is not present on node {}".format(
-                        self.primary_controller_fqdn))
-                cibadmin_status_xml = remote.check_call(
-                    'cibadmin --query --scope status').stdout_str
-                pcs_attribs = get_pacemaker_nodes_attributes(
-                    cibadmin_status_xml)
-                return '#health_disk' not in pcs_attribs[
-                    self.primary_controller_fqdn]
-
-            wait(checking_health_disk_attribute_is_not_present,
-                 timeout=self.pcs_check_timeout,
-                 timeout_msg="Attribute #health_disk is still present in "
-                             "attributes on node {} after {} seconds".format(
-                                 self.primary_controller_fqdn,
-                                 self.pcs_check_timeout))
-
-        self.fuel_web.assert_ha_services_ready(self.cluster_id)
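All of the checks above follow one polling idiom: build a zero-argument predicate and hand it to devops.helpers.helpers.wait() together with a timeout and a timeout message. A minimal sketch of the idiom, with simple_wait as a local stand-in for the devops helper (which raises its own timeout error rather than AssertionError):

    import time

    def simple_wait(predicate, timeout, timeout_msg, interval=5):
        # poll the predicate until it returns True or the timeout expires
        end = time.time() + timeout
        while time.time() < end:
            if predicate():
                return
            time.sleep(interval)
        raise AssertionError(timeout_msg)

    def resources_stopped():
        # the real predicates parse 'cibadmin --query --scope status'
        return True  # placeholder so the sketch terminates

    simple_wait(resources_stopped, timeout=300,
                timeout_msg="resources still running after 300 seconds")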
diff --git a/system_test/core/__init__.py b/system_test/core/__init__.py
deleted file mode 100644
index 81b159d8c..000000000
--- a/system_test/core/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from .factory import ActionsFactory
-from .repository import Repository
-
-
-__all__ = [
-    'ActionsFactory',
-    'Repository',
-]
diff --git a/system_test/core/config.py b/system_test/core/config.py
deleted file mode 100644
index 5776b8f9e..000000000
--- a/system_test/core/config.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import register
-
-from system_test.core.discover import get_configs
-from system_test.core.repository import register_system_test_cases
-
-tests_directory = [
-    'fuelweb_test/tests',
-    'system_test/tests',
-    'gates_tests'
-]
-
-
-def cached_add_group(yamls):
-
-    def add(group, systest_group, config_name,
-            validate_config=True):
-        """Add user friendly group
-
-        :type group: str
-        :type systest_group: str
-        :type config_name: str
-        :type validate_config: bool
-
-        """
-        if validate_config and config_name not in yamls:
-            raise NameError("Config {} not found".format(config_name))
-
-        register_system_test_cases(groups=[systest_group],
-                                   configs=[config_name])
-        register(groups=[group],
-                 depends_on_groups=[
-                     "{systest_group}({config_name})".format(
-                         systest_group=systest_group,
-                         config_name=config_name)])
-    return add
-
-
-def define_custom_groups():
-    """Map user friendly group names to system test groups
-
-    groups - contains the user friendly aliases
-    depends - contains the groups which should be run
-
-    """
-
-    add_group = cached_add_group(get_configs())
-    add_group(group="system_test.ceph_ha",
-              systest_group="system_test.deploy_and_check_radosgw",
-              config_name="ceph_all_on_neutron_vlan")
-
-    add_group(group="system_test.ceph_ha_30",
-              systest_group="system_test.deploy_and_check_radosgw",
-              config_name="ceph_all_on_neutron_vlan_30")
-
-    add_group(group="system_test.ceph_ha_30_bond",
-              systest_group="system_test.deploy_and_check_radosgw",
-              config_name="ceph_all_on_neutron_vlan_30-bond")
-
-    add_group(group="system_test.ceph_ha_30_2groups",
-              systest_group="system_test.deploy_and_check_radosgw",
-              config_name="ceph_all_on_neutron_vlan_30-2groups")
-
-    add_group(group="filling_root",
-              systest_group="system_test.failover.filling_root",
-              config_name="ceph_all_on_neutron_vlan")
-
-    add_group(group="system_test.strength",
-              systest_group="system_test.failover.destroy_controllers.first",
-              config_name="ceph_all_on_neutron_vlan")
-    add_group(group="system_test.strength",
-              systest_group="system_test.failover.destroy_controllers.second",
-              config_name="1ctrl_ceph_2ctrl_1comp_1comp_ceph_neutronVLAN")
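Extending this mapping is a one-call affair inside define_custom_groups(): each add_group() call registers the underlying system test for one template, then registers the alias as depending on the generated 'systest_group(config_name)' group. A hypothetical additional alias (the template name here is made up; with the default validate_config=True, add() raises NameError unless a template with that name actually exists):

    add_group(group="system_test.ceph_tun",
              systest_group="system_test.deploy_and_check_radosgw",
              config_name="ceph_all_on_neutron_tun")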
diff --git a/system_test/core/decorators.py b/system_test/core/decorators.py
deleted file mode 100644
index 270a31041..000000000
--- a/system_test/core/decorators.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import inspect
-import collections
-
-from system_test.core.repository import Repository
-
-
-def testcase(groups):
-    """Use this decorator to mark a test case class"""
-    def testcase_decorator(cls):
-        if not inspect.isclass(cls):
-            raise TypeError("Decorator @testcase should be used only "
-                            "with classes")
-        if not isinstance(groups, collections.Sequence):
-            raise TypeError("Use list for groups")
-        cls.get_actions_order()
-        setattr(cls, '_base_groups', groups)
-        Repository.add(cls)
-        return cls
-    return testcase_decorator
-
-
-def action(method):
-    setattr(method, '_action_method_', True)
-    return method
-
-
-def nested_action(method):
-    setattr(method, '_nested_action_method_', True)
-    return staticmethod(method)
-
-
-def deferred_decorator(decorator_list):
-    def real_decorator(func):
-        setattr(func, '_deferred_decorator_', decorator_list)
-        return func
-    return real_decorator
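Put together, these decorators are used like this (a minimal sketch mirroring the test modules removed later in this patch; MyCase and my_step are illustrative names, not part of the framework):

    from system_test import testcase
    from system_test import action
    from system_test import deferred_decorator
    from system_test.tests import ActionTest
    from system_test.actions import BaseActions
    from system_test.helpers.decorators import make_snapshot_if_step_fail

    @testcase(groups=['system_test', 'system_test.my_case'])
    class MyCase(ActionTest, BaseActions):
        """One-line summary that becomes the generated case docstring

        Scenario:
            1. Do one step
        """

        actions_order = [
            'my_step',
        ]

        @deferred_decorator([make_snapshot_if_step_fail])
        @action
        def my_step(self):
            """Do one step"""

Note that @testcase calls cls.get_actions_order() at import time, so every name in actions_order must already resolve to an @action (or @nested_action) method.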
diff --git a/system_test/core/discover.py b/system_test/core/discover.py
deleted file mode 100644
index 66a03258a..000000000
--- a/system_test/core/discover.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os.path
-import yaml
-
-
-def get_basepath():
-    import system_test
-    return os.path.join(
-        os.path.dirname(os.path.dirname(system_test.__file__)))
-
-
-def get_list_confignames(filelist):
-    """Get list of config names from a file list"""
-    return [get_configname(filename) for filename in filelist]
-
-
-def get_configname(path):
-    """Get config name from path to yaml file"""
-    return os.path.splitext(os.path.basename(path))[0]
-
-
-def get_path_to_config():
-    """Find path to directory with config files"""
-    import system_test
-    return os.path.join(os.path.dirname(system_test.__file__),
-                        'tests_templates/tests_configs')
-
-
-def get_path_to_template():
-    """Find path to directory with templates files"""
-    import system_test
-    return os.path.join(os.path.dirname(system_test.__file__),
-                        'tests_templates')
-
-
-def collect_yamls(path):
-    """Walk through config directory and find all yaml files"""
-    ret = []
-    for r, _, f in os.walk(path):
-        for one in f:
-            if os.path.splitext(one)[1] in ('.yaml', '.yml'):
-                ret.append(os.path.join(r, one))
-    return ret
-
-
-def load_yaml(path):
-    """Load yaml file from path"""
-    def yaml_include(loader, node):
-        file_name = os.path.join(get_path_to_template(), node.value)
-        if not os.path.isfile(file_name):
-            raise ValueError(
-                "Cannot load the template {0} : include file {1} "
-                "doesn't exist.".format(path, file_name))
-        return yaml.load(open(file_name))
-
-    def yaml_get_env_variable(loader, node):
-        if not node.value.strip():
-            raise ValueError("Environment variable is required after {tag} in "
-                             "{filename}".format(tag=node.tag,
-                                                 filename=loader.name))
-        node_value = node.value.split(',', 1)
-        # Get the name of environment variable
-        env_variable = node_value[0].strip()
-
-        # Get the default value for environment variable if it exists in config
-        if len(node_value) > 1:
-            default_val = node_value[1].strip()
-        else:
-            default_val = None
-
-        value = os.environ.get(env_variable, default_val)
-        if value is None:
-            raise ValueError("Environment variable {var} is not set from shell"
-                             " environment! No default value provided in file "
-                             "{filename}".format(var=env_variable,
-                                                 filename=loader.name))
-
-        return yaml.load(value)
-
-    yaml.add_constructor("!include", yaml_include)
-    yaml.add_constructor("!os_env", yaml_get_env_variable)
-
-    return yaml.load(open(path))
-
-
-def find_duplicates(yamls):
-    dup = {}
-    for one in yamls:
-        name = os.path.basename(one)
-        if name in dup:
-            dup[name].append(one)
-        else:
-            dup[name] = [one]
-    return {k: v for k, v in dup.items() if len(v) > 1}
-
-
-def get_configs():
-    """Return list of dict environment configurations"""
-    yamls = collect_yamls(get_path_to_config())
-    dup = find_duplicates(yamls)
-    if dup:
-        raise NameError(
-            "Found duplicate files in templates. "
-            "Name of template should be unique. Errors: {}".format(dup))
-    return {get_configname(y): y for y in yamls}
-
-
-def config_filter(configs=None):
-    if configs is None:
-        return get_configs()
-    return {k: v for k, v in get_configs().items() if k in configs}
-
-
-def discover_test_files(basedir, dirs):
-    """Find all files in path"""
-    ret = []
-    for path in dirs:
-        path = os.path.join(basedir, path)
-        for r, _, f in os.walk(path):
-            for one in f:
-                if one.startswith('test_') and one.endswith('.py'):
-                    ret.append(os.path.join(r, one))
-    return ret
-
-
-def convert_files_to_modules(basedir, files):
-    """Convert file names to module names"""
-    ret = []
-    for one in files:
-        module = os.path.splitext(
-            os.path.relpath(one, basedir))[0].replace('/', '.')
-        ret.append(module)
-    return ret
-
-
-def discover_import_tests(basedir, dirs):
-    """Walk through directories and import all modules with tests"""
-    imported_list = []
-    for module in convert_files_to_modules(basedir,
-                                           discover_test_files(basedir, dirs)):
-        imported_list.append(__import__(module))
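The two custom constructors give the templates environment-variable substitution and file inclusion. A small illustrative template fragment (file name and keys here are made up):

    # my_template.yaml
    template:
      name: !os_env ENV_NAME, default-env       # env var with a default
      slaves: !os_env SLAVES_COUNT              # required, no default
      network: !include cluster_configs/networks/neutron_vlan.yaml

When loaded with load_yaml('my_template.yaml'), ENV_NAME falls back to 'default-env' if unset, a missing SLAVES_COUNT raises ValueError, and the !include path is resolved relative to get_path_to_template().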
diff --git a/system_test/core/factory.py b/system_test/core/factory.py
deleted file mode 100644
index fd502f81c..000000000
--- a/system_test/core/factory.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import division
-
-import functools
-import types
-
-from proboscis import after_class
-from proboscis import before_class
-from proboscis import test
-
-from fuelweb_test.helpers.utils import TimeStat
-
-from system_test import logger
-
-
-def step_start_stop(func):
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        with TimeStat(func) as timer:
-            step_name = getattr(func, '_step_name')
-            start_step = '[ START {} ]'.format(step_name)
-            header = "<<< {:-^142} >>>".format(start_step)
-            logger.info("\n{header}\n".format(header=header))
-            result = func(*args, **kwargs)
-            spent_time = timer.spent_time
-            minutes = spent_time // 60
-            # pylint: disable=round-builtin
-            seconds = int(round(spent_time)) % 60
-            # pylint: enable=round-builtin
-            finish_step = "[ FINISH {} STEP TOOK {} min {} sec ]".format(
-                step_name, minutes, seconds)
-            footer = "<<< {:-^142} >>>".format(finish_step)
-            logger.info("\n{footer}\n".format(footer=footer))
-            return result
-    return wrapper
-
-
-def copy_func(f, name=None):
-    """
-    :param f:
-    :param name:
-    :return: a function with same code, globals, defaults, closure,
-        and name (or provide a new name)
-
-    """
-
-    fn = types.FunctionType(f.__code__, f.__globals__, name or f.__name__,
-                            f.__defaults__, f.__closure__)
-    # in case f was given attrs (note this dict is a shallow copy):
-    fn.__dict__.update(f.__dict__)
-    return fn
-
-
-class ActionsFactory(object):
-
-    @classmethod
-    def get_actions(cls):
-        """Return all action methods"""
-        return {m: getattr(cls, m) for m in
-                dir(cls) if
-                getattr(getattr(cls, m), '_action_method_', False) or
-                getattr(getattr(cls, m), '_nested_action_method_', False)}
-
-    @classmethod
-    def get_actions_order(cls):
-        """Get order of actions"""
-        if getattr(cls, 'actions_order', None) is None:
-            raise LookupError("Actions order doesn't exist")
-
-        actions_method = cls.get_actions()
-        linear_order = []
-        # pylint: disable=no-member
-        # noinspection PyUnresolvedReferences
-        for action in cls.actions_order:
-            try:
-                action_method = actions_method[action]
-            except KeyError as e:
-                import inspect
-                source = inspect.getsourcelines(inspect.getmodule(cls))[0]
-                counted_data = [n for n in enumerate(source)]
-                line_num = [n for (n, l) in counted_data if str(e) in l][0]
-                cutted = counted_data[line_num - 4:line_num + 4]
-                cutted = [(n, l[:-1] + " " * 20 + "<====\n"
-                           if n == line_num else l)
-                          for (n, l) in cutted]
-                cutted = ["Line {line_num:04d}: {line}".format(
-                    line_num=n, line=l) for (n, l) in cutted]
-                # noinspection PyUnresolvedReferences
-                raise LookupError("Class {} orders to run '{}' action as {} "
-                                  "step,\n\tbut action method doesn't exist "
-                                  "in class.\nLook at '{}':\n\n{}".format(
-                                      cls, action,
-                                      cls.actions_order.index(action),
-                                      inspect.getsourcefile(cls),
-                                      ''.join(cutted)))
-            if getattr(action_method,
-                       '_nested_action_method_', None):
-                linear_order.extend(action_method())
-            else:
-                linear_order.append(action)
-        # pylint: enable=no-member
-
-        steps = [{"action": step, "method": actions_method[step]} for
-                 step in linear_order]
-
-        return steps
-
-    @classmethod
-    def caseclass_factory(cls, case_group):
-        """Create a new cloned cls class that contains only action methods"""
-        test_steps, scenario = {}, []
-
-        # Generate a human readable class_name; if the method docstring is
-        # not provided, the generated name is used
-        class_name = "Case_{}__Config_{}".format(cls.__name__, case_group)
-
-        # Make methods for the new testcase class, following the order
-        scenario.append(" Scenario:")
-        for step, action in enumerate(cls.get_actions_order()):
-            n_action = action['action'].replace("_action_", "")
-            # Generate a human readable method name; if the method docstring
-            # is not provided, the generated name is used when a method fails
-            step_method_name = "{}.Step{:03d}_{}".format(class_name,
-                                                         step,
-                                                         n_action)
-
-            method = copy_func(action['method'], step_method_name)
-            _step_name = getattr(action['method'],
-                                 "__doc__").splitlines()[0]
-            setattr(method, "_step_name", "Step {:03d}. {}".format(step,
-                                                                   _step_name))
-            setattr(method, "_step_num", step)
-            setattr(method, "_base_class", cls.__name__)
-            setattr(method, "_config_case_group", case_group)
-
-            # Add step to scenario
-            scenario.append(" {}. {}".format(step, _step_name))
-
-            # Add decorator to cloned method
-            for deco in getattr(method, '_deferred_decorator_', []):
-                method = deco(method)
-
-            # if not the first step, make a dependency
-            if step > 0:
-                prev_step_name = "{}.Step{:03d}_{}".format(
-                    class_name,
-                    step - 1,
-                    cls.get_actions_order()[step - 1]['action'].replace(
-                        "_action_", ""))
-                depends = [test_steps[prev_step_name]]
-            else:
-                depends = None
-
-            # Add start-stop step decorator for measuring time and print
-            # start and finish info
-            method = step_start_stop(method)
-
-            test_steps[step_method_name] = test(
-                method,
-                depends_on=depends)
-
-        # Create before case methods, start case and setup
-        start_method = copy_func(
-            getattr(cls, "_start_case"),
-            "{}.StartCase".format(class_name))
-        test_steps["{}.StartCase".format(class_name)] = before_class(
-            start_method)
-
-        if hasattr(cls, 'case_setup'):
-            setup_method = copy_func(
-                getattr(cls, "case_setup"),
-                "{}.CaseSetup".format(class_name))
-            setattr(setup_method, "_step_name", "CaseSetup")
-            test_steps["{}.CaseSetup".format(class_name)] = before_class(
-                step_start_stop(setup_method), runs_after=[start_method])
-
-        if hasattr(cls, 'case_teardown'):
-            teardown_method = copy_func(
-                getattr(cls, "case_teardown"),
-                "{}.CaseTeardown".format(class_name))
-            setattr(teardown_method, "_step_name", "CaseTeardown")
-            test_steps["{}.CaseTeardown".format(class_name)] = after_class(
-                step_start_stop(teardown_method), always_run=True)
-        else:
-            teardown_method = None
-
-        # Create case methods, teardown and finish case
-        finish_method = copy_func(
-            getattr(cls, "_finish_case"),
-            "{}.FinishCase".format(class_name))
-        test_steps["{}.FinishCase".format(class_name)] = after_class(
-            finish_method, always_run=True,
-            runs_after=[teardown_method] if teardown_method else [])
-
-        # Generate test case groups
-        # pylint: disable=no-member
-        # noinspection PyUnresolvedReferences
-        groups = ['{}({})'.format(g, case_group) for g in cls._base_groups]
-        # noinspection PyUnresolvedReferences
-        groups = cls._base_groups + groups
-        # pylint: enable=no-member
-
-        # Generate test case docstring
-        test_steps["__doc__"] = "{}\n\n{}\n\nDuration {}".format(
-            cls.__doc__.splitlines()[0],
-            '\n'.join(scenario),
-            getattr(cls, 'est_duration', '180m') or '180m')
-        ret = test(
-            type(class_name, (cls,), test_steps),
-            groups=groups)
-        return ret
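The factory leans on copy_func() so that each generated step gets its own name and attribute dict without mutating the shared action method. A small sketch, assuming copy_func as defined above (greet is an illustrative function):

    def greet():
        """Say hello"""
        return "hello"

    step = copy_func(greet, name="Case_X.Step000_greet")
    step._step_name = "Step 000. Say hello"

    assert step() == "hello"
    assert step.__name__ == "Case_X.Step000_greet"
    assert greet.__name__ == "greet"           # original keeps its name
    assert not hasattr(greet, '_step_name')    # and its own attribute dict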
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-
-from devops.helpers.metaclasses import SingletonMeta
-import proboscis.core
-from proboscis import factory
-from proboscis.decorators import DEFAULT_REGISTRY
-from six import add_metaclass
-
-from system_test.tests import ActionTest
-from system_test.core.discover import config_filter
-
-
-@add_metaclass(SingletonMeta)
-class TestCaseRepository(set):
-
-    def __init__(self):
-        super(TestCaseRepository, self).__init__()
-        self.__index = {}
-
-    @property
-    def index(self):
-        return self.__index
-
-    def __index_add(self, v):
-        groups = getattr(v, '_base_groups', None)
-        for g in groups:
-            if g not in self.__index:
-                self.__index[g] = set()
-            self.__index[g].add(v)
-
-    def __index_remove(self, v):
-        groups = getattr(v, '_base_groups', None)
-        for g in groups:
-            self.__index[g].remove(v)
-            if not len(self.__index[g]):
-                del self.__index[g]
-
-    def add(self, value):
-        super(TestCaseRepository, self).add(value)
-        self.__index_add(value)
-
-    def remove(self, value):
-        super(TestCaseRepository, self).remove(value)
-        self.__index_remove(value)
-
-    def pop(self, value):
-        super(TestCaseRepository, self).pop(value)
-        self.__index_remove(value)
-
-    def filter(self, groups=None):
-        """Return list of cases related to groups. All by default"""
-        if groups is None:
-            return set(self)
-
-        cases = set()
-        for g in groups:
-            if g in self.index:
-                cases.update(self.index[g])
-        return cases
-
-    def union(self, *args, **kwargs):
-        raise AttributeError("'TestCaseRepository' object has no attribute"
-                             " 'union'")
-
-    def update(self, *args, **kwargs):
-        raise AttributeError("'TestCaseRepository' object has no attribute"
-                             " 'update'")
-
-
-Repository = TestCaseRepository()
-
-
-def get_groups(only_groups=None, exclude=None):
-    """Get groups from the Proboscis registry and collect their children"""
-    groups_childs = {}
-
-    if only_groups and isinstance(only_groups, list):
-        groups = {g: DEFAULT_REGISTRY.groups[g]
-                  for g in DEFAULT_REGISTRY.groups if g in only_groups}
-        groups.update({g: Repository.index[g]
-                       for g in Repository.index if g in only_groups})
-    else:
-        groups = DEFAULT_REGISTRY.groups.copy()
-        groups.update({g: Repository.index[g] for g in Repository.index})
-
-    for group_name, group in groups.items():
-        klass_entries = set()
-        entries_in_class = set()
-
-        if (exclude and
-                isinstance(exclude, list) and
-                any([e in group_name for e in exclude])):
-            continue
-
-        if hasattr(group, 'entries'):
-            for entry in group.entries:
-                if isinstance(entry, proboscis.core.TestMethodClassEntry):
-                    klass_entries.add(entry)
-
-            for klass in klass_entries:
-                entries_in_class.update(set(klass.children))
-
-            child = set(group.entries) - entries_in_class - klass_entries
-
-            for klass in klass_entries:
-                if (klass.used_by_factory and
-                        ActionTest in
-                        klass.home.__mro__):
-                    child.add(klass)
-                else:
-                    child.update(set(klass.children))
-        else:
-            child = [g for g in group
                     if ActionTest in g.__mro__]
-
-        groups_childs[group_name] = child
-
-    return groups_childs
-
-
-def case_filter(groups=None):
-    """Create Proboscis factories for selected groups. For all by default"""
-    if groups is None:
-        return set(Repository)
-
-    cases = set()
-    for g in groups:
-        if g in Repository.index:
-            cases.update(Repository.index[g])
-    return cases
-
-
-def case_factory(baseclass, configs):
-    """Return a list of case instances"""
-    return [baseclass.caseclass_factory(g)(c)
-            for g, c in config_filter(configs).items()]
-
-
-def reg_factory(cases, configs):
-    def ret():
-        out = []
-        for c in cases:
-            out.extend(case_factory(c, configs))
-        return out
-    globals()['system_test_factory'] = factory(ret)
-
-
-def split_group_config(group):
-    m = re.search(r'([\w\.]*)\(([\w\-\_]*)\)', group)
-    if m:
-        return m.groups()
-
-
-def register_system_test_cases(groups=None, configs=None):
-    to_remove = []
-    to_add = []
-    for group in groups:
-        g_c = split_group_config(group)
-        if g_c:
-            g, c = g_c
-            to_add.append(g)
-            if configs is None:
-                configs = []
-            configs.append(c)
-            to_remove.append(group)
-    for one in to_remove:
-        groups.remove(one)
-    for one in to_add:
-        groups.append(one)
-    cases = case_filter(groups)
-    configs = config_filter(configs)
-    if cases:
-        reg_factory(cases, configs)
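Group names carrying an embedded config are exactly the names produced by cached_add_group() in core/config.py, so the round trip looks like this:

    print(split_group_config(
        'system_test.deploy_and_check_radosgw(ceph_all_on_neutron_vlan)'))
    # ('system_test.deploy_and_check_radosgw', 'ceph_all_on_neutron_vlan')

    print(split_group_config('system_test.plain_group'))
    # None: no '(config)' suffix, so register_system_test_cases()
    # leaves such a group untouched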
diff --git a/system_test/helpers/__init__.py b/system_test/helpers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/system_test/helpers/decorators.py b/system_test/helpers/decorators.py
deleted file mode 100644
index 56d61fd1c..000000000
--- a/system_test/helpers/decorators.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2013 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import traceback
-import hashlib
-
-from proboscis import SkipTest
-
-from fuelweb_test.helpers.utils import pull_out_logs_via_ssh
-from fuelweb_test.helpers.decorators import create_diagnostic_snapshot
-
-from system_test import logger
-
-
-def make_snapshot_if_step_fail(func):
-    """Generate diagnostic snapshot if step fail.
-
-    - Show test case method name and scenario from docstring.
-    - Create a diagnostic snapshot of environment in cases:
-        - if the test case passed;
-        - if error occurred in the test case.
-    - Fetch logs from master node if creating the diagnostic
-      snapshot has failed.
-    """
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        try:
-            result = func(*args, **kwargs)
-        except SkipTest:
-            raise
-        except Exception:
-            name = 'error_%s' % func.__name__
-            case_name = getattr(func, '_base_class', None)
-            step_num = getattr(func, '_step_num', None)
-            config_name = getattr(func, '_config_case_group', None)
-            description = "Failed in method '{:s}'.".format(func.__name__)
-            if args[0].env is not None:
-                try:
-                    create_diagnostic_snapshot(args[0].env,
-                                               "fail", name)
-                except Exception:
-                    logger.error("Fetching of diagnostic snapshot failed: {0}".
-                                 format(traceback.format_exc()))
-                    try:
-                        with args[0].env.d_env.get_admin_remote()\
-                                as admin_remote:
-                            pull_out_logs_via_ssh(admin_remote, name)
-                    except Exception:
-                        logger.error("Fetching of raw logs failed: {0}".
-                                     format(traceback.format_exc()))
-                finally:
-                    logger.debug(args)
-                    try:
-                        if all([case_name, step_num, config_name]):
-                            _hash = hashlib.sha256(config_name.encode('utf-8'))
-                            _hash = _hash.hexdigest()[:8]
-                            snapshot_name = "{case}_{config}_{step}".format(
-                                case=case_name,
-                                config=_hash,
-                                step="Step{:03d}".format(step_num)
-                            )
-                        else:
-                            snapshot_name = name[-50:]
-                        args[0].env.make_snapshot(snapshot_name=snapshot_name,
-                                                  description=description,
-                                                  is_make=True)
-                    except Exception:
-                        logger.error("Error making the environment snapshot:"
-                                     " {0}".format(traceback.format_exc()))
-            raise
-        return result
-    return wrapper
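The snapshot naming above keeps names short and filesystem-safe by hashing the config group. A standalone sketch with illustrative values:

    import hashlib

    case_name = 'DeployCheckRadosGW'       # illustrative, not from the patch
    config_name = 'ceph_all_on_neutron_vlan'
    step_num = 7

    _hash = hashlib.sha256(config_name.encode('utf-8')).hexdigest()[:8]
    snapshot_name = "{case}_{config}_{step}".format(
        case=case_name,
        config=_hash,
        step="Step{:03d}".format(step_num))
    print(snapshot_name)  # DeployCheckRadosGW_<8 hex chars>_Step007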
diff --git a/system_test/tests/__init__.py b/system_test/tests/__init__.py
deleted file mode 100644
index ab0b33d76..000000000
--- a/system_test/tests/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from .base import ActionTest
-
-
-__all__ = [
-    'ActionTest'
-]
diff --git a/system_test/tests/base.py b/system_test/tests/base.py
deleted file mode 100644
index c4bc4ff46..000000000
--- a/system_test/tests/base.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from fuelweb_test.tests import base_test_case
-from system_test.core import ActionsFactory
-
-
-class ActionTest(base_test_case.TestBasic, ActionsFactory):
-    """ActionTest is a TestBasic wrapper for system tests"""
-
-    def __init__(self, config_file=None):
-        super(ActionTest, self).__init__()
-        self.config_file = config_file
-        self.assigned_slaves = set()
diff --git a/system_test/tests/plugins/__init__.py b/system_test/tests/plugins/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/system_test/tests/plugins/plugin_example/__init__.py b/system_test/tests/plugins/plugin_example/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/system_test/tests/plugins/plugin_example/test_plugin_example.py b/system_test/tests/plugins/plugin_example/test_plugin_example.py
deleted file mode 100644
index 70ef0582d..000000000
--- a/system_test/tests/plugins/plugin_example/test_plugin_example.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from fuelweb_test.settings import EXAMPLE_PLUGIN_PATH
-
-from system_test import testcase
-
-from system_test.tests import ActionTest
-from system_test.actions import BaseActions
-
-
-@testcase(groups=['system_test',
-                  'system_test.plugins',
-                  'system_test.plugins.example_plugin',
-                  'system_test.plugins.example_plugin.simple'])
-class DeployWithPluginExample(ActionTest, BaseActions):
-    """Deploy cluster with one controller and example plugin
-
-    Scenario:
-        1. Upload plugin to the master node
-        2. Install plugin
-        3. Create cluster
-        4. Add 1 node with controller role
-        5. Add 2 nodes with compute role
-        6. Deploy the cluster
-        7. Run network verification
-        8. Check plugin health
-        9. Run OSTF
-
-    Duration 35m
-    Snapshot deploy_ha_one_controller_neutron_example
-    """
-
-    plugin_name = "fuel_plugin_example"
-    plugin_path = EXAMPLE_PLUGIN_PATH
-
-    actions_order = [
-        'prepare_env_with_plugin',
-        'create_env',
-        'enable_plugin',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'check_example_plugin',
-        'health_check',
-    ]
-
-
-@testcase(groups=['system_test',
-                  'system_test.plugins',
-                  'system_test.plugins.example_plugin',
-                  'system_test.plugins.example_plugin.simple_scale'])
-class DeployScaleWithPluginExample(ActionTest, BaseActions):
-    """Deploy and scale cluster in ha mode with example plugin
-
-    Scenario:
-        1. Upload plugin to the master node
-        2. Install plugin
-        3. Create cluster
-        4. Add 1 node with controller role
-        5. Add 1 node with compute role
-        6. Add 1 node with cinder role
-        7. Deploy the cluster
-        8. Run network verification
-        9. Check plugin health
-        10. Run OSTF
-        11. Add 2 nodes with controller role
-        12. Deploy cluster
-        13. Check plugin health
-        14. Run OSTF
-
-    Duration 150m
-    Snapshot deploy_neutron_example_ha_add_node
-    """
-
-    plugin_name = "fuel_plugin_example"
-    plugin_path = EXAMPLE_PLUGIN_PATH
-
-    actions_order = [
-        'prepare_env_with_plugin',
-        'create_env',
-        'enable_plugin',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'check_example_plugin',
-        'health_check',
-        'scale_node',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'check_example_plugin',
-        'health_check',
-    ]
diff --git a/system_test/tests/plugins/plugin_example/test_plugin_example_v3.py b/system_test/tests/plugins/plugin_example/test_plugin_example_v3.py
deleted file mode 100644
index 661ff99dd..000000000
--- a/system_test/tests/plugins/plugin_example/test_plugin_example_v3.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from fuelweb_test.settings import EXAMPLE_PLUGIN_V3_PATH
-
-from system_test import testcase
-from system_test import deferred_decorator
-from system_test import action
-
-from system_test.tests import ActionTest
-from system_test.actions import BaseActions
-from system_test.helpers.decorators import make_snapshot_if_step_fail
-
-
-@testcase(groups=['system_test',
-                  'system_test.plugins',
-                  'system_test.plugins.example_plugin_v3',
-                  'system_test.plugins.example_plugin_v3.simple'])
-class DeployWithPluginExampleV3(ActionTest, BaseActions):
-    """Deploy cluster with one controller and example plugin v3
-
-    Scenario:
-        1. Upload plugin to the master node
-        2. Install plugin
-        3. Create cluster
-        4. Add 1 node with controller role
-        5. Add 1 node with compute role
-        6. Add 1 node with custom plugin role
-        7. Deploy the cluster
-        8. Run network verification
-        9. Check plugin health
-        10. Run OSTF
-
-    Duration 35m
-    Snapshot deploy_ha_one_controller_neutron_example_v3
-    """
-
-    plugin_name = 'fuel_plugin_example_v3'
-    plugin_path = EXAMPLE_PLUGIN_V3_PATH
-
-    actions_order = [
-        'prepare_env_with_plugin',
-        'create_env',
-        'enable_plugin',
-        'add_nodes',
-        'add_custom_role_node',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'check_example_plugin',
-        'health_check',
-    ]
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def add_custom_role_node(self):
-        """Add node with custom role from the plugin"""
-        self._add_node([{
-            'roles': ['fuel_plugin_example_v3'],
-            'count': 1
-        }])
diff --git a/system_test/tests/strength/__init__.py b/system_test/tests/strength/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/system_test/tests/strength/test_destroy_controllers.py b/system_test/tests/strength/test_destroy_controllers.py
deleted file mode 100644
index 559201a2c..000000000
--- a/system_test/tests/strength/test_destroy_controllers.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from system_test import testcase
-from system_test import deferred_decorator
-from system_test import action
-
-from system_test.tests import ActionTest
-from system_test.actions import BaseActions
-from system_test.actions import StrengthActions
-from system_test.helpers.decorators import make_snapshot_if_step_fail
-
-
-@testcase(groups=['system_test',
-                  'system_test.failover',
-                  'system_test.failover.destroy_controllers',
-                  'system_test.failover.destroy_controllers.first'])
-class StrengthDestroyFirstController(ActionTest, BaseActions, StrengthActions):
-    """Destroy the first controller and check pacemaker status is correct
-
-    Scenario:
-        1. Create Environment
-        2. Add nodes to Environment
-        3. Run network checker
-        4. Deploy Environment
-        5. Run network checker
-        6. Run OSTF
-        7. Make or use existing snapshot of ready Environment
-        8. Destroy first controller
-        9. Check pacemaker status
-        10. Wait offline status in nailgun
-        11. Run OSTF
-
-    """
-
-    actions_order = [
-        'setup_master',
-        'config_release',
-        'make_slaves',
-        'revert_slaves',
-        'create_env',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'health_check',
-        'save_load_environment',
-        'destroy_first_controller',
-        'check_pacemaker_status',
-        'wait_offline_nodes',
-        'check_ha_service_ready',
-        'check_os_services_ready',
-        'wait_galera_cluster',
-        'health_check',
-    ]
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def destroy_first_controller(self):
-        """Destroy first controller"""
-        self._destroy_controller('slave-01')
-
-
-@testcase(groups=['system_test',
-                  'system_test.failover',
-                  'system_test.failover.destroy_controllers',
-                  'system_test.failover.destroy_controllers.second'])
-class StrengthDestroySecondController(ActionTest, BaseActions,
-                                      StrengthActions):
-    """Destroy the second controller and check pacemaker status is correct
-
-    Scenario:
-        1. Create Environment
-        2. Add nodes to Environment
-        3. Run network checker
-        4. Deploy Environment
-        5. Run network checker
-        6. Run OSTF
-        7. Make or use existing snapshot of ready Environment
-        8. Destroy second controller
-        9. Check pacemaker status
-        10. Wait offline status in nailgun
-        11. Run OSTF
-
-    """
-
-    actions_order = [
-        'setup_master',
-        'config_release',
-        'make_slaves',
-        'revert_slaves',
-        'create_env',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'health_check',
-        'save_load_environment',
-        'destroy_second_controller',
-        'check_pacemaker_status',
-        'wait_offline_nodes',
-        'check_ha_service_ready',
-        'check_os_services_ready',
-        'wait_galera_cluster',
-        'health_check',
-    ]
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def destroy_second_controller(self):
-        """Destroy second controller"""
-        self._destroy_controller('slave-02')
diff --git a/system_test/tests/strength/test_filling_root.py b/system_test/tests/strength/test_filling_root.py
deleted file mode 100644
index 3f5577a2c..000000000
--- a/system_test/tests/strength/test_filling_root.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from system_test import testcase
-from system_test.tests import ActionTest
-from system_test.actions import BaseActions
-from system_test.actions import FillRootActions
-
-
-@testcase(groups=['system_test',
-                  'system_test.failover',
-                  'system_test.failover.filling_root'])
-class FillRootPrimaryController(ActionTest, BaseActions, FillRootActions):
-    """Fill root filesystem on primary controller and check pacemaker
-
-    Scenario:
-        1. Setup master node
-        2. Config default repositories for release
-        3. Bootstrap slaves and make snapshot ready
-        4. Revert snapshot ready
-        5. Create Environment
-        6. Add nodes to Environment
-        7. Run network checker
-        8. Deploy Environment
-        9. Run network checker
-        10. Run OSTF
-        11. Make or use existing snapshot of ready Environment
-        12. Get pcs initial state
-        13. Fill root filesystem on primary controller
-            above rabbit_disk_free_limit of 5Mb
-        14. Check for stopping pacemaker resources
-        15. Run OSTF Sanity and Smoke tests
-        16. Fill root filesystem on primary controller
-            below rabbit_disk_free_limit of 5Mb
-        17. Check for stopped pacemaker resources
-        18. Run OSTF Sanity and Smoke tests
-        19. Clean up space on root filesystem on
-            primary controller
-        20. Check for started pacemaker resources
-        21. Run OSTF Sanity, Smoke, HA
-    """
-
-    actions_order = [
-        'setup_master',
-        'config_release',
-        'make_slaves',
-        'revert_slaves',
-        'create_env',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'health_check',
-        'save_load_environment',
-        'get_pcs_initial_state',
-        'fill_root_above_rabbit_disk_free_limit',
-        'check_stopping_resources',
-        'health_check',
-        'fill_root_below_rabbit_disk_free_limit',
-        'check_stopping_resources',
-        'health_check',
-        'clean_up_space_on_root',
-        'check_starting_resources',
-        'health_check_sanity_smoke_ha',
-    ]
diff --git a/system_test/tests/test_create_deploy_ostf.py b/system_test/tests/test_create_deploy_ostf.py
deleted file mode 100644
index 0ab8f8f68..000000000
--- a/system_test/tests/test_create_deploy_ostf.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from system_test import testcase
-from system_test.tests import ActionTest
-from system_test.actions import BaseActions
-
-
-@testcase(groups=['system_test', 'system_test.create_deploy_ostf'])
-class CreateDeployOstf(ActionTest, BaseActions):
-    """Case deploy Environment
-
-    Scenario:
-        1. Create Environment
-        2. Add nodes to Environment
-        3. Run network checker
-        4. Deploy Environment
-        5. Run network checker
-        6. Run OSTF
-    """
-
-    actions_order = [
-        'prepare_admin_node_with_slaves',
-        'create_env',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'health_check',
-    ]
diff --git a/system_test/tests/test_delete_after_deploy.py b/system_test/tests/test_delete_after_deploy.py
deleted file mode 100644
index 84e19761a..000000000
--- a/system_test/tests/test_delete_after_deploy.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2016 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from system_test import testcase
-from system_test.tests import ActionTest
-from system_test.actions import BaseActions
-
-
-@testcase(groups=['system_test', 'system_test.delete_after_deploy'])
-class DeleteAfterDeploy(ActionTest, BaseActions):
-    """Case deploy Environment
-
-    Scenario:
-        1. Create Environment
-        2. Add nodes to Environment
-        3. Run network checker
-        4. Deploy Environment
-        5. Run network checker
-        6. Run OSTF
-        7. Delete Environment
-    """
-
-    actions_order = [
-        'prepare_admin_node_with_slaves',
-        'create_env',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'health_check',
-        'delete_cluster',
-    ]
diff --git a/system_test/tests/test_deploy_check_rados.py b/system_test/tests/test_deploy_check_rados.py
deleted file mode 100644
index 484af338d..000000000
--- a/system_test/tests/test_deploy_check_rados.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis.asserts import assert_true
-
-from system_test import testcase
-from system_test import action
-from system_test import deferred_decorator
-
-from system_test.tests import ActionTest
-from system_test.actions import BaseActions
-
-from system_test.helpers.decorators import make_snapshot_if_step_fail
-
-
-@testcase(groups=['system_test',
-                  'system_test.deploy_and_check_radosgw'])
-class DeployCheckRadosGW(ActionTest, BaseActions):
-    """Deploy cluster and check RadosGW
-
-    Scenario:
-        1. Create Environment
-        2. Add nodes to Environment
-        3. Run network checker
-        4. Deploy Environment
-        5. Run network checker
-        6. Check HAProxy backends
-        7. Check ceph status
-        8. Run OSTF
-        9. Check the radosgw daemon is started
-
-    """
-
-    actions_order = [
-        'setup_master',
-        'config_release',
-        'make_slaves',
-        'revert_slaves',
-        'create_env',
-        'add_nodes',
-        'network_check',
-        'deploy_cluster',
-        'network_check',
-        'check_haproxy',
-        'check_ceph_status',
-        'health_check',
-        'check_rados_daemon'
-    ]
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def check_ceph_status(self):
-        """Check Ceph status in cluster"""
-        self.fuel_web.check_ceph_status(self.cluster_id)
-
-    @deferred_decorator([make_snapshot_if_step_fail])
-    @action
-    def check_rados_daemon(self):
-        """Check the radosgw daemon is started"""
-        def radosgw_started(remote):
-            return remote.check_call('pkill -0 radosgw')['exit_code'] == 0
-
-        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
-            assert_true(radosgw_started(remote),
-                        'radosgw daemon is not started')
diff --git a/system_test/tests/test_redeploy_after_reset.py b/system_test/tests/test_redeploy_after_reset.py
deleted file mode 100644
index bd86bbe14..000000000
--- a/system_test/tests/test_redeploy_after_reset.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from system_test import testcase -from system_test.tests import ActionTest -from system_test.actions import BaseActions - - -@testcase(groups=['system_test', 'system_test.redeploy_after_reset']) -class RedeployAfterReset(ActionTest, BaseActions): - """Case deploy Environment - - Scenario: - 1. Create Environment - 2. Add nodes to Environment - 3. Run network checker - 4. Deploy Environment - 5. Run network checker - 6. Run OSTF - """ - - actions_order = [ - 'prepare_admin_node_with_slaves', - 'create_env', - 'add_nodes', - 'network_check', - 'deploy_cluster', - 'network_check', - 'health_check', - 'reset_cluster', - 'network_check', - 'deploy_cluster', - 'network_check', - 'health_check', - ] diff --git a/system_test/tests/test_redeploy_after_stop.py b/system_test/tests/test_redeploy_after_stop.py deleted file mode 100644 index 06984863d..000000000 --- a/system_test/tests/test_redeploy_after_stop.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from system_test import testcase -from system_test.tests import ActionTest -from system_test.actions import BaseActions - - -@testcase(groups=['system_test', 'system_test.redeploy_after_stop']) -class RedeployAfterStop(ActionTest, BaseActions): - """Case deploy Environment - - Scenario: - 1. Create Environment - 2. Add nodes to Environment - 3. Run network checker - 4. Deploy Environment - 5. Run network checker - 6.
Run OSTF - """ - - actions_order = [ - 'prepare_admin_node_with_slaves', - 'create_env', - 'add_nodes', - 'network_check', - 'stop_on_deploy', - 'network_check', - 'deploy_cluster', - 'network_check', - 'health_check', - ] diff --git a/system_test/tests_templates/cluster_configs/networks/neutron_gre.yaml b/system_test/tests_templates/cluster_configs/networks/neutron_gre.yaml deleted file mode 100644 index 4a0a0878a..000000000 --- a/system_test/tests_templates/cluster_configs/networks/neutron_gre.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -provider: neutron -segment-type: gre -pubip-to-all: false diff --git a/system_test/tests_templates/cluster_configs/networks/neutron_tun.yaml b/system_test/tests_templates/cluster_configs/networks/neutron_tun.yaml deleted file mode 100644 index a13ecf95f..000000000 --- a/system_test/tests_templates/cluster_configs/networks/neutron_tun.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -provider: neutron -segment-type: tun -pubip-to-all: false diff --git a/system_test/tests_templates/cluster_configs/networks/neutron_vlan.yaml b/system_test/tests_templates/cluster_configs/networks/neutron_vlan.yaml deleted file mode 100644 index 560df34d1..000000000 --- a/system_test/tests_templates/cluster_configs/networks/neutron_vlan.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -provider: neutron -segment-type: vlan -pubip-to-all: false diff --git a/system_test/tests_templates/cluster_configs/nodes/mixed/1ctrl_1comp_1cndr_mongo_2ceph.yaml b/system_test/tests_templates/cluster_configs/nodes/mixed/1ctrl_1comp_1cndr_mongo_2ceph.yaml deleted file mode 100644 index 6f04bc38d..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/mixed/1ctrl_1comp_1cndr_mongo_2ceph.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- roles: - - controller - count: 1 -- roles: - - compute - count: 1 -- roles: - - cinder - - mongo - count: 1 -- roles: - - ceph-osd - count: 2 diff --git a/system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_3comp_ceph.yaml b/system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_3comp_ceph.yaml deleted file mode 100644 index 1f747842e..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_3comp_ceph.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- roles: - - controller - count: 3 -- roles: - - compute - - ceph-osd - count: 3 - diff --git a/system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_mongo_3comp_ceph.yaml b/system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_mongo_3comp_ceph.yaml deleted file mode 100644 index 382a749cc..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/mixed/3ctrl_mongo_3comp_ceph.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- roles: - - controller - - mongo - count: 3 -- roles: - - compute - - ceph-osd - count: 3 - diff --git a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp.yaml b/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp.yaml deleted file mode 100644 index 1754a13d5..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- roles: - - controller - count: 1 -- roles: - - compute - count: 1 diff --git a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr.yaml b/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr.yaml deleted file mode 100644 index ed669d189..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- roles: - - controller - count: 1 -- 
roles: - - compute - count: 1 -- roles: - - cinder - count: 1 - diff --git a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr_3ceph.yaml b/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr_3ceph.yaml deleted file mode 100644 index ebc1cfb9f..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_1cndr_3ceph.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- roles: - - controller - count: 1 -- roles: - - compute - count: 1 -- roles: - - cinder - count: 1 -- roles: - - ceph-osd - count: 3 diff --git a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_3ceph_1mongo.yaml b/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_3ceph_1mongo.yaml deleted file mode 100644 index 7e49201de..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_1comp_3ceph_1mongo.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- roles: - - controller - count: 1 -- roles: - - compute - count: 1 -- roles: - - ceph-osd - count: 3 -- roles: - - mongo - count: 1 diff --git a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp.yaml b/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp.yaml deleted file mode 100644 index ceb734933..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- roles: - - controller - count: 1 -- roles: - - compute - count: 2 diff --git a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp_1cndr_3ceph_1mongo.yaml b/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp_1cndr_3ceph_1mongo.yaml deleted file mode 100644 index 17c0d77c8..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/1ctrl_2comp_1cndr_3ceph_1mongo.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- roles: - - controller - count: 1 -- roles: - - compute - count: 2 -- roles: - - cinder - count: 1 -- roles: - - ceph-osd - count: 3 -- roles: - - mongo - count: 1 diff --git a/system_test/tests_templates/cluster_configs/nodes/single/3ctrl_1comp_1cndr.yaml b/system_test/tests_templates/cluster_configs/nodes/single/3ctrl_1comp_1cndr.yaml deleted file mode 100644 index 6b338d1a2..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/3ctrl_1comp_1cndr.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- roles: - - controller - count: 3 -- roles: - - compute - count: 1 -- roles: - - cinder - count: 1 - diff --git a/system_test/tests_templates/cluster_configs/nodes/single/3ctrl_2comp_1cndr.yaml b/system_test/tests_templates/cluster_configs/nodes/single/3ctrl_2comp_1cndr.yaml deleted file mode 100644 index 8be5f2acc..000000000 --- a/system_test/tests_templates/cluster_configs/nodes/single/3ctrl_2comp_1cndr.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- roles: - - controller - count: 3 -- roles: - - compute - count: 2 -- roles: - - cinder - count: 1 - diff --git a/system_test/tests_templates/cluster_configs/settings/cephVolImgRados.yaml b/system_test/tests_templates/cluster_configs/settings/cephVolImgRados.yaml deleted file mode 100644 index 97aacd901..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cephVolImgRados.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cephImgVolRados.yaml -components: - !include cluster_configs/settings/components/wo_components.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cephVolImgRados_ceilometer.yaml 
b/system_test/tests_templates/cluster_configs/settings/cephVolImgRados_ceilometer.yaml deleted file mode 100644 index 4c9e76a28..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cephVolImgRados_ceilometer.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cephImgVolRados.yaml -components: - !include cluster_configs/settings/components/ceilometer.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer.yaml b/system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer.yaml deleted file mode 100644 index d87a3b9fb..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cephImgVol.yaml -components: - !include cluster_configs/settings/components/ceilometer.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer_sahara.yaml b/system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer_sahara.yaml deleted file mode 100644 index 7dd9df945..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cephVolImg_ceilometer_sahara.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cephImgVol.yaml -components: - !include cluster_configs/settings/components/ceilometer_sahara.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cephVolImg_wo_components.yaml b/system_test/tests_templates/cluster_configs/settings/cephVolImg_wo_components.yaml deleted file mode 100644 index 6fb4f6586..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cephVolImg_wo_components.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cephImgVol.yaml -components: - !include cluster_configs/settings/components/wo_components.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cinder_ceilometer.yaml b/system_test/tests_templates/cluster_configs/settings/cinder_ceilometer.yaml deleted file mode 100644 index ad37c5a05..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cinder_ceilometer.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cinder_only.yaml -components: - !include cluster_configs/settings/components/ceilometer.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_ceilometer.yaml b/system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_ceilometer.yaml deleted file mode 100644 index 21383abef..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_ceilometer.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cinder_cephImgRados.yaml -components: - !include cluster_configs/settings/components/ceilometer.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_wo_components.yaml b/system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_wo_components.yaml deleted file mode 100644 index 49b7ba197..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cinder_cephImgRados_wo_components.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cinder_cephImgRados.yaml -components: - !include cluster_configs/settings/components/wo_components.yaml diff --git 
a/system_test/tests_templates/cluster_configs/settings/cinder_cephImg_ceilometer.yaml b/system_test/tests_templates/cluster_configs/settings/cinder_cephImg_ceilometer.yaml deleted file mode 100644 index ec67614cc..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cinder_cephImg_ceilometer.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cinder_cephImg.yaml -components: - !include cluster_configs/settings/components/ceilometer.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cinder_sahara.yaml b/system_test/tests_templates/cluster_configs/settings/cinder_sahara.yaml deleted file mode 100644 index e273dee0e..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cinder_sahara.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cinder_only.yaml -components: - !include cluster_configs/settings/components/sahara.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cinder_wo_components.yaml b/system_test/tests_templates/cluster_configs/settings/cinder_wo_components.yaml deleted file mode 100644 index 6a52a9cb1..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cinder_wo_components.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cinder_only.yaml -components: - !include cluster_configs/settings/components/wo_components.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/cinder_wo_componets.yaml b/system_test/tests_templates/cluster_configs/settings/cinder_wo_componets.yaml deleted file mode 100644 index 6a52a9cb1..000000000 --- a/system_test/tests_templates/cluster_configs/settings/cinder_wo_componets.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -storages: - !include cluster_configs/settings/storages/cinder_only.yaml -components: - !include cluster_configs/settings/components/wo_components.yaml diff --git a/system_test/tests_templates/cluster_configs/settings/components/ceilometer.yaml b/system_test/tests_templates/cluster_configs/settings/components/ceilometer.yaml deleted file mode 100644 index c44bf60a8..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/ceilometer.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -sahara: false -murano: false -ceilometer: true diff --git a/system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano.yaml b/system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano.yaml deleted file mode 100644 index a27cde492..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -sahara: false -murano: true -ceilometer: true diff --git a/system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano_sahara.yaml b/system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano_sahara.yaml deleted file mode 100644 index f9bd3cc90..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/ceilometer_murano_sahara.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -sahara: true -murano: true -ceilometer: true diff --git a/system_test/tests_templates/cluster_configs/settings/components/ceilometer_sahara.yaml b/system_test/tests_templates/cluster_configs/settings/components/ceilometer_sahara.yaml deleted file mode 100644 index 94a66a1a6..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/ceilometer_sahara.yaml +++ 
/dev/null @@ -1,4 +0,0 @@ ---- -sahara: true -murano: false -ceilometer: true diff --git a/system_test/tests_templates/cluster_configs/settings/components/murano.yaml b/system_test/tests_templates/cluster_configs/settings/components/murano.yaml deleted file mode 100644 index 1682f3b6b..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/murano.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -sahara: false -murano: true -ceilometer: false diff --git a/system_test/tests_templates/cluster_configs/settings/components/murano_sahara.yaml b/system_test/tests_templates/cluster_configs/settings/components/murano_sahara.yaml deleted file mode 100644 index d39dcab8a..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/murano_sahara.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -sahara: true -murano: true -ceilometer: false diff --git a/system_test/tests_templates/cluster_configs/settings/components/sahara.yaml b/system_test/tests_templates/cluster_configs/settings/components/sahara.yaml deleted file mode 100644 index afcb9f9bf..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/sahara.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -sahara: true -murano: false -ceilometer: false diff --git a/system_test/tests_templates/cluster_configs/settings/components/wo_components.yaml b/system_test/tests_templates/cluster_configs/settings/components/wo_components.yaml deleted file mode 100644 index fa796a81e..000000000 --- a/system_test/tests_templates/cluster_configs/settings/components/wo_components.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -sahara: false -murano: false -ceilometer: false diff --git a/system_test/tests_templates/cluster_configs/settings/storages/cephImgVol.yaml b/system_test/tests_templates/cluster_configs/settings/storages/cephImgVol.yaml deleted file mode 100644 index 721897216..000000000 --- a/system_test/tests_templates/cluster_configs/settings/storages/cephImgVol.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -volume-lvm: false -volume-ceph: true -image-ceph: true -rados-ceph: false -ephemeral-ceph: false -replica-ceph: 2 diff --git a/system_test/tests_templates/cluster_configs/settings/storages/cephImgVolRados.yaml b/system_test/tests_templates/cluster_configs/settings/storages/cephImgVolRados.yaml deleted file mode 100644 index beea17c48..000000000 --- a/system_test/tests_templates/cluster_configs/settings/storages/cephImgVolRados.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -volume-lvm: false -volume-ceph: true -image-ceph: true -rados-ceph: true -ephemeral-ceph: false -replica-ceph: 2 diff --git a/system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImg.yaml b/system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImg.yaml deleted file mode 100644 index d7e05f72f..000000000 --- a/system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImg.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -volume-lvm: true -volume-ceph: false -image-ceph: true -rados-ceph: false -ephemeral-ceph: false -replica-ceph: 2 diff --git a/system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImgRados.yaml b/system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImgRados.yaml deleted file mode 100644 index 0602dffd3..000000000 --- a/system_test/tests_templates/cluster_configs/settings/storages/cinder_cephImgRados.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -volume-lvm: true -volume-ceph: false -image-ceph: true -rados-ceph: true -ephemeral-ceph: false -replica-ceph: 2 diff --git 
a/system_test/tests_templates/cluster_configs/settings/storages/cinder_only.yaml b/system_test/tests_templates/cluster_configs/settings/storages/cinder_only.yaml deleted file mode 100644 index 952fa0923..000000000 --- a/system_test/tests_templates/cluster_configs/settings/storages/cinder_only.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -volume-lvm: true -volume-ceph: false -image-ceph: false -rados-ceph: false -ephemeral-ceph: false -replica-ceph: 2 diff --git a/system_test/tests_templates/devops_configs/centos_master.yaml b/system_test/tests_templates/devops_configs/centos_master.yaml deleted file mode 100644 index fa80a10a0..000000000 --- a/system_test/tests_templates/devops_configs/centos_master.yaml +++ /dev/null @@ -1,190 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - rack-01-slave-interfaces: &rack-01-slave-interfaces - - label: eth0 - l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: storage - interface_model: *interface_model - - label: eth3 - l2_network_device: management - interface_model: *interface_model - - label: eth4 - l2_network_device: private - interface_model: *interface_model - - rack-01-slave-network_config: &rack-01-slave-network_config - eth0: - networks: - - fuelweb_admin - eth1: - networks: - - public - eth2: - networks: - - storage - eth3: - networks: - - management - eth4: - networks: - - private - - rack-01-slave-node-params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - public-pool01: - net: *pool_default - params: - vlan_start: 0 - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. 
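The settings templates above compose storage and component fragments with a custom !include tag, and the devops templates read defaults from the environment with !os_env (for example !os_env POOL_DEFAULT, 10.109.0.0/16:24). A hedged sketch of how such tags can be registered with PyYAML — the loader below is an assumption for illustration, not the retired repo's actual implementation:

    # Sketch: custom PyYAML constructors for the '!include' and '!os_env'
    # tags used by these templates (illustrative re-implementation).
    import os
    import yaml

    def include_constructor(loader, node):
        # '!include path/to/file.yaml' -> parsed content of that file
        path = loader.construct_scalar(node)
        with open(path) as f:
            return yaml.load(f, Loader=TemplateLoader)

    def os_env_constructor(loader, node):
        # '!os_env NAME, default' -> os.environ.get('NAME', 'default')
        # (values stay strings in this sketch)
        text = loader.construct_scalar(node)
        name, _, default = [part.strip() for part in text.partition(',')]
        return os.environ.get(name, default or None)

    class TemplateLoader(yaml.SafeLoader):
        pass

    TemplateLoader.add_constructor('!include', include_constructor)
    TemplateLoader.add_constructor('!os_env', os_env_constructor)

    print(yaml.load('pool: !os_env POOL_DEFAULT, 10.109.0.0/16:24',
                    Loader=TemplateLoader))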
It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - - management: - address_pool: management-pool01 - dhcp: false - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - source_image: !os_env CENTOS_CLOUD_IMAGE_PATH - format: qcow2 - - name: iso - capacity: 1 - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-01-slave-node-params - diff --git a/system_test/tests_templates/devops_configs/default.yaml b/system_test/tests_templates/devops_configs/default.yaml deleted file mode 100644 index 55ad8b6e9..000000000 --- a/system_test/tests_templates/devops_configs/default.yaml +++ /dev/null @@ -1,190 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - rack-01-slave-interfaces: &rack-01-slave-interfaces - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: storage - interface_model: *interface_model - - label: eth3 - l2_network_device: management - interface_model: *interface_model - - label: eth4 - l2_network_device: private - interface_model: *interface_model - - rack-01-slave-network_config: &rack-01-slave-network_config - eth0: - networks: - - fuelweb_admin - eth1: - networks: - - public - eth2: - networks: - - storage - eth3: - networks: - - management - eth4: - networks: - - private - - rack-01-slave-node-params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - public-pool01: - net: *pool_default - params: - vlan_start: 0 - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - - management: - address_pool: management-pool01 - dhcp: false - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-01-slave-node-params - diff --git a/system_test/tests_templates/devops_configs/default30-2groups.yaml b/system_test/tests_templates/devops_configs/default30-2groups.yaml deleted file mode 100644 index dec78fc56..000000000 --- a/system_test/tests_templates/devops_configs/default30-2groups.yaml +++ /dev/null @@ -1,506 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, -2] # admin IP range for 'default' nodegroup name - - public-pool01: - net: *pool_default - params: - vlan_start: 100 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, +127] # public IP range for 'default' nodegroup name - floating: [+128, -2] - - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, -2] # public IP range for 'default' nodegroup name - - management-pool01: - net: *pool_default - params: - vlan_start: 102 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, -2] # public IP range for 'default' nodegroup name - - private-pool01: - net: *pool_default - params: - vlan_start: 960 - vlan_end: 990 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, -2] # public IP range for 'default' nodegroup name - - public-pool02: - net: *pool_default - params: - vlan_start: 100 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - virtual-rack-01: [+2, +127] # public IP range for 'virtual-rack-01' nodegroup name - floating: [+128, -2] - - storage-pool02: - net: *pool_default - params: - vlan_start: 101 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - virtual-rack-01: [+2, -2] # public IP range for 'virtual-rack-01' nodegroup name - - management-pool02: - net: *pool_default - params: - vlan_start: 102 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - virtual-rack-01: [+2, -2] # public IP range for 'virtual-rack-01' nodegroup name - - private-pool02: - net: *pool_default - params: - vlan_start: 960 - vlan_end: 990 
- ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - virtual-rack-01: [+2, -2] # public IP range for 'virtual-rack-01' nodegroup name - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - vlan_ifaces: - - 100 - - # Public libvirt network is only for connecting public network - # to the Internet. - # Actually, public network with tag:100 use 'admin' l2_network_device - # (see 'network_config' in nodes) - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - parent_iface: - l2_net_dev: admin - tag: 100 - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: iface1 - l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - iface1: - networks: - - fuelweb_admin - - - name: slave-01 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 7c:14:7a:6c:9b:96 - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: 8c:04:7a:6c:9b:97 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-02 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. 
- interfaces: - - label: iface1 - mac_address: fc:c4:7a:6c:9a:16 - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: dc:c4:7a:6c:9a:17 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-03 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: ac:c4:7a:6c:9b:40 - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: 9c:c4:7a:6c:9b:41 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: virtual-rack-01 - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool02 - storage: storage-pool02 - management: management-pool02 - private: private-pool02 - - nodes: - - - name: slave-04 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 0c:d4:7a:6d:35:98 - l2_network_device: admin - interface_model: *interface_model - - label: iface3 - mac_address: 00:ab:21:8a:7b:18 - l2_network_device: admin - interface_model: *interface_model - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface3: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-05 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. 
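Each slave in this template pins its MAC addresses and splits the five Nailgun networks between two interfaces; a mapping that drops or duplicates a network is an easy template mistake. A small sanity check one could run over a node's network_config (a hypothetical helper, not part of the retired suite):

    # Sketch: check that every expected Nailgun network is assigned to
    # exactly one interface in a node's network_config (hypothetical).
    EXPECTED = {'fuelweb_admin', 'public', 'storage', 'management', 'private'}

    def check_network_config(network_config):
        seen = [net for nets in network_config.values() for net in nets]
        assert sorted(seen) == sorted(EXPECTED), 'bad mapping: %s' % seen

    check_network_config({
        'iface1': ['fuelweb_admin', 'public'],
        'iface2': ['storage', 'management', 'private'],
    })
    print('network_config OK')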
- interfaces: - - label: iface1 - mac_address: 3c:c4:7a:6d:28:de - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: 20:1b:21:89:47:90 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - - - name: slave-06 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 1c:c4:7a:6d:28:de - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: 10:1b:21:89:47:90 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - - - name: slave-07 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 0c:c4:7a:6d:18:de - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: 00:1b:21:89:37:90 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-08 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. 
- interfaces: - - label: iface1 - mac_address: 0c:c4:7a:5d:28:de - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: 00:1b:21:79:47:90 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-09 # Custom name for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 0c:c4:6a:6d:28:de - interface_model: *interface_model - l2_network_device: admin - - label: iface2 - mac_address: 00:1b:11:89:47:90 - interface_model: *interface_model - l2_network_device: admin - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - diff --git a/system_test/tests_templates/devops_configs/default30-2groups_.yaml b/system_test/tests_templates/devops_configs/default30-2groups_.yaml deleted file mode 100644 index b7ab78514..000000000 --- a/system_test/tests_templates/devops_configs/default30-2groups_.yaml +++ /dev/null @@ -1,321 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, -2] # admin IP range for 'default' nodegroup name - - public-pool01: - net: *pool_default - params: - vlan_start: 100 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, +127] # public IP range for 'default' nodegroup name - floating: [+128, -2] - - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 960 - vlan_start: 990 - -groups: - - - - name: baremetal-rack-01 - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. 
It is *NOT* Nailgun networks - admin-net-01: - address_pool: fuelweb_admin-pool01 - dhcp: false - vlan_ifaces: - - 0 - - openstack-net-01: - forward: - mode: Null - - nodes: - - - name: slave-01 # Custom name of baremetal for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 0c:c4:7a:6c:9b:96 - interface_model: *interface_model - l2_network_device: admin-net-01 - - label: iface2 - mac_address: 0c:c4:7a:6c:9b:97 - interface_model: *interface_model - l2_network_device: openstack-net-01 - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-02 # Custom name of baremetal for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 0c:c4:7a:6c:9a:16 - interface_model: *interface_model - l2_network_device: admin-net-01 - - label: iface2 - mac_address: 0c:c4:7a:6c:9a:17 - interface_model: *interface_model - l2_network_device: openstack-net-01 - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-03 # Custom name of baremetal for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 0c:c4:7a:6c:9b:40 - interface_model: *interface_model - l2_network_device: admin-net-01 - - label: iface2 - mac_address: 0c:c4:7a:6c:9b:41 - interface_model: *interface_model - l2_network_device: openstack-net-01 - network_config: - iface1: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface2: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-04 # Custom name of baremetal for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. 
- interfaces: - - label: iface1 - mac_address: 00:1b:21:8a:7b:18 - l2_network_device: openstack-net-01 - interface_model: *interface_model - - label: iface3 - mac_address: 0c:c4:7a:6d:35:98 - l2_network_device: admin-net-01 - interface_model: *interface_model - network_config: - iface3: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface1: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: slave-05 # Custom name of baremetal for Fuel slave node - role: fuel_slave # Fixed role for Fuel master node properties - params: - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 200 - format: qcow2 - - # so, interfaces can be turn on in one or in a different switches. - interfaces: - - label: iface1 - mac_address: 00:1b:21:89:47:90 - interface_model: *interface_model - l2_network_device: openstack-net-01 - - label: iface2 - mac_address: 0c:c4:7a:6d:28:de - interface_model: *interface_model - l2_network_device: admin-net-01 - network_config: - iface2: - networks: - - fuelweb_admin ## OpenStack network, NOT switch name - - public ## OpenStack network, NOT switch name - iface1: - networks: - - storage ## OpenStack network, NOT switch name - - management ## OpenStack network, NOT switch name - - private ## OpenStack network, NOT switch name - - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - parent_iface: - l2_net_dev: admin-net-01 - tag: 0 - vlan_ifaces: - - 100 - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - parent_iface: - l2_net_dev: admin - tag: 100 - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: iface1 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - iface1: - networks: - - fuelweb_admin diff --git a/system_test/tests_templates/devops_configs/default30-bond.yaml b/system_test/tests_templates/devops_configs/default30-bond.yaml deleted file mode 100644 index f15aa3bc0..000000000 --- a/system_test/tests_templates/devops_configs/default30-bond.yaml +++ /dev/null @@ -1,230 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, -2] # admin IP range for 'default' nodegroup name - - public-pool01: - net: *pool_default - params: - vlan_start: 100 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, +127] # public IP range for 'default' nodegroup name - floating: [+128, -2] - - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - openstack_br: - vlan_ifaces: - - 100 - - 101 - - 102 - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - parent_iface: - l2_net_dev: openstack_br - tag: 100 - - storage: - address_pool: storage-pool01 - dhcp: false - parent_iface: - l2_net_dev: openstack_br - tag: 101 - - management: - address_pool: management-pool01 - dhcp: false - parent_iface: - l2_net_dev: openstack_br - tag: 102 - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: iface2 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* a Nailgun network - interface_model: *interface_model -# - # not used - network_config: - iface2: - networks: - - fuelweb_admin - - # Slave nodes - - - name: slave-01 - role: fuel_slave - params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - # List of node interfaces - interfaces: - - - label: iface6 - l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - - label: iface2 - l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - - label: iface3 - l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - - label: iface4 - l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - - label: iface5 - l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - - # How Nailgun/OpenStack networks should assigned for interfaces - network_config: - - iface6: - networks: - - fuelweb_admin # Nailgun/OpenStack network name - - bond0: - networks: - - public - - storage - - management - - private - aggregation: active-backup # if 'aggregation' present in the config - then enable bonding for interfaces in 'parents' - parents: - - iface2 - - iface3 - - iface4 - - iface5 - - - - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-01-slave-node-params - diff --git a/system_test/tests_templates/devops_configs/default30.yaml b/system_test/tests_templates/devops_configs/default30.yaml deleted file mode 100644 index 5e457282f..000000000 --- a/system_test/tests_templates/devops_configs/default30.yaml +++ /dev/null @@ -1,209 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, -2] # admin IP range for 'default' nodegroup name - - public-pool01: - net: *pool_default - params: - vlan_start: 100 - ip_reserved: - gateway: +1 - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - default: [+2, +127] # public IP range for 'default' nodegroup name - floating: [+128, -2] - - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - 
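Every template in this family builds its pools from the single POOL_DEFAULT spec 10.109.0.0/16:24, read as "allocate /24 subnets out of 10.109.0.0/16", one per named network (fuelweb_admin, public, storage, management, private). Expanding such a spec is straightforward with the standard library (a sketch of that reading; devops' own allocation logic was more involved):

    # Sketch: expand a 'network:prefixlen' pool spec into one subnet per
    # named network, in order (illustrative allocator).
    import ipaddress

    def carve(pool_spec, names):
        net_str, _, new_prefix = pool_spec.rpartition(':')
        network = ipaddress.ip_network(net_str)
        subnets = network.subnets(new_prefix=int(new_prefix))
        return dict(zip(names, subnets))

    pools = carve('10.109.0.0/16:24',
                  ['fuelweb_admin', 'public', 'storage',
                   'management', 'private'])
    for name, subnet in pools.items():
        print(name, subnet)  # fuelweb_admin 10.109.0.0/24, ...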
private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - openstack_br: - vlan_ifaces: - - 100 - - 101 - - 102 - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - parent_iface: - l2_net_dev: openstack_br - tag: 100 - - storage: - address_pool: storage-pool01 - dhcp: false - parent_iface: - l2_net_dev: openstack_br - tag: 101 - - management: - address_pool: management-pool01 - dhcp: false - parent_iface: - l2_net_dev: openstack_br - tag: 102 - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: iface2 - l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - iface2: - networks: - - fuelweb_admin - - # Slave nodes - - - name: slave-01 - role: fuel_slave - params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - - # List of node interfaces - interfaces: - - - label: iface3 - l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - - label: iface2 - l2_network_device: openstack_br # Libvirt bridge name. 
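# The '!os_env VAR, <default>' tag used throughout these templates resolves
# to the value of the environment variable VAR, falling back to the literal
# after the comma when VAR is unset. A short sketch of the pattern:
params:
  vcpu: !os_env SLAVE_NODE_CPU, 2          # $SLAVE_NODE_CPU, default 2
  memory: !os_env SLAVE_NODE_MEMORY, 3072  # $SLAVE_NODE_MEMORY, default 3072
# One template can therefore serve differently sized environments unchanged.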
It is *NOT* Nailgun networks - interface_model: *interface_model - - - # How Nailgun/OpenStack networks should assigned for interfaces - network_config: - - iface2: - networks: - - public - - storage - - management - - private - - iface3: - networks: - - fuelweb_admin # Nailgun/OpenStack network name - - - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-01-slave-node-params diff --git a/system_test/tests_templates/devops_configs/external_haproxy.yaml b/system_test/tests_templates/devops_configs/external_haproxy.yaml deleted file mode 100644 index fc51d684d..000000000 --- a/system_test/tests_templates/devops_configs/external_haproxy.yaml +++ /dev/null @@ -1,454 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - default-slave-interfaces: &default-slave-interfaces - - label: enp0s3 - l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - label: enp0s4 - l2_network_device: public - interface_model: *interface_model - - label: enp0s5 - l2_network_device: management - interface_model: *interface_model - - label: enp0s6 - l2_network_device: private - interface_model: *interface_model - - label: enp0s7 - l2_network_device: storage - interface_model: *interface_model - - label: enp0s13 - l2_network_device: public3 - interface_model: *interface_model - - label: enp0s14 - l2_network_device: management3 - interface_model: *interface_model - - rack-02-slave-interfaces: &rack-02-slave-interfaces - - label: enp0s3 - l2_network_device: admin2 # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - label: enp0s4 - l2_network_device: public2 - interface_model: *interface_model - - label: enp0s5 - l2_network_device: management2 - interface_model: *interface_model - - label: enp0s6 - l2_network_device: private2 - interface_model: *interface_model - - label: enp0s7 - l2_network_device: storage - interface_model: *interface_model - - label: enp0s13 - l2_network_device: public3 - interface_model: *interface_model - - label: enp0s14 - l2_network_device: management3 - interface_model: *interface_model - - rack-03-slave-interfaces: &rack-03-slave-interfaces - - label: enp0s3 - l2_network_device: admin3 # Libvirt bridge name. 
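# external_haproxy.yaml above relies on plain YAML anchors and aliases:
# each per-rack interface list is declared once with '&name' and reused
# with '*name', so all slaves of a rack share a single definition.
# Minimal sketch (the 'rack-xx' names are hypothetical):
rack-xx-interfaces: &rack-xx-interfaces
  - label: enp0s3
    l2_network_device: admin
    interface_model: *interface_model
rack-xx-node-params: &rack-xx-node-params
  interfaces: *rack-xx-interfaces          # alias expands to the list above
# and each node then reduces to: params: *rack-xx-node-params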
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: enp0s4 - l2_network_device: public3 - interface_model: *interface_model - - label: enp0s5 - l2_network_device: management3 - interface_model: *interface_model - - label: enp0s6 - l2_network_device: private3 - interface_model: *interface_model - - label: enp0s7 - l2_network_device: storage - interface_model: *interface_model - - label: enp0s13 - l2_network_device: public - interface_model: *interface_model - - label: enp0s14 - l2_network_device: management - interface_model: *interface_model - - label: enp0s15 - l2_network_device: public2 - interface_model: *interface_model - - label: enp0s16 - l2_network_device: management2 - interface_model: *interface_model - - default-slave-network_config: &default-slave-network_config - enp0s3: - networks: - - fuelweb_admin - enp0s4: - networks: - - public - enp0s5: - networks: - - management - enp0s6: - networks: - - private - enp0s7: - networks: - - storage - - rack-02-slave-network_config: &rack-02-slave-network_config - enp0s3: - networks: - - fuelweb_admin2 - enp0s4: - networks: - - public2 - enp0s5: - networks: - - management2 - enp0s6: - networks: - - private2 - enp0s7: - networks: - - storage - - rack-03-slave-network_config: &rack-03-slave-network_config - enp0s3: - networks: - - fuelweb_admin3 - enp0s4: - networks: - - public3 - enp0s5: - networks: - - management3 - enp0s6: - networks: - - private3 - enp0s7: - networks: - - storage - - default-slave-node-params: &default-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *default-slave-interfaces - network_config: *default-slave-network_config - - rack-02-slave-node-params: &rack-02-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-02-slave-interfaces - network_config: *rack-02-slave-network_config - - rack-03-slave-node-params: &rack-03-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-03-slave-interfaces - network_config: *rack-03-slave-network_config - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - public-pool01: - net: *pool_default - params: - vlan_start: 0 - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - - fuelweb_admin-pool02: - net: *pool_default - params: - vlan_start: 0 - public-pool02: - net: *pool_default - params: - vlan_start: 0 - management-pool02: - net: *pool_default - params: 
- vlan_start: 102 - private-pool02: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - - fuelweb_admin-pool03: - net: *pool_default - params: - vlan_start: 0 - public-pool03: - net: *pool_default - params: - vlan_start: 0 - management-pool03: - net: *pool_default - params: - vlan_start: 102 - private-pool03: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: route - - storage: - address_pool: storage-pool01 - dhcp: false - forward: - mode: route - - management: - address_pool: management-pool01 - dhcp: false - forward: - mode: route - - private: - address_pool: private-pool01 - dhcp: false - forward: - mode: route - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: enp0s3 - l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network - interface_model: *interface_model - - label: enp0s4 - l2_network_device: admin2 - interface_model: *interface_model - - label: enp0s5 - l2_network_device: admin3 - interface_model: *interface_model - network_config: - enp0s3: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *default-slave-node-params - - name: slave-02 - role: fuel_slave - params: *default-slave-node-params - - name: slave-03 - role: fuel_slave - params: *default-slave-node-params - - - name: rack-02 - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool02 - public: public-pool02 - storage: storage-pool01 - management: management-pool02 - private: private-pool02 - - l2_network_devices: # Libvirt bridges. 
It is *NOT* Nailgun networks - admin2: - address_pool: fuelweb_admin-pool02 - dhcp: false - forward: - mode: nat - - public2: - address_pool: public-pool02 - dhcp: false - forward: - mode: route - - management2: - address_pool: management-pool02 - dhcp: false - forward: - mode: route - - private2: - address_pool: private-pool02 - dhcp: false - forward: - mode: route - - nodes: - - name: slave-04 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-02-slave-node-params - - - name: rack-03 - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool03 - public: public-pool03 - storage: storage-pool01 - management: management-pool03 - private: private-pool03 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin3: - address_pool: fuelweb_admin-pool03 - dhcp: false - forward: - mode: nat - - public3: - address_pool: public-pool03 - dhcp: false - forward: - mode: route - - management3: - address_pool: management-pool03 - dhcp: false - forward: - mode: route - - private3: - address_pool: private-pool03 - dhcp: false - forward: - mode: route - - nodes: - - name: slave-07 - role: fuel_slave - params: *rack-03-slave-node-params diff --git a/system_test/tests_templates/devops_configs/ironic.yaml b/system_test/tests_templates/devops_configs/ironic.yaml deleted file mode 100644 index 8179e6514..000000000 --- a/system_test/tests_templates/devops_configs/ironic.yaml +++ /dev/null @@ -1,224 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - slave-interfaces: &slave-interfaces - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
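# Two libvirt forward modes appear in the groups above: the admin bridges
# use 'nat' (guests reach the outside through host NAT), while the extra
# racks' public/management/private bridges use 'route' (traffic is routed
# between bridges without NAT), which lets the racks reach each other's
# subnets directly. The two forms side by side:
admin2:
  address_pool: fuelweb_admin-pool02
  dhcp: false
  forward:
    mode: nat      # NATed behind the host
public2:
  address_pool: public-pool02
  dhcp: false
  forward:
    mode: route    # routed as-is, no NAT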
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: storage - interface_model: *interface_model - - label: eth3 - l2_network_device: management - interface_model: *interface_model - - label: eth4 - l2_network_device: private - interface_model: *interface_model - - label: eth5 - l2_network_device: ironic - interface_model: *interface_model - - slave-network_config: &slave-network_config - eth0: - networks: - - fuelweb_admin - eth1: - networks: - - public - eth2: - networks: - - storage - eth3: - networks: - - management - eth4: - networks: - - private - eth5: - networks: - - baremetal - - slave-node-params: &slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *slave-interfaces - network_config: *slave-network_config - -template: - devops_settings: - env_name: !os_env ENV_NAME - address_pools: - # Network pools used by the environment - fuelweb_admin: - net: *pool_default - params: - tag: 0 - public: - net: *pool_default - params: - vlan_start: 0 - storage: - net: *pool_default - params: - vlan_start: 101 - management: - net: *pool_default - params: - vlan_start: 102 - ironic: - net: *pool_default - params: - vlan_start: 0 - private: - net: *pool_default - params: - vlan_start: 104 - vlan_end: 120 - - groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin - public: public - storage: storage - management: management - private: private - ironic: ironic - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin - dhcp: false - forward: - mode: nat - - public: - address_pool: public - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage - dhcp: false - - management: - address_pool: management - dhcp: false - - private: - address_pool: private - dhcp: false - - ironic: - address_pool: ironic - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
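# ironic.yaml above gives every slave a sixth NIC on the 'ironic' libvirt
# bridge and maps it to the 'baremetal' Nailgun network; the bridge name
# and the network name differ on purpose. The pairing, condensed:
interfaces:
  - label: eth5
    l2_network_device: ironic        # libvirt bridge
    interface_model: *interface_model
network_config:
  eth5:
    networks:
      - baremetal                    # Nailgun/OpenStack network on that NIC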
It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *slave-node-params - - name: slave-02 - role: fuel_slave - params: *slave-node-params - - name: slave-03 - role: fuel_slave - params: *slave-node-params - - name: slave-04 - role: fuel_slave - params: *slave-node-params - - name: slave-05 - role: fuel_slave - params: *slave-node-params - - name: slave-06 - role: fuel_slave - params: *slave-node-params - # slaves 7-9 is not used by fuel-qa but can be used in manual tests - - name: slave-07 - role: fuel_slave - params: *slave-node-params - - name: slave-08 - role: fuel_slave - params: *slave-node-params - - name: slave-09 - role: fuel_slave - params: *slave-node-params - - name: ironic-slave-01 - role: ironic - params: &ironic-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 50 - format: qcow2 - interfaces: - - l2_network_device: ironic - label: eth0 - interface_model: *interface_model - - name: ironic-slave-02 - params: *ironic-slave-node-params - role: ironic diff --git a/system_test/tests_templates/devops_configs/multipath.yaml b/system_test/tests_templates/devops_configs/multipath.yaml deleted file mode 100644 index f7786d322..000000000 --- a/system_test/tests_templates/devops_configs/multipath.yaml +++ /dev/null @@ -1,172 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - rack-01-slave-interfaces: &rack-01-slave-interfaces - - label: enp0s3 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: enp0s4 - l2_network_device: public - interface_model: *interface_model - - label: enp0s5 - l2_network_device: storage - interface_model: *interface_model - - label: enp0s6 - l2_network_device: management - interface_model: *interface_model - - label: enp0s7 - l2_network_device: private - interface_model: *interface_model - - rack-01-slave-network_config: &rack-01-slave-network_config - enp0s3: - networks: - - fuelweb_admin - enp0s4: - networks: - - public - enp0s5: - networks: - - storage - enp0s6: - networks: - - management - enp0s7: - networks: - - private - - rack-01-slave-node-params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 1 - memory: !os_env SLAVE_NODE_MEMORY, 2048 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: raw - multipath_count: !os_env SLAVE_MULTIPATH_DISKS_COUNT, 2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: raw - multipath_count: !os_env SLAVE_MULTIPATH_DISKS_COUNT, 2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: raw - multipath_count: !os_env SLAVE_MULTIPATH_DISKS_COUNT, 2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - public-pool01: - net: *pool_default - params: - vlan_start: 0 - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 110 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - - management: - address_pool: management-pool01 - dhcp: false - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: enp0s3 - l2_network_device: admin # Libvirt bridge name. 
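# multipath.yaml above is the one template here that sets 'multipath_count':
# each raw-format volume is presented to the guest over several paths so
# multipath disk handling can be exercised. One volume, condensed:
volumes:
  - name: system
    capacity: !os_env NODE_VOLUME_SIZE, 55
    format: raw                                              # raw, not qcow2
    multipath_count: !os_env SLAVE_MULTIPATH_DISKS_COUNT, 2  # paths per disk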
It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - enp0s3: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params diff --git a/system_test/tests_templates/devops_configs/multirack.yaml b/system_test/tests_templates/devops_configs/multirack.yaml deleted file mode 100644 index 2df841e1a..000000000 --- a/system_test/tests_templates/devops_configs/multirack.yaml +++ /dev/null @@ -1,449 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - default-slave-interfaces: &default-slave-interfaces - - label: enp0s3 - l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - label: enp0s4 - l2_network_device: public - interface_model: *interface_model - - label: enp0s5 - l2_network_device: management - interface_model: *interface_model - - label: enp0s6 - l2_network_device: private - interface_model: *interface_model - - label: enp0s7 - l2_network_device: storage - interface_model: *interface_model - - rack-02-slave-interfaces: &rack-02-slave-interfaces - - label: enp0s3 - l2_network_device: admin2 # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - label: enp0s4 - l2_network_device: public2 - interface_model: *interface_model - - label: enp0s5 - l2_network_device: management2 - interface_model: *interface_model - - label: enp0s6 - l2_network_device: private2 - interface_model: *interface_model - - label: enp0s7 - l2_network_device: storage - interface_model: *interface_model - - rack-03-slave-interfaces: &rack-03-slave-interfaces - - label: enp0s3 - l2_network_device: admin3 # Libvirt bridge name. 
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: enp0s4 - l2_network_device: public3 - interface_model: *interface_model - - label: enp0s5 - l2_network_device: management3 - interface_model: *interface_model - - label: enp0s6 - l2_network_device: private3 - interface_model: *interface_model - - label: enp0s7 - l2_network_device: storage - interface_model: *interface_model - - default-slave-network_config: &default-slave-network_config - enp0s3: - networks: - - fuelweb_admin - enp0s4: - networks: - - public - enp0s5: - networks: - - management - enp0s6: - networks: - - private - enp0s7: - networks: - - storage - - rack-02-slave-network_config: &rack-02-slave-network_config - enp0s3: - networks: - - fuelweb_admin - enp0s4: - networks: - - public - enp0s5: - networks: - - management - enp0s6: - networks: - - private - enp0s7: - networks: - - storage - - rack-03-slave-network_config: &rack-03-slave-network_config - enp0s3: - networks: - - fuelweb_admin - enp0s4: - networks: - - public - enp0s5: - networks: - - management - enp0s6: - networks: - - private - enp0s7: - networks: - - storage - - default-slave-node-params: &default-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *default-slave-interfaces - network_config: *default-slave-network_config - - rack-02-slave-node-params: &rack-02-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - bootmenu_timeout: 3000 - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-02-slave-interfaces - network_config: *rack-02-slave-network_config - - rack-03-slave-node-params: &rack-03-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - bootmenu_timeout: 3000 - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-03-slave-interfaces - network_config: *rack-03-slave-network_config - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - public-pool01: - net: *pool_default - params: - vlan_start: 0 - ip_ranges: - default: [+2, +31] # public IP range for 'default' nodegroup name - floating: [+32, -2] # floating IP range for 'default' nodegroup name - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 110 - - fuelweb_admin-pool02: - net: *pool_default - params: - vlan_start: 0 - ip_reserved: - gateway: +2 # gateway is Fuel master, not libvirt bridge - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - group-custom-1: [+3, -2] # admin IP range for 'group-custom-1' nodegroup name - public-pool02: - net: 
*pool_default - params: - vlan_start: 0 - ip_ranges: - group-custom-1: [+2, +31] # public IP range for 'group-custom-1' nodegroup name - management-pool02: - net: *pool_default - params: - vlan_start: 102 - private-pool02: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 110 - - fuelweb_admin-pool03: - net: *pool_default - params: - vlan_start: 0 - ip_reserved: - gateway: +2 # gateway is Fuel master, not libvirt bridge - l2_network_device: +1 # l2_network_device will get this IP address - ip_ranges: - group-custom-2: [+3, -2] # admin IP range for 'group-custom-2' nodegroup name - public-pool03: - net: *pool_default - params: - vlan_start: 0 - ip_ranges: - group-custom-2: [+2, +31] # public IP range for 'group-custom-2' nodegroup name - management-pool03: - net: *pool_default - params: - vlan_start: 102 - private-pool03: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 110 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - forward: - mode: route - - management: - address_pool: management-pool01 - dhcp: false - forward: - mode: route - - private: - address_pool: private-pool01 - dhcp: false - forward: - mode: route - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: enp0s3 - l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network - interface_model: *interface_model - - label: enp0s4 - l2_network_device: admin2 - interface_model: *interface_model - - label: enp0s5 - l2_network_device: admin3 - interface_model: *interface_model - network_config: - enp0s3: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *default-slave-node-params - - name: slave-02 - role: fuel_slave - params: *default-slave-node-params - - name: slave-03 - role: fuel_slave - params: *default-slave-node-params - - - name: group-custom-1 - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. 
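# Note how the second and third admin pools above reserve the gateway at
# '+2' with the comment "gateway is Fuel master, not libvirt bridge": the
# default gateway of those admin networks is the Fuel master itself, which
# is attached to admin2/admin3 through its extra NICs. Condensed sketch
# (the 'NN'/'N' suffixes are hypothetical):
fuelweb_admin-poolNN:
  net: *pool_default
  params:
    ip_reserved:
      gateway: +2               # the Fuel master's address
      l2_network_device: +1     # the bridge still takes +1
    ip_ranges:
      group-custom-N: [+3, -2]  # admin range for that nodegroup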
- # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool02 - public: public-pool02 - storage: storage-pool01 - management: management-pool02 - private: private-pool02 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin2: - address_pool: fuelweb_admin-pool02 - dhcp: false - forward: - mode: nat - - public2: - address_pool: public-pool02 - dhcp: false - forward: - mode: nat - - management2: - address_pool: management-pool02 - dhcp: false - forward: - mode: route - - private2: - address_pool: private-pool02 - dhcp: false - forward: - mode: route - - nodes: - - name: slave-04 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-02-slave-node-params - - - name: group-custom-2 - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool03 - public: public-pool03 - storage: storage-pool01 - management: management-pool03 - private: private-pool03 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin3: - address_pool: fuelweb_admin-pool03 - dhcp: false - forward: - mode: nat - - public3: - address_pool: public-pool03 - dhcp: false - forward: - mode: nat - - management3: - address_pool: management-pool03 - dhcp: false - forward: - mode: route - - private3: - address_pool: private-pool03 - dhcp: false - forward: - mode: route - - nodes: - - name: slave-07 - role: fuel_slave - params: *rack-03-slave-node-params diff --git a/system_test/tests_templates/devops_configs/public_api.yaml b/system_test/tests_templates/devops_configs/public_api.yaml deleted file mode 100644 index da25e0231..000000000 --- a/system_test/tests_templates/devops_configs/public_api.yaml +++ /dev/null @@ -1,207 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - rack-01-slave-interfaces: &rack-01-slave-interfaces - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: os-api - interface_model: *interface_model - - label: eth3 - l2_network_device: storage - interface_model: *interface_model - - label: eth4 - l2_network_device: management - interface_model: *interface_model - - label: eth5 - l2_network_device: private - interface_model: *interface_model - - rack-01-slave-network_config: &rack-01-slave-network_config - eth0: - networks: - - fuelweb_admin - eth1: - networks: - - public - eth2: - networks: - - os-api - eth3: - networks: - - storage - eth4: - networks: - - management - eth5: - networks: - - private - - rack-01-slave-node-params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - public-pool01: - net: *pool_default - params: - vlan_start: 0 - os-api-pool01: - net: *pool_default - params: - vlan_start: 0 - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - -groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - os-api: os-api-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - os-api: - address_pool: os-api-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - - management: - address_pool: management-pool01 - dhcp: false - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
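# public_api.yaml above carves out a dedicated 'os-api' network, a DMZ for
# the public OpenStack endpoints: its own address pool, its own NATed
# bridge, and a fixed NIC mapping on every slave. The three pieces,
# condensed from the template above:
address_pools:
  os-api-pool01:
    net: *pool_default
    params:
      vlan_start: 0
l2_network_devices:
  os-api:
    address_pool: os-api-pool01
    dhcp: false
    forward:
      mode: nat
network_config:
  eth2:
    networks:
      - os-api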
It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-01-slave-node-params - diff --git a/system_test/tests_templates/devops_configs/security_scan.yaml b/system_test/tests_templates/devops_configs/security_scan.yaml deleted file mode 100644 index 916f4460e..000000000 --- a/system_test/tests_templates/devops_configs/security_scan.yaml +++ /dev/null @@ -1,202 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - rack-01-slave-interfaces: &rack-01-slave-interfaces - - label: eth0 - l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: storage - interface_model: *interface_model - - label: eth3 - l2_network_device: management - interface_model: *interface_model - - label: eth4 - l2_network_device: private - interface_model: *interface_model - - rack-01-slave-network_config: &rack-01-slave-network_config - eth0: - networks: - - fuelweb_admin - eth1: - networks: - - public - eth2: - networks: - - storage - eth3: - networks: - - management - eth4: - networks: - - private - - rack-01-slave-node-params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config - -template: - devops_settings: - env_name: !os_env ENV_NAME - address_pools: - # Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - public-pool01: - net: *pool_default - params: - vlan_start: 0 - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - - groups: - - name: default - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. 
- # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - - management: - address_pool: management-pool01 - dhcp: false - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-nessus # Custom name of VM for Nessus node - role: nessus_node # Fixed role for Fuel master node properties - params: - vcpu: !os_env NESSUS_NODE_CPU, 1 - memory: !os_env NESSUS_NODE_MEMORY, 1024 - boot: - - hd - volumes: - - name: system - source_image: !os_env NESSUS_IMAGE_PATH - format: qcow2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config diff --git a/system_test/tests_templates/devops_configs/vcenter_ha_default.yaml b/system_test/tests_templates/devops_configs/vcenter_ha_default.yaml deleted file mode 100644 index 18e2e450c..000000000 --- a/system_test/tests_templates/devops_configs/vcenter_ha_default.yaml +++ /dev/null @@ -1,208 +0,0 @@ ---- -aliases: - - dynamic_address_pool: - - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24 - - default_interface_model: - - &interface_model !os_env INTERFACE_MODEL, e1000 - - rack-01-slave-interfaces: &rack-01-slave-interfaces - - label: eth0 - l2_network_device: admin # Libvirt bridge name. 
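# security_scan.yaml above ends with a non-Fuel VM: 'slave-nessus' boots
# from a prebuilt image (note 'source_image' instead of 'capacity') under
# the custom role 'nessus_node', while reusing the ordinary slave interface
# anchors. Condensed:
- name: slave-nessus
  role: nessus_node              # custom role, not fuel_slave
  params:
    boot:
      - hd                       # no network boot; the image is ready-made
    volumes:
      - name: system
        source_image: !os_env NESSUS_IMAGE_PATH
        format: qcow2
    interfaces: *rack-01-slave-interfaces
    network_config: *rack-01-slave-network_config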
It is *NOT* Nailgun networks - interface_model: *interface_model - - label: eth1 - l2_network_device: public - interface_model: *interface_model - - label: eth2 - l2_network_device: management - interface_model: *interface_model - - label: eth3 - l2_network_device: private - interface_model: *interface_model - - label: eth4 - l2_network_device: storage - interface_model: *interface_model - - rack-01-slave-network_config: &rack-01-slave-network_config - eth0: - networks: - - fuelweb_admin - eth1: - networks: - - public - eth2: - networks: - - management - eth3: - networks: - - private - eth4: - networks: - - storage - - rack-01-slave-node-params: &rack-01-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 4 - memory: !os_env SLAVE_NODE_MEMORY, 8192 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config - - rack-02-slave-node-params: &rack-02-slave-node-params - vcpu: !os_env SLAVE_NODE_CPU, 2 - memory: !os_env SLAVE_NODE_MEMORY, 3072 - boot: - - network - - hd - volumes: - - name: system - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: cinder - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - - name: swift - capacity: !os_env NODE_VOLUME_SIZE, 55 - format: qcow2 - interfaces: *rack-01-slave-interfaces - network_config: *rack-01-slave-network_config - - -env_name: !os_env ENV_NAME - -address_pools: -# Network pools used by the environment - fuelweb_admin-pool01: - net: *pool_default - params: - vlan_start: 0 - public-pool01: - net: *pool_default - params: - vlan_start: 0 - storage-pool01: - net: *pool_default - params: - vlan_start: 101 - management-pool01: - net: *pool_default - params: - vlan_start: 102 - private-pool01: - net: *pool_default - params: - vlan_start: 103 - vlan_end: 120 - -groups: - - name: cat - driver: - name: devops.driver.libvirt.libvirt_driver - params: - connection_string: !os_env CONNECTION_STRING, qemu:///system - storage_pool_name: !os_env STORAGE_POOL_NAME, default - stp: True - hpet: False - use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true - - network_pools: # Address pools for OpenStack networks. - # Actual names should be used for keys - # (the same as in Nailgun, for example) - - fuelweb_admin: fuelweb_admin-pool01 - public: public-pool01 - storage: storage-pool01 - management: management-pool01 - private: private-pool01 - - l2_network_devices: # Libvirt bridges. 
It is *NOT* Nailgun networks - admin: - address_pool: fuelweb_admin-pool01 - dhcp: false - forward: - mode: nat - - public: - address_pool: public-pool01 - dhcp: false - forward: - mode: nat - - storage: - address_pool: storage-pool01 - dhcp: false - - management: - address_pool: management-pool01 - dhcp: false - - private: - address_pool: private-pool01 - dhcp: false - - nodes: - - name: admin # Custom name of VM for Fuel admin node - role: fuel_master # Fixed role for Fuel master node properties - params: - vcpu: !os_env ADMIN_NODE_CPU, 2 - memory: !os_env ADMIN_NODE_MEMORY, 3072 - boot: - - hd - - cdrom # for boot from usb - without 'cdrom' - volumes: - - name: system - capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80 - format: qcow2 - - name: iso - source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size - format: raw - device: cdrom # for boot from usb - 'disk' - bus: ide # for boot from usb - 'usb' - interfaces: - - label: eth0 - l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network - interface_model: *interface_model - network_config: - eth0: - networks: - - fuelweb_admin - - - name: slave-01 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-02 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-03 - role: fuel_slave - params: *rack-01-slave-node-params - - name: slave-04 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-05 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-06 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-07 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-08 - role: fuel_slave - params: *rack-02-slave-node-params - - name: slave-09 - role: fuel_slave - params: *rack-02-slave-node-params diff --git a/system_test/tests_templates/tests_configs/centos_master_ceph_all_on_neutron_vlan.yaml b/system_test/tests_templates/tests_configs/centos_master_ceph_all_on_neutron_vlan.yaml deleted file mode 100644 index c8d1360cf..000000000 --- a/system_test/tests_templates/tests_configs/centos_master_ceph_all_on_neutron_vlan.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: false - volume-ceph: true - image-ceph: true - rados-ceph: true - ephemeral-ceph: false - replica-ceph: 1 - -nodes: &nodes - - roles: - - controller - count: 1 - - roles: - - ceph-osd - count: 2 - - roles: - - compute - count: 1 - -template: - name: 1 Controller, 1 Compute & 2 Ceph on Neutron/VLAN - slaves: 4 - devops_settings: !include devops_configs/centos_master.yaml - cluster_template: &environment-config - name: rados - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/ceph_all_ceilo_on_neutron_tun.yaml b/system_test/tests_templates/tests_configs/ceph_all_ceilo_on_neutron_tun.yaml deleted file mode 100644 index 5c12528c2..000000000 --- a/system_test/tests_templates/tests_configs/ceph_all_ceilo_on_neutron_tun.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 3 Controller and mongo, 3 Compute & Ceph on Neutron/TUN with Ceilometer - slaves: 6 - cluster_template: - name: CephImgVolRados - release: ubuntu - network: - !include cluster_configs/networks/neutron_tun.yaml - settings: - !include 
cluster_configs/settings/cephVolImgRados_ceilometer.yaml - nodes: - !include cluster_configs/nodes/mixed/3ctrl_mongo_3comp_ceph.yaml diff --git a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan.yaml b/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan.yaml deleted file mode 100644 index 609f6b7f6..000000000 --- a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: false - volume-ceph: true - image-ceph: true - rados-ceph: true - ephemeral-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 3 - - roles: - - compute - - ceph-osd - count: 3 - -template: - name: 3 Controller, 3 Compute & Ceph on Neutron/VLAN - slaves: 6 - devops_settings: !include devops_configs/default.yaml - cluster_template: &environment-config - name: rados - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-2groups.yaml b/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-2groups.yaml deleted file mode 100644 index fa46a1a23..000000000 --- a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-2groups.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: false - volume-ceph: true - image-ceph: true - rados-ceph: true - ephemeral-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 3 - - roles: - - compute - - ceph-osd - count: 3 - -template: - name: 3 Controller, 3 Compute & Ceph on Neutron/VLAN - slaves: 6 - devops_settings: !include devops_configs/default30-2groups.yaml - cluster_template: &environment-config - name: rados - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-bond.yaml b/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-bond.yaml deleted file mode 100644 index 46aa2901b..000000000 --- a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30-bond.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: false - volume-ceph: true - image-ceph: true - rados-ceph: true - ephemeral-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 3 - - roles: - - compute - - ceph-osd - count: 3 - -template: - name: 3 Controller, 3 Compute & Ceph on Neutron/VLAN - slaves: 6 - devops_settings: !include devops_configs/default30-bond.yaml - cluster_template: &environment-config - name: rados - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30.yaml b/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30.yaml deleted file mode 100644 index 0006d7323..000000000 --- 
a/system_test/tests_templates/tests_configs/ceph_all_on_neutron_vlan_30.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: false - volume-ceph: true - image-ceph: true - rados-ceph: true - ephemeral-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 3 - - roles: - - compute - - ceph-osd - count: 3 - -template: - name: 3 Controller, 3 Compute & Ceph on Neutron/VLAN - slaves: 6 - devops_settings: !include devops_configs/default30.yaml - cluster_template: &environment-config - name: rados - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/example_test_environment.yaml b/system_test/tests_templates/tests_configs/example_test_environment.yaml deleted file mode 100644 index 5e2676d3d..000000000 --- a/system_test/tests_templates/tests_configs/example_test_environment.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- - -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: true - volume-ceph: false - image-ceph: false - ephemeral-ceph: false - rados-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 1 - - roles: - - compute - count: 1 - - roles: - - cinder - count: 1 - -template: - name: 1 Controller, 1 Compute, 1 Cinder on Neutron/VLAN - slaves: 3 - cluster_template: &environment-config - name: env1 - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/external_haproxy.yaml b/system_test/tests_templates/tests_configs/external_haproxy.yaml deleted file mode 100644 index 5b12616ce..000000000 --- a/system_test/tests_templates/tests_configs/external_haproxy.yaml +++ /dev/null @@ -1,44 +0,0 @@ ---- - -network-config: &network-config - provider: neutron - segment-type: tun - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: false - volume-ceph: true - image-ceph: true - rados-ceph: true - ephemeral-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 3 - nodegroup: default - - roles: - - compute - count: 1 - nodegroup: rack-02 - - roles: - - ceph-osd - count: 2 - nodegroup: rack-03 - -template: - name: 3 Controller, 1 Compute, 2 Ceph on Neutron/VLAN - slaves: 6 - devops_settings: !include devops_configs/external_haproxy.yaml - cluster_template: &environment-config - name: env1 - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/ha/pairwise/1ctrl_ceph_2ctrl_1comp_1comp_ceph_neutronVLAN.yaml b/system_test/tests_templates/tests_configs/ha/pairwise/1ctrl_ceph_2ctrl_1comp_1comp_ceph_neutronVLAN.yaml deleted file mode 100644 index cf02ffcee..000000000 --- a/system_test/tests_templates/tests_configs/ha/pairwise/1ctrl_ceph_2ctrl_1comp_1comp_ceph_neutronVLAN.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller + Ceph, 2 Controller, 1 Compute, 1 Compute + Ceph on Neutron/VLAN - slaves: 5 - cluster_template: - name: HAwithCinderNeutronVLAN - release: ubuntu - 
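# The tests_configs files above all share one shape: a 'template' with a
# readable scenario name, a slave count, optionally a 'devops_settings:
# !include' pointing at one of the devops_configs removed earlier, and a
# 'cluster_template' stitched together from the anchors declared at the
# top of the same file. Skeleton of that shape ('<...>' are placeholders):
template:
  name: <scenario name>
  slaves: 6
  devops_settings: !include devops_configs/default.yaml
  cluster_template:
    name: <env name>
    release: ubuntu
    settings:
      components:
        sahara: false
        murano: false
        ceilometer: false
      storages: *storages-config   # anchors declared at the file top
      network: *network-config
    nodes: *nodes                  # entries may pin a 'nodegroup' per rack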
network: - !include cluster_configs/networks/neutron_vlan.yaml - settings: - !include cluster_configs/settings/cephVolImg_wo_components.yaml - nodes: - !include cluster_configs/nodes/single/3ctrl_2comp_1cndr.yaml diff --git a/system_test/tests_templates/tests_configs/ha/single/3ctrl_2comp_1cndr_neutronVLAN.yaml b/system_test/tests_templates/tests_configs/ha/single/3ctrl_2comp_1cndr_neutronVLAN.yaml deleted file mode 100644 index 11d8e5df0..000000000 --- a/system_test/tests_templates/tests_configs/ha/single/3ctrl_2comp_1cndr_neutronVLAN.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 3 Controller, 2 Compute, 1 Cinder Neutron/VLAN - slaves: 6 - cluster_template: - name: HAwithCinderNeutronVLAN - release: ubuntu - network: - !include cluster_configs/networks/neutron_vlan.yaml - settings: - !include cluster_configs/settings/cinder_wo_components.yaml - nodes: - !include cluster_configs/nodes/single/3ctrl_2comp_1cndr.yaml diff --git a/system_test/tests_templates/tests_configs/multipath_3_nodes.yaml b/system_test/tests_templates/tests_configs/multipath_3_nodes.yaml deleted file mode 100644 index 2752c4223..000000000 --- a/system_test/tests_templates/tests_configs/multipath_3_nodes.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- - -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: true - volume-ceph: false - image-ceph: false - ephemeral-ceph: false - rados-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 1 - - roles: - - compute - count: 1 - - roles: - - cinder - count: 1 - -template: - name: 1 Controller, 1 Compute, 1 Cinder on Neutron/VLAN - slaves: 3 - devops_settings: !include devops_configs/multipath.yaml - cluster_template: &environment-config - name: env1 - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/multirack.yaml b/system_test/tests_templates/tests_configs/multirack.yaml deleted file mode 100644 index 27af5b4a1..000000000 --- a/system_test/tests_templates/tests_configs/multirack.yaml +++ /dev/null @@ -1,44 +0,0 @@ ---- - -network-config: &network-config - provider: neutron - segment-type: tun - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: false - volume-ceph: true - image-ceph: true - rados-ceph: true - ephemeral-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 3 - nodegroup: default - - roles: - - compute - count: 1 - nodegroup: rack-02 - - roles: - - ceph-osd - count: 2 - nodegroup: rack-03 - -template: - name: 3 Controller, 1 Compute, 2 Ceph on Neutron/VLAN - slaves: 6 - devops_settings: !include devops_configs/multirack.yaml - cluster_template: &environment-config - name: env1 - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/public_api.yaml b/system_test/tests_templates/tests_configs/public_api.yaml deleted file mode 100644 index 1052dfade..000000000 --- a/system_test/tests_templates/tests_configs/public_api.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- - -network-config: &network-config - provider: neutron - segment-type: vlan - pubip-to-all: false - -storages-config: &storages-config - volume-lvm: true - volume-ceph: false - image-ceph: false - 
ephemeral-ceph: false - rados-ceph: false - replica-ceph: 2 - -nodes: &nodes - - roles: - - controller - count: 1 - - roles: - - compute - count: 1 - - roles: - - cinder - count: 1 - -template: - name: 1 Controller, 1 Compute, 1 Cinder on Neutron/VLAN with DMZ - slaves: 3 - devops_settings: !include devops_configs/public_api.yaml - cluster_template: &environment-config - name: env1 - release: ubuntu - settings: - components: - sahara: false - murano: false - ceilometer: false - storages: *storages-config - network: *network-config - nodes: *nodes diff --git a/system_test/tests_templates/tests_configs/scale/1ctrl_1comp_1cndr_neutronTUN_scale_3ctrl.yaml b/system_test/tests_templates/tests_configs/scale/1ctrl_1comp_1cndr_neutronTUN_scale_3ctrl.yaml deleted file mode 100644 index c52b06054..000000000 --- a/system_test/tests_templates/tests_configs/scale/1ctrl_1comp_1cndr_neutronTUN_scale_3ctrl.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -template: - name: 1 Controller, 1 Compute, 1 Cinder on Neutron/TUN scale Controllers to 3 - slaves: 5 - cluster_template: - name: SimpleScaleToHANeutronTUN - release: ubuntu - network: - !include cluster_configs/networks/neutron_tun.yaml - settings: - !include cluster_configs/settings/cinder_wo_components.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_1comp_1cndr.yaml - scale_nodes: - - - roles: - - controller - count: 2 diff --git a/system_test/tests_templates/tests_configs/simple/pairwise/1ctrl_1comp_1cndr_mongo_2ceph_neutronVLAN_CephImgRados_ceilometer.yaml b/system_test/tests_templates/tests_configs/simple/pairwise/1ctrl_1comp_1cndr_mongo_2ceph_neutronVLAN_CephImgRados_ceilometer.yaml deleted file mode 100644 index 915aefab1..000000000 --- a/system_test/tests_templates/tests_configs/simple/pairwise/1ctrl_1comp_1cndr_mongo_2ceph_neutronVLAN_CephImgRados_ceilometer.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -template: - name: 1 Controller, 1 Compute, 1 Cinder and Mongo, 2 Ceph on Neutron/VLAN use Ceph Image and Rados with Ceilometer - testrail: cases/view/375025 - slaves: 5 - cluster_template: - name: SimpleCinderMongoCephImgRadosCeiloNeutronVLAN - release: ubuntu - network: - !include cluster_configs/networks/neutron_vlan.yaml - settings: - !include cluster_configs/settings/cinder_cephImgRados_ceilometer.yaml - nodes: - !include cluster_configs/nodes/mixed/1ctrl_1comp_1cndr_mongo_2ceph.yaml diff --git a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_3ceph_neutronVLAN_cephImgRados.yaml b/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_3ceph_neutronVLAN_cephImgRados.yaml deleted file mode 100644 index 465c4d17f..000000000 --- a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_3ceph_neutronVLAN_cephImgRados.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller, 1 Compute, 1 Cinder, 3 Ceph on Neutron/VLAN use Ceph Image and Rados - slaves: 6 - cluster_template: - name: SimpleCinderCephImgRadosNeutronVLAN - release: ubuntu - network: - !include cluster_configs/networks/neutron_vlan.yaml - settings: - !include cluster_configs/settings/cinder_cephImgRados_wo_components.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_1comp_1cndr_3ceph.yaml diff --git a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_neutronTUN_sahara.yaml b/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_neutronTUN_sahara.yaml deleted file mode 100644 index 352e7ee1f..000000000 --- 
a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_1cndr_neutronTUN_sahara.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller, 1 Compute, 1 Cinder on Neutron/TUN with Sahara - slaves: 3 - cluster_template: - name: SimpleSaharaNeutronTUN - release: ubuntu - network: - !include cluster_configs/networks/neutron_tun.yaml - settings: - !include cluster_configs/settings/cinder_sahara.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_1comp_1cndr.yaml diff --git a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_3ceph_1mongo_neutronTUN_CephImgVol_ceilometer_sahara.yaml b/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_3ceph_1mongo_neutronTUN_CephImgVol_ceilometer_sahara.yaml deleted file mode 100644 index 640871b4a..000000000 --- a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_3ceph_1mongo_neutronTUN_CephImgVol_ceilometer_sahara.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller, 1 Compute, 3 Ceph, 1 Mongo on Neutron/TUN use Ceph Volume and Image with Ceilometer and Sahara - slaves: 6 - cluster_template: - name: SimpleCephImgVolMongoCeiloSaharaNeutronTUN - release: ubuntu - network: - !include cluster_configs/networks/neutron_tun.yaml - settings: - !include cluster_configs/settings/cephVolImg_ceilometer_sahara.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_1comp_3ceph_1mongo.yaml diff --git a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronTUN.yaml b/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronTUN.yaml deleted file mode 100644 index 0758f8625..000000000 --- a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronTUN.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller, 1 Compute on Neutron/TUN - slaves: 2 - cluster_template: - name: SimpleNeutronTUN - release: ubuntu - network: - !include cluster_configs/networks/neutron_tun.yaml - settings: - !include cluster_configs/settings/cinder_wo_components.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_1comp.yaml diff --git a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronVLAN.yaml b/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronVLAN.yaml deleted file mode 100644 index f37a3533a..000000000 --- a/system_test/tests_templates/tests_configs/simple/single/1ctrl_1comp_neutronVLAN.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller, 1 Compute on Neutron/VLAN - slaves: 2 - cluster_template: - name: SimpleNeutronVLAN - release: ubuntu - network: - !include cluster_configs/networks/neutron_vlan.yaml - settings: - !include cluster_configs/settings/cinder_wo_components.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_1comp.yaml diff --git a/system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_1cndr_3ceph_1mongo_neutronVLAN_CephImg_ceilometer.yaml b/system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_1cndr_3ceph_1mongo_neutronVLAN_CephImg_ceilometer.yaml deleted file mode 100644 index 5cf211342..000000000 --- a/system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_1cndr_3ceph_1mongo_neutronVLAN_CephImg_ceilometer.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller, 2 Compute, 1 Cinder, 3 Ceph, 1 Mongo on Neutron/VLAN use Ceph Image with Ceilometer - slaves: 8 - cluster_template: - name: SimpleCinderCephImgMongoCeiloNeutronVLAN - release: ubuntu - network: - 
!include cluster_configs/networks/neutron_vlan.yaml - settings: - !include cluster_configs/settings/cinder_cephImg_ceilometer.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_2comp_1cndr_3ceph_1mongo.yaml diff --git a/system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_neutronVLAN.yaml b/system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_neutronVLAN.yaml deleted file mode 100644 index 2645ec3ee..000000000 --- a/system_test/tests_templates/tests_configs/simple/single/1ctrl_2comp_neutronVLAN.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -template: - name: 1 Controller, 2 Compute on Neutron/VLAN - slaves: 3 - cluster_template: - name: Simple2ComputeNeutronVLAN - release: ubuntu - network: - !include cluster_configs/networks/neutron_vlan.yaml - settings: - !include cluster_configs/settings/cinder_wo_components.yaml - nodes: - !include cluster_configs/nodes/single/1ctrl_2comp.yaml diff --git a/tox.ini b/tox.ini deleted file mode 100644 index df78c4713..000000000 --- a/tox.ini +++ /dev/null @@ -1,99 +0,0 @@ -# Tox (http://tox.testrun.org/) is a tool for running tests -# in multiple virtualenvs. This configuration file will run the -# test suite on all supported python versions. To use it, "pip install tox" -# and then run "tox" from this directory. - -[tox] -skipsdist = True -envlist = pep8, py27, pylint, docs, cover, pep8-py{34,35}, pylint-py{27}-{fuelweb,system,gates,fuel} -skip_missing_interpreters = True - -[testenv] -deps = - -r{toxinidir}/fuelweb_test/requirements-devops-source.txt - -r{toxinidir}/fuelweb_test/requirements.txt - mock>=1.2 - pytest-cov -usedevelop = False -commands = - ./run_system_test.py show-all-groups - # Run unit tests and coverage: groups collect does not cover API changes - py.test --cov-config .coveragerc --cov-report html --cov=core core/_tests - coverage html -d {envlogdir} - coverage report --fail-under 75 - -[testenv:pep8] -# TODO: #deps = hacking==0.7 -deps = flake8 -usedevelop = False -commands = - flake8 {posargs:.} - -[testenv:pep8-py34] -# TODO: #deps = hacking==0.7 -deps = flake8 -usedevelop = False -commands = - flake8 {posargs:.} - -[testenv:pep8-py35] -# TODO: #deps = hacking==0.7 -deps = flake8 -usedevelop = False -commands = - flake8 {posargs:.} - -[flake8] -ignore = H302,H802 -exclude = .venv,.git,.tox,dist,doc,*egg,build -show-pep8 = True -show-source = True -count = True - -[testenv:pylint] -deps= - {[testenv]deps} - pylint -commands= - pylint --rcfile=.pylintrc_gerrit fuelweb_test system_test gates_tests fuel_tests run_system_test core - - -[testenv:pylint-py27-fuelweb] -deps= - {[testenv]deps} - pylint -commands=pylint fuelweb_test - -[testenv:pylint-py27-system] -deps= - {[testenv]deps} - pylint -commands=pylint system_test - -[testenv:pylint-py27-gates] -deps= - {[testenv]deps} - pylint -commands=pylint gates_tests - -[testenv:pylint-py27-fuel] -deps= - {[testenv]deps} - pylint -commands=pylint fuel_tests - - -[testenv:docs] -changedir = doc -deps = - {[testenv]deps} - sphinx -commands = sphinx-build -b html -d _build/doctrees . _build/html - -[testenv:cover] -deps = - {[testenv]deps} -commands = - py.test --cov-config .coveragerc --cov-report html --cov=core core/_tests - coverage html -d {envlogdir} - coverage report --fail-under 75 diff --git a/utils/jenkins/conv_snapshot_file.py b/utils/jenkins/conv_snapshot_file.py deleted file mode 100755 index 6532f4398..000000000 --- a/utils/jenkins/conv_snapshot_file.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2016 Mirantis, Inc. 
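The tests_configs templates above compose their network, settings, and node sections from shared fragments through a custom `!include` YAML tag. The constructor behind that tag lived in the deleted system_test helpers and is not part of this patch, so the following is only a minimal sketch of how such a tag can be registered with PyYAML; the loader class name and the templates root path are assumptions.

```python
# Minimal sketch of an "!include" tag for PyYAML; the loader name and
# templates root are assumptions -- the real constructor lived in the
# deleted system_test helpers and may have differed.
import os

import yaml


class TemplateLoader(yaml.SafeLoader):
    """Subclass keeps the custom tag scoped to template parsing."""

    templates_root = 'system_test/tests_templates'


def _include(loader, node):
    # The scalar is a path such as "cluster_configs/networks/neutron_vlan.yaml"
    relative_path = loader.construct_scalar(node)
    full_path = os.path.join(TemplateLoader.templates_root, relative_path)
    with open(full_path) as included:
        return yaml.load(included, Loader=TemplateLoader)


TemplateLoader.add_constructor('!include', _include)
```

With a loader like this, `yaml.load(template_text, Loader=TemplateLoader)` returns the template dict with every `!include` replaced by the parsed content of the referenced file, which is how the sibling templates share the same `cluster_configs` fragments.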
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This tool converts the artifacts file (snapshots.envfile) built by the -# Product CI "snapshots" job into a shell file (extra_repos.sh) -# which can be sourced before running the system_tests.sh script. -# -# The resulting file will provide 4 main parameters: -# - UPDATE_MASTER flag -# - UPDATE_FUEL_MIRROR simple rpm repo list -# - EXTRA_RPM_REPOS rpm repo list -# - EXTRA_DEB_REPOS deb repo list -# -# For the meaning of these variables, look into the system_tests.sh help -# -# Required env variables and their defaults: -# - ENABLE_MOS_UBUNTU_PROPOSED true -# - ENABLE_MOS_UBUNTU_UPDATES true -# - ENABLE_MOS_UBUNTU_SECURITY true -# - ENABLE_MOS_UBUNTU_HOLDBACK true -# - ENABLE_MOS_UBUNTU_HOTFIX false -# - ENABLE_MOS_CENTOS_OS true -# - ENABLE_MOS_CENTOS_PROPOSED true -# - ENABLE_MOS_CENTOS_UPDATES true -# - ENABLE_MOS_CENTOS_SECURITY true -# - ENABLE_MOS_CENTOS_HOLDBACK true -# - ENABLE_MOS_CENTOS_HOTFIX false - -import os - -# when bumping or downgrading, grep this file for version usages (it's hardcoded) -VERSION = '10.0' - -SNAPSHOT_ARTIFACTS_FILE = os.environ.get('SNAPSHOT_ARTIFACTS_FILE', - 'snapshots.params') - -SNAPSHOT_OUTPUT_FILE = os.environ.get('SNAPSHOT_OUTPUT_FILE', 'extra_repos.sh') - -MIRROR_HOST = os.environ.get( - 'MIRROR_HOST', - "mirror.seed-cz1.fuel-infra.org") -BASE_MOS_CENTOS_PATH = '' -BASE_MOS_UBUNTU_PATH = '' - -SNAPSHOT_KEYS = { - "MOS_UBUNTU_MIRROR_ID", - "MOS_CENTOS_OS_MIRROR_ID", - "MOS_CENTOS_PROPOSED_MIRROR_ID", - "MOS_CENTOS_UPDATES_MIRROR_ID", - "MOS_CENTOS_HOLDBACK_MIRROR_ID", - "MOS_CENTOS_HOTFIX_MIRROR_ID", - "MOS_CENTOS_SECURITY_MIRROR_ID" -} - -DEFAULT_MIRROR_FLAGS = { - 'ENABLE_MOS_UBUNTU_PROPOSED': True, - 'ENABLE_MOS_UBUNTU_UPDATES': True, - 'ENABLE_MOS_UBUNTU_SECURITY': True, - 'ENABLE_MOS_UBUNTU_HOLDBACK': True, - 'ENABLE_MOS_UBUNTU_HOTFIX': False, - - 'ENABLE_MOS_CENTOS_OS': True, - 'ENABLE_MOS_CENTOS_PROPOSED': True, - 'ENABLE_MOS_CENTOS_UPDATES': True, - 'ENABLE_MOS_CENTOS_SECURITY': True, - 'ENABLE_MOS_CENTOS_HOLDBACK': True, - 'ENABLE_MOS_CENTOS_HOTFIX': False -} - -_boolean_states = { - '1': True, 'yes': True, 'true': True, 'on': True, - '0': False, 'no': False, 'false': False, 'off': False} - - -def read_snapshots(filename): - if not os.path.isfile(filename): - raise Exception('Snapshot artifacts file "{0}" ' - 'not found!'.format(filename)) - with open(filename, 'rt') as f: - lines = f.read().split('\n') - # split KEY=value once: values may themselves contain '=' - data = (line.strip().split('=', 1) for line in lines if line) - data = (i for i in data if len(i) == 2) - return {k: v for k, v in data if k in SNAPSHOT_KEYS} - - -def write_test_vars(filename, test_variables): - with open(filename, 'wt') as f: - f.write( - '\n'.join( - ["{0}='{1}'".format(k.upper(), v) - for k, v in test_variables.items()] - ) - ) - - -def get_var_as_bool(name, default): - value = os.environ.get(name, '') - return _boolean_states.get(value.lower(), default) - - -def read_mirror_flags(): - return { - k: get_var_as_bool(k, v) - for k, v - in DEFAULT_MIRROR_FLAGS.items()} - - -def combine_deb_url( - 
snapshot_id, - mirror_host=MIRROR_HOST): - return ("http://{mirror_host}/mos-repos/ubuntu/snapshots/" - "{snapshot_id}".format(mirror_host=mirror_host, - version=VERSION, - snapshot_id=snapshot_id)) - - -def combine_rpm_url( - snapshot_id, - mirror_host=MIRROR_HOST): - return ("http://{mirror_host}/mos-repos/centos/mos{version}-centos7/" - "snapshots/{snapshot_id}/x86_64".format(mirror_host=mirror_host, - version=VERSION, - snapshot_id=snapshot_id)) - - -def g_build_extra_deb_repos( - snapshots, - mirror_flags=DEFAULT_MIRROR_FLAGS): - repo_url = combine_deb_url(snapshots['MOS_UBUNTU_MIRROR_ID']) - for dn in ( - 'proposed', - 'updates', - 'security', - 'holdback', - 'hotfix'): - if mirror_flags['ENABLE_MOS_UBUNTU_{}'.format(dn.upper())]: - yield ("mos-{dn},deb {repo_url} mos{version}-" - "{dn} main restricted".format(dn=dn, - repo_url=repo_url, - version=VERSION)) - - -def g_build_extra_rpm_repos( - snapshots, - mirror_flags=DEFAULT_MIRROR_FLAGS): - for dn in ( - 'os', - 'proposed', - 'updates', - 'security', - 'holdback', - 'hotfix'): - if mirror_flags['ENABLE_MOS_CENTOS_{}'.format(dn.upper())]: - repo_url = combine_rpm_url( - snapshots['MOS_CENTOS_{}_MIRROR_ID'.format(dn.upper())]) - yield 'mos-{dn},{repo_url}'.format(**locals()) - - -def g_build_update_fuel_mirror( - snapshots, - mirror_flags=DEFAULT_MIRROR_FLAGS): - for dn in ( - 'os', - 'proposed', - 'updates', - 'security', - 'holdback', - 'hotfix'): - if mirror_flags['ENABLE_MOS_CENTOS_{}'.format(dn.upper())]: - repo_url = combine_rpm_url( - snapshots['MOS_CENTOS_{}_MIRROR_ID'.format(dn.upper())]) - yield '{repo_url}'.format(**locals()) - - -def main(): - snapshots = read_snapshots(SNAPSHOT_ARTIFACTS_FILE) - mirror_flags = read_mirror_flags() - - test_variables = dict() - - test_variables['extra_deb_repos'] = '|'.join( - g_build_extra_deb_repos(snapshots, mirror_flags=mirror_flags)) - - test_variables['extra_rpm_repos'] = '|'.join( - g_build_extra_rpm_repos(snapshots, mirror_flags=mirror_flags)) - - test_variables['update_fuel_mirror'] = ' '.join( - g_build_update_fuel_mirror(snapshots, mirror_flags=mirror_flags)) - - # no reasons to update master if no repos provided - test_variables['update_master'] = ('true' - if test_variables['update_fuel_mirror'] - else 'false') - - write_test_vars(SNAPSHOT_OUTPUT_FILE, test_variables) - - -if __name__ == '__main__': - main() diff --git a/utils/jenkins/fuel_logs.py b/utils/jenkins/fuel_logs.py deleted file mode 100755 index 9bb7b6bbf..000000000 --- a/utils/jenkins/fuel_logs.py +++ /dev/null @@ -1,890 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -This tool can extract the useful lines from Astute and Puppet logs -within the Fuel log snapshot or on the live Fuel Master node. 
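The conv_snapshot_file.py tool deleted above boils down to three steps: parse KEY=value pairs from snapshots.params, consult the ENABLE_* environment flags, and write upper-cased shell assignments into extra_repos.sh. A compressed, self-contained sketch of that contract follows; the snapshot ID and the single repo line are made up for illustration.

```python
# Condensed sketch of the snapshots.params -> extra_repos.sh contract;
# the sample snapshot ID is invented, while the URL scheme and the
# "name,deb <url> <suite> main restricted" repo format follow the tool above.
VERSION = '10.0'
MIRROR_HOST = 'mirror.seed-cz1.fuel-infra.org'

sample_params = 'MOS_UBUNTU_MIRROR_ID=2016-12-01-164322\nUNRELATED_KEY=ignored\n'

# KEY=value parsing; the real tool keeps only the keys in SNAPSHOT_KEYS
snapshots = {
    key: value
    for key, _, value in (
        line.partition('=') for line in sample_params.splitlines() if '=' in line)
    if key.startswith('MOS_')}

repo_url = 'http://{host}/mos-repos/ubuntu/snapshots/{sid}'.format(
    host=MIRROR_HOST, sid=snapshots['MOS_UBUNTU_MIRROR_ID'])
extra_deb_repos = 'mos-proposed,deb {url} mos{v}-proposed main restricted'.format(
    url=repo_url, v=VERSION)

with open('extra_repos.sh', 'w') as out:
    # system_tests.sh sources this file, hence upper-cased shell variables
    out.write("EXTRA_DEB_REPOS='{0}'\n".format(extra_deb_repos))
```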
- -usage: fuel_logs [-h] [--astute] [--puppet] [--clear] [--sort] [--evals] - [--mcagent] [--less] - [SNAPSHOT [SNAPSHOT ...]] - -positional arguments: - SNAPSHOT Take logs from these snapshots - -optional arguments: - -h, --help show this help message and exit - --astute, -a Parse Astute log - --puppet, -p Parse Puppet logs - --clear, -c Clear the logs on the master node - --sort, -s Sort Puppet logs by date - --evals, -e Show Puppet evaltrace lines - --mcagent, -m Show Astute MCAgent calls debug - --less, -l Redirect data to the "less" pager - -Using anywhere to view Fuel snapshot data: - -fuel_logs.py fail_error_deploy_ha_vlan-2015_02_20__20_35_18.tar.gz - -Using on the live Fuel Master node: - -# View the current Astute log -fuel_logs.py -a -# View the current Puppet logs -fuel_logs.py -p - -Using without -a and -p options assumes both options - -fuel_logs.py -c Truncates Astute and Puppet logs. Respects -a and -p options. - -It you are running and debugging many deployments on a single Fuel Master -node, you may want to truncate the logs from the previous deployments. -Using -l option is also recommended for interactive use. -""" - -import argparse -from datetime import datetime -import os -import re -import sys -import tarfile - -PUPPET_LOG = 'puppet-apply.log' -ASTUTE_LOG = 'astute.log' - - -class IO(object): - """ - This object does the input, the output and the main application logic - """ - pipe = None - args = None - - @classmethod - def separator(cls): - """ - Draw a separator line if both Puppet and Astute logs are enabled - :return: - """ - if cls.args.puppet and cls.args.astute: - IO.output('#' * 79 + "\n") - - @classmethod - def process_snapshots(cls): - """ - Extract the logs from the snapshots and process the logs - :return: - """ - for snapshot in cls.args.snapshots: - - if not os.path.isfile(snapshot): - continue - - with FuelSnapshot(snapshot) as fuel_snapshot: - - if cls.args.astute: - fuel_snapshot.parse_astute_log( - show_mcagent=cls.args.mcagent, - show_full=cls.args.full, - - ) - - cls.separator() - - if cls.args.puppet: - fuel_snapshot.parse_puppet_logs( - enable_sort=cls.args.sort, - show_evals=cls.args.evals, - show_full=cls.args.full, - ) - - @classmethod - def process_logs(cls): - """ - Read the logs on the live Fuel Master node and process them - :return: - """ - fuel_logs = FuelLogs() - if cls.args.astute: - if cls.args.clear: - fuel_logs.clear_astute_logs() - else: - fuel_logs.parse_astute_logs( - show_mcagent=cls.args.mcagent, - show_full=cls.args.full, - ) - - cls.separator() - - if cls.args.puppet: - if cls.args.clear: - fuel_logs.clear_puppet_logs() - else: - fuel_logs.parse_puppet_logs( - enable_sort=cls.args.sort, - show_evals=cls.args.evals, - show_full=cls.args.full, - ) - - @classmethod - def main(cls): - """ - The main application workflow - :return: - """ - cls.options() - - if cls.args.less: - cls.open_pager() - - if len(cls.args.snapshots) == 0: - cls.process_logs() - else: - cls.process_snapshots() - - if cls.args.less: - cls.close_pager() - - @classmethod - def open_pager(cls): - """ - Open the pipe to the pager subprocess in order - to display the output there - :return: - """ - cls.pipe = os.popen('less --chop-long-lines', 'w') - - @classmethod - def close_pager(cls): - """ - Close the pager process and finish the output - :return: - """ - cls.pipe.close() - cls.pipe = None - - @classmethod - def output(cls, line): - """ - Output a single line of text to the console - or to the pager - :param line: the line to display - :type line: str - 
:return: - """ - if not line.endswith('\n'): - line += '\n' - if not cls.pipe: - sys.stdout.write(line) - else: - cls.pipe.write(line) - - @classmethod - def options(cls): - """ - Parse the input options and parameters - :return: arguments structure - """ - parser = argparse.ArgumentParser() - parser.add_argument("--astute", "-a", - action="store_true", - default=False, - help='Parse Astute log') - parser.add_argument("--puppet", "-p", - action="store_true", - default=False, - help='Parse Puppet logs') - parser.add_argument("--clear", "-c", - action="store_true", - default=False, - help='Clear the logs on the Fuel Master node') - parser.add_argument("--sort", "-s", - action="store_true", - default=False, - help='Sort Puppet logs by date') - parser.add_argument("--evals", "-e", - action="store_true", - default=False, - help='Show Puppet evaltrace lines') - parser.add_argument("--mcagent", "-m", - action="store_true", - default=False, - help='Show Astute MCAgent calls debug') - parser.add_argument("--less", "-l", - action="store_true", - default=False, - help='Redirect data to the "less" pager') - parser.add_argument("--full", "-f", - action="store_true", - default=False, - help='Full output without filters') - parser.add_argument('snapshots', - metavar='SNAPSHOT', - type=str, - nargs='*', - default=[], - help='Take logs from these snapshots') - cls.args = parser.parse_args() - if not cls.args.puppet and not cls.args.astute: - cls.args.puppet = True - cls.args.astute = False - return cls.args - - -class AbstractLog(object): - """ - The abstract log object with common methods - Attributes: - content logging content list - log list() of collected logging records - """ - - def __init__(self): - self.content = [] - self.log = [] - - def clear(self): - """ - Clear the parsed and raw log contents - :return: - """ - self.content = [] - self.log = [] - - def catch_record(self, record, include_markers=None, exclude_markers=None): - """ - Add a record to the parsed log if any of the include marker are - found in the record and any of the exclude markers are not - :param record The record from the input log - :type record str - :param include_markers Array of include markers - :type include_markers list - :param exclude_markers Array of exclude markers - :type exclude_markers list - """ - match = False - if not include_markers: - return - for marker in include_markers: - if marker in record: - match = True - break - if exclude_markers: - for marker in exclude_markers: - if marker in record: - match = False - break - if match: - self.add_record(record) - - def each_record(self): - """ - Abstract record iterator that iterates - through the content lines - :return: iter - """ - for record in self.content: - yield record.decode() - - def parse(self, content): - """ - Abstract parser that adds every line - :param content: Input log content - :type content: str - :return: - """ - self.content = content.splitlines() - for record in self.each_record(): - self.add_record(record) - - def output(self): - """ - Output the parsed log content - :return: - """ - for record in self.log: - IO.output(record) - - @staticmethod - def normalize_record(record): - """ - Normalize newlines inside the text of the record - :param record Record text - :type record str - :return Normalized record - :rtype: str - """ - record = record.replace('\n', ' ') - record = record.replace('\\n', ' ') - record = ' '.join(record.split()) - if not record.endswith('\n'): - record += '\n' - return record - - def add_record(self, record): - """ - 
Add this record to the result log - :param record Record text - :type record str - """ - record = self.normalize_record(record) - self.log.append(record) - - -class AstuteLog(AbstractLog): - """ - This class is responsible for Astute log parsing - Attributes: - show_mcagent enable or disable MCAgent debug strings - """ - - def __init__(self): - self.show_mcagent = False - self.show_full = False - super(AstuteLog, self).__init__() - - def parse(self, content): - """ - Parse the string containing the log content - :param content: the log file content - :type content: str - :return: - """ - self.content = content.splitlines() - for record in self.each_record(): - if self.show_full: - self.add_record(record) - else: - self.rpc_call(record) - self.rpc_cast(record) - self.task_status(record) - self.task_run(record) - self.hook_run(record) - if self.show_mcagent: - self.cmd_exec(record) - self.mc_agent_results(record) - - def each_record(self): - """ - Iterates through the multi line records of the log file - :return: iter - """ - record = '' - date_regexp = re.compile(r'^\d+-\d+-\S+\s') - for bline in self.content: - line = bline.decode() - if re.match(date_regexp, line): - yield record - record = line - else: - record += line - yield record - - def rpc_call(self, record): - """ - Catch the lines with RPC calls from Nailgun to Astute - :param record: log record - :type record: str - :return: - """ - include_markers = ['Processing RPC call'] - self.catch_record(record, include_markers) - - def rpc_cast(self, record): - """ - Catch the lines with RPC casts from Astute to Nailgun - :param record: log record - :type record: str - :return: - """ - include_markers = ['Casting message to Nailgun'] - exclude_markers = ['deploying', 'provisioning'] - self.catch_record(record, include_markers, exclude_markers) - - def task_status(self, record): - """ - Catch the lines with modular task status reports - :param record: log record - :type record: str - :return: - """ - include_markers = ['Task'] - exclude_markers = ['deploying'] - self.catch_record(record, include_markers, exclude_markers) - - def task_run(self, record): - """ - Catch the lines with modular task run debug structures - :param record: log record - :type record: str - :return: - """ - include_markers = ['run task'] - self.catch_record(record, include_markers) - - def hook_run(self, record): - """ - Catch the lines with Astute pre/post deploy hooks debug structures - :param record: log record - :type record: str - :return: - """ - include_markers = ['Run hook'] - self.catch_record(record, include_markers) - - def cmd_exec(self, record): - """ - Catch the lines with cmd execution debug reports - :param record: log record - :type record: str - :return: - """ - include_markers = ['cmd:', 'stdout:', 'stderr:'] - self.catch_record(record, include_markers) - - def mc_agent_results(self, record): - """ - Catch the lines with MCAgent call traces - :param record: log record - :type record: str - :return: - """ - include_markers = ['MC agent'] - exclude_markers = ['puppetd'] - self.catch_record(record, include_markers, exclude_markers) - - -class PuppetLog(AbstractLog): - """ - This class is responsible for Puppet log parsing - Attributes: - log_name name for logger - show_evals show of Puppet evaltrace lines - enable_sort sorting log lines by event time - """ - - def __init__(self): - self.log_name = None - self.show_evals = False - self.enable_sort = False - self.show_full = False - super(PuppetLog, self).__init__() - - def parse(self, content): - """ - 
Parse the string with Puppet log content - :param content: Puppet log - :type content: str - :return: - """ - self.content = content.splitlines() - for record in self.each_record(): - if self.show_full: - self.add_record(record) - else: - self.err_line(record) - self.catalog_start(record) - self.catalog_end(record) - self.catalog_modular(record) - if self.show_evals: - self.resource_evaluation(record) - - @staticmethod - def node_name(string): - """ - Extract the node name from the Puppet log name - It is used to mark log lines in the output - :param string: log name - :type string: str - :return: node name - :rtype: str - """ - path_elements = string.split('/') - try: - log_index = path_elements.index(PUPPET_LOG) - except ValueError: - return None - name_index = log_index - 1 - if name_index < 0: - return None - return path_elements[name_index] - - def output(self): - """ - Output the collected log lines sorting - them if enabled - :return: - """ - if self.enable_sort: - self.sort_log() - previous_log = None - for record in self.log: - log = record.get('log', None) - if log and not self.enable_sort and previous_log != log: - IO.output("Log file: '{0}'".format(log)) - previous_log = log - time = record.get('time', None) - line = record.get('line', None) - if not (log and time and line): - continue - IO.output("{name:s} {time:s} {line:s}".format( - name=self.node_name(log), - time=time.isoformat(), - line=line - )) - - def sort_log(self): - """ - Sort the collected log lines bu the event date and time - :return: - """ - self.log = sorted(self.log, - key=lambda record: record.get('time', None)) - - def convert_record(self, line): - """ - Split the log line to date, log name and event string - :param line: log line - :type line: str - :return: log record - :rtype: dict - """ - fields = line.split() - time = fields[0] - line = ' '.join(fields[1:]) - time = time[0:26] - try: - time = datetime.strptime(time, "%Y-%m-%dT%H:%M:%S.%f") - except ValueError: - return - record = { - 'time': time, - 'line': self.normalize_record(line), - 'log': self.log_name, - } - return record - - def add_record(self, record): - """ - Add this record to the result log - :param record: Record text - :type record: str - :return: - """ - record = self.convert_record(record) - if record: - self.log.append(record) - - def err_line(self, record): - """ - Catch lines that are marked as 'err:' - :param record: log line - :type record: str - :return: - """ - include_markers = ['err:'] - self.catch_record(record, include_markers) - - def catalog_end(self, record): - """ - Catch the end of the catalog run - :param record: log line - :type record: str - :return: - """ - include_markers = ['Finished catalog run'] - self.catch_record(record, include_markers) - - def catalog_start(self, record): - """ - Catch the end of the catalog compilation and start of the catalog run - :param record: log line - :type record: str - :return: - """ - include_markers = ['Compiled catalog for'] - self.catch_record(record, include_markers) - - def catalog_modular(self, record): - """ - Catch the MODULAR marker of the modular tasks - :param record: log line - :type record: str - :return: - """ - include_markers = ['MODULAR'] - self.catch_record(record, include_markers) - - def resource_evaluation(self, record): - """ - Catch the evaltrace lines marking every resource - processing start and end - :param record: log line - :type record: str - :return: - """ - include_markers = [ - 'Starting to evaluate the resource', - 'Evaluated in', - ] - 
self.catch_record(record, include_markers) - - -class FuelSnapshot(object): - """ - This class extracts data from the Fuel log snapshot - """ - - def __init__(self, snapshot): - if not os.path.isfile(snapshot): - raise RuntimeError('File "%s" not found!' % snapshot) - self.snapshot = snapshot - - def __enter__(self): - """ - Enter the context manager - """ - self.open_fuel_snapshot(self.snapshot) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """ - Exit the context manager - """ - self.close_fuel_snapshot() - - def open_fuel_snapshot(self, snapshot): - """ - Open the Fuel log snapshot file - :param snapshot: path to file - :type snapshot: str - :return: - """ - self.snapshot = tarfile.open(snapshot) - - def close_fuel_snapshot(self): - """ - Close the Fuel log snapshot file - :return: - """ - if self.snapshot: - self.snapshot.close() - - def astute_logs(self): - """ - Find the Astute logs in the snapshot archive - :return: iter - """ - for log in self.snapshot.getmembers(): - if not log.isfile(): - continue - if log.name.endswith(ASTUTE_LOG): - yield log - - def puppet_logs(self): - """ - Find the Puppet logs inside the snapshot archive - :return: iter - """ - for log in self.snapshot.getmembers(): - if not log.isfile(): - continue - if log.name.endswith(PUPPET_LOG): - yield log - - def parse_log(self, log_file, parser): - """ - Extract from the snapshot and parse the log - using a given parser object - :param log_file Path to the log file in the archive - :type log_file str - :param parser Parser object - :type parser PuppetLog, AstuteLog - """ - log = self.snapshot.extractfile(log_file) - content = log.read() - parser.parse(content) - - def parse_astute_log(self, - show_mcagent=False, - show_full=False): - """ - Parse the Astute log from the archive - :param show_mcagent: show or hide MCAgent debug - :type show_mcagent: bool - :param show_full: show all lines, bypassing the filters - :type show_full: bool - :return: - """ - astute_logs = AstuteLog() - astute_logs.show_mcagent = show_mcagent - astute_logs.show_full = show_full - for astute_log in self.astute_logs(): - self.parse_log(astute_log, astute_logs) - astute_logs.output() - astute_logs.clear() - - def parse_puppet_logs(self, - enable_sort=False, - show_evals=False, - show_full=False): - """ - Parse the Puppet logs found inside the archive - :param enable_sort: enable sorting of logs by date - :type enable_sort: bool - :param show_evals: show evaltrace lines in the logs - :type show_evals: bool - :param show_full: show all lines, bypassing the filters - :type show_full: bool - :return: - """ - puppet_logs = PuppetLog() - puppet_logs.show_evals = show_evals - puppet_logs.enable_sort = enable_sort - puppet_logs.show_full = show_full - for puppet_log in self.puppet_logs(): - puppet_logs.log_name = puppet_log.name - self.parse_log(puppet_log, puppet_logs) - puppet_logs.output() - puppet_logs.clear() - - -class FuelLogs(object): - """ - This class works with Astute and Puppet logs on the - live Fuel master system - """ - - def __init__(self, log_dir='/var/log'): - self.log_dir = log_dir - - def find_logs(self, name): - """ - Find log files with the given name - :return: iter - """ - # os.walk() yields (root, dirs, files); ignore the directory list - for root, _dirs, files in os.walk(self.log_dir): - for log_file in files: - if log_file == name: - path = os.path.join(root, log_file) - yield path - - def puppet_logs(self): - """ - Find the Puppet logs in the log directory - :return: iter - """ - return self.find_logs(PUPPET_LOG) - - def astute_logs(self): - """ - Find the Astute logs in the log directory - :return: iter - """ - return self.find_logs(ASTUTE_LOG) - - @staticmethod - def truncate_log(log_file): - """ - Truncate the log in the log 
dir. It's better to - truncate the logs between several deployment runs - to drop all the previous lines. - :param log_file: path to log file - :type log_file: str - :return: - """ - if not os.path.isfile(log_file): - return - IO.output('Clear log: %s' % log_file) - with open(log_file, 'w') as log: - log.truncate() - - @staticmethod - def parse_log(log_file, parser): - """ - Read the log file and parse it using the given parser object - :param log_file Opened file object - :type log_file FileIO - :param parser Parser object - :type parser PuppetLog, AstuteLog - """ - content = log_file.read() - parser.parse(content) - - def parse_astute_logs(self, - show_mcagent=False, - show_full=False): - """ - Parse Astute log on the Fuel Master system - :param show_mcagent: show MCAgent call debug - :type show_mcagent: bool - :return: - """ - astute_logs = AstuteLog() - astute_logs.show_mcagent = show_mcagent - astute_logs.show_full = show_full - for astute_log in self.astute_logs(): - with open(astute_log, 'r') as log: - self.parse_log(log, astute_logs) - astute_logs.output() - astute_logs.clear() - - def parse_puppet_logs(self, - enable_sort=False, - show_evals=False, - show_full=False): - """ - Parse Puppet logs on the Fuel Master system - :param enable_sort: sort log files by date - :type enable_sort: bool - :param show_evals: show evaltrace lines - :type show_evals: bool - :return: - """ - puppet_logs = PuppetLog() - puppet_logs.show_evals = show_evals - puppet_logs.enable_sort = enable_sort - puppet_logs.show_full = show_full - for puppet_log in self.puppet_logs(): - with open(puppet_log, 'r') as log: - puppet_logs.log_name = puppet_log - self.parse_log(log, puppet_logs) - puppet_logs.output() - puppet_logs.clear() - - def clear_logs(self, iterator): - """ - Clear all the logs found by the iterator_function - :param iterator An iterator with a list of files - :type iterator iter - """ - for log in iterator: - self.truncate_log(log) - - def clear_astute_logs(self): - """ - Clear all Astute logs found in the log dir - :return: - """ - self.clear_logs(self.astute_logs()) - - def clear_puppet_logs(self): - """ - Clear all Puppet logs found in the log dir - :return: - """ - self.clear_logs(self.puppet_logs()) - -############################################################################## - - -if __name__ == '__main__': - IO.main() diff --git a/utils/jenkins/system_tests.sh b/utils/jenkins/system_tests.sh deleted file mode 100755 index 6e79da075..000000000 --- a/utils/jenkins/system_tests.sh +++ /dev/null @@ -1,395 +0,0 @@ -#!/bin/sh -PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -# functions - -INVALIDOPTS_ERR=100 -NOJOBNAME_ERR=101 -NOISOPATH_ERR=102 -NOTASKNAME_ERR=103 -NOWORKSPACE_ERR=104 -DEEPCLEAN_ERR=105 -MAKEISO_ERR=106 -NOISOFOUND_ERR=107 -COPYISO_ERR=108 -SYMLINKISO_ERR=109 -CDWORKSPACE_ERR=110 -ISODOWNLOAD_ERR=111 -INVALIDTASK_ERR=112 - -# Defaults - -export REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-5000} -export ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT=${ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:-true} - -ShowHelp() { -cat << EOF -System Tests Script - -It can perform several actions depending on Jenkins JOB_NAME it's ran from -or it can take names from exported environment variables or command line options -if you do need to override them. - --w (dir) - Path to workspace where fuelweb git repository was checked out. - Uses Jenkins' WORKSPACE if not set --e (name) - Directly specify environment name used in tests - Uses ENV_NAME variable is set. --j (name) - Name of this job. 
Determines the ISO name and the task name; also used by tests. - Uses Jenkins' JOB_NAME if not set --v - Do not use virtual environment --V (dir) - Path to python virtual environment --i (file) - Full path to ISO file to build or use for tests. - Made from iso dir and name if not set. --o (str) - Allows you to pass any extra command line option to the test job if you - want to use some parameters. --a (str) - Allows you to pass NOSE_ATTR to the test job if you want - to use some parameters. --A (str) - Allows you to pass NOSE_EVAL_ATTR if you want to enter attributes - as python expressions. --U - ISO URL for tests. - Null by default. --b (num) - Allows you to override Jenkins' build number if you need to. --l (dir) - Path to logs directory. Can be set by LOGS_DIR environment variable. - Uses WORKSPACE/logs if not set. --L - Disable the fuel_logs tool that extracts the useful lines from Astute and Puppet logs - within the Fuel log snapshot or on the live Fuel Master node. --d - Dry run mode. Only show what would be done and do nothing. - Useful for debugging. --k - Keep previously created test environment before tests run --K - Keep test environment after tests are finished --R (name) - Name of the package where requirements.txt is located. For use with the option -N only. - Uses 'fuelweb_test' if option is not set. --N - Install PyPI packages from 'requirements.txt'. --h - Show this help page - -Most variables are guessed from Jenkins' job name but can be overridden -by an exported variable before the script is run or by one of the command line options. - -You can override the following variables using export VARNAME="value" before running this script -WORKSPACE - path to directory where Fuelweb repository was checked out by Jenkins or manually -JOB_NAME - name of Jenkins job that determines which task should be done and ISO file name. - -If the task name is "iso", an ISO file will be built. -Other defined names will run Nose tests using a previously built ISO file. - -ISO file name is taken from the job name prefix -Task name is taken from the job name suffix -Separator is one dot '.' - -For example if JOB_NAME is: -mytest.somestring.iso -ISO name: mytest.iso -Task name: iso -If run with such a JOB_NAME, an ISO file named mytest.iso will be created - -If JOB_NAME is: -mytest.somestring.node -ISO name: mytest.iso -Task name: node -If the script is run with this JOB_NAME, node tests will use the ISO file mytest.iso. - -First you should run the mytest.somestring.iso job to create mytest.iso. -Then you can run the mytest.somestring.node job to start tests using mytest.iso and other tests too. -EOF -} - -GlobalVariables() { - # where built ISOs should be placed - # use hardcoded default if not set before by export - ISO_DIR="${ISO_DIR:=/var/www/fuelweb-iso}" - - # name of iso file - # taken from jenkins job prefix - # if not set before by variable export - if [ -z "${ISO_NAME}" ]; then - ISO_NAME="${JOB_NAME%.*}.iso" - fi - - # full path where iso file should be placed - # made from iso name and path to iso shared directory - # if not overridden by options or export - if [ -z "${ISO_PATH}" ]; then - ISO_PATH="${ISO_DIR}/${ISO_NAME}" - fi - - # only show what commands would be executed but do nothing - # this feature is useful if you want to debug this script's behaviour - DRY_RUN="${DRY_RUN:=no}" - - VENV="${VENV:=yes}" - - # Path to the directory where requirements.txt is placed. - # Default place is ./fuelweb_test/requirements.txt - REQUIREMENTS_DIR="${REQUIREMENTS_DIR:=fuelweb_test}" - - # Perform requirements update from the requirements.txt file. 
Default = no. - UPDATE_REQUIREMENTS="${UPDATE_REQUIREMENTS:=no}" -} - -GetoptsVariables() { - while getopts ":w:j:i:t:o:a:A:m:U:r:b:V:l:LdkKNe:v:R:h" opt; do - case ${opt} in - w) - WORKSPACE="${OPTARG}" - ;; - j) - JOB_NAME="${OPTARG}" - ;; - i) - ISO_PATH="${OPTARG}" - ;; - t) - echo "Option 'TASK_NAME' deprecated." - ;; - o) - TEST_OPTIONS="${TEST_OPTIONS} ${OPTARG}" - ;; - a) - NOSE_ATTR="${OPTARG}" - ;; - A) - NOSE_EVAL_ATTR="${OPTARG}" - ;; - m) - echo "Option 'USE_MIRROR' deprecated." - ;; - U) - ISO_URL="${OPTARG}" - ;; - r) - echo "Option 'ROTATE_ISO' deprecated." - ;; - b) - BUILD_NUMBER="${OPTARG}" - ;; - V) - VENV_PATH="${OPTARG}" - ;; - l) - LOGS_DIR="${OPTARG}" - ;; - L) - FUELLOGS_TOOL="no" - ;; - k) - KEEP_BEFORE="yes" - ;; - K) - KEEP_AFTER="yes" - ;; - e) - ENV_NAME="${OPTARG}" - ;; - d) - DRY_RUN="yes" - ;; - v) - VENV="no" - ;; - R) - REQUIREMENTS_DIR="${OPTARG}" - ;; - N) - UPDATE_REQUIREMENTS="yes" - ;; - h) - ShowHelp - exit 0 - ;; - \?) - echo "Invalid option: -$OPTARG" - ShowHelp - exit ${INVALIDOPTS_ERR} - ;; - :) - echo "Option -$OPTARG requires an argument." - ShowHelp - exit ${INVALIDOPTS_ERR} - ;; - esac - done -} - -CheckVariables() { - - if [ -z "${JOB_NAME}" ]; then - echo "Error! JOB_NAME is not set!" - exit ${NOJOBNAME_ERR} - fi - - if [ -z "${ISO_PATH}" ]; then - echo "Error! ISO_PATH is not set!" - exit ${NOISOPATH_ERR} - fi - - if [ -z "${WORKSPACE}" ]; then - echo "Error! WORKSPACE is not set!" - exit ${NOWORKSPACE_ERR} - fi -} - -CdWorkSpace() { - # chdir into workspace or fail if could not - if [ "${DRY_RUN}" != "yes" ]; then - cd "${WORKSPACE}" - ec=$? - - if [ "${ec}" -gt "0" ]; then - echo "Error! Cannot cd to WORKSPACE!" - exit ${CDWORKSPACE_ERR} - fi - else - echo cd "${WORKSPACE}" - fi -} - -CheckRequirements() { - REQUIREMENTS_PATH="${WORKSPACE}/${REQUIREMENTS_DIR}" - - if [ "${UPDATE_REQUIREMENTS}" = "yes" ]; then - if [ -f "${REQUIREMENTS_PATH}/requirements.txt" ]; then - # Install packages from requirements.txt - pip install -r "${REQUIREMENTS_PATH}/requirements.txt" - fi - - if [ -f "${REQUIREMENTS_PATH}/requirements-devops.txt" ]; then - # Try to install fuel-devops as a package, to control that - # the required version of fuel-devops is already installed. - # Installation will fail if fuel-devops is not installed or - # installed with an incorrect version (until it becomes a PyPI package) - pip install -r "${REQUIREMENTS_PATH}/requirements-devops.txt" - fi - fi -} - -ActivateVirtualenv() { - if [ -z "${VENV_PATH}" ]; then - VENV_PATH="/home/jenkins/venv-nailgun-tests" - fi - - # run python virtualenv - if [ "${VENV}" = "yes" ]; then - if [ "${DRY_RUN}" = "yes" ]; then - echo . ${VENV_PATH}/bin/activate - else - . ${VENV_PATH}/bin/activate - fi - fi -} - -RunTest() { - # Run test selected by task name - - # check if iso file exists - if [ ! -f "${ISO_PATH}" ]; then - if [ -z "${ISO_URL}" -a "${DRY_RUN}" != "yes" ]; then - echo "Error! File ${ISO_PATH} not found and no ISO_URL (-U key) for downloading!" - exit ${NOISOFOUND_ERR} - else - if [ "${DRY_RUN}" = "yes" ]; then - echo wget -c ${ISO_URL} -O ${ISO_PATH} - else - echo "No ${ISO_PATH} found. Trying to download file." - wget -c ${ISO_URL} -O ${ISO_PATH} - rc=$? - if [ ${rc} -ne 0 ]; then - echo "Failed to fetch ISO from ${ISO_URL}" - exit ${ISODOWNLOAD_ERR} - fi - fi - fi - fi - - if [ "${ENV_NAME}" = "" ]; then - ENV_NAME="${JOB_NAME}_system_test" - fi - - if [ "${LOGS_DIR}" = "" ]; then - LOGS_DIR="${WORKSPACE}/logs" - fi - - # LOGS_DIR is a directory, so test it with -d - if [ ! 
-f "$LOGS_DIR" ]; then - mkdir -p ${LOGS_DIR} - fi - - export ENV_NAME - export LOGS_DIR - export ISO_PATH - - if [ "${KEEP_BEFORE}" != "yes" ]; then - # remove previous environment - if [ "${DRY_RUN}" = "yes" ]; then - echo dos.py erase "${ENV_NAME}" - else - if [ $(dos.py list | grep "^${ENV_NAME}\$") ]; then - dos.py erase "${ENV_NAME}" - fi - fi - fi - - # gather additional option for this nose test run - OPTS="" - if [ -n "${NOSE_ATTR}" ]; then - OPTS="${OPTS} -a ${NOSE_ATTR}" - fi - if [ -n "${NOSE_EVAL_ATTR}" ]; then - OPTS="${OPTS} -A ${NOSE_EVAL_ATTR}" - fi - if [ -n "${TEST_OPTIONS}" ]; then - OPTS="${OPTS} ${TEST_OPTIONS}" - fi - - # run python test set to create environments, deploy and test product - if [ "${DRY_RUN}" = "yes" ]; then - echo export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}" - echo python run_system_test.py run -q --nologcapture --with-xunit ${OPTS} - else - export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}" - echo ${PYTHONPATH} - python run_system_test.py run -q --nologcapture --with-xunit ${OPTS} - - fi - ec=$? - - # Extract logs using fuel_logs utility - if [ "${FUELLOGS_TOOL}" != "no" ]; then - for logfile in $(find "${LOGS_DIR}" -name "fail*.tar.[gx]z" -type f); - do - ./utils/jenkins/fuel_logs.py "${logfile}" > "${logfile}.filtered.log" - done - fi - - if [ "${KEEP_AFTER}" != "yes" ]; then - # remove environment after tests - if [ "${DRY_RUN}" = "yes" ]; then - echo dos.py destroy "${ENV_NAME}" - else - dos.py destroy "${ENV_NAME}" - fi - fi - - exit "${ec}" -} - -# MAIN - -# first we want to get variable from command line options -GetoptsVariables ${@} - -# then we define global variables and there defaults when needed -GlobalVariables - -# check do we have all critical variables set -CheckVariables - -# first we chdir into our working directory unless we dry run -CdWorkSpace - -# Activate python virtual environment -ActivateVirtualenv - -# Check/update PyPi requirements -CheckRequirements - -# Run the test -RunTest